| text (string, 12 – 1.05M chars) | repo_name (string, 5 – 86 chars) | path (string, 4 – 191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 – 1.05M) | keyword (list, 1 – 23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
import numpy
import os, inspect
from orbkit import read
from orbkit.analytical_integrals import get_dipole_moment
from orbkit.test.tools import equal
from orbkit import options
'''Reference dipole moments for 'h2o_rhf_cart' (2015-09-30):
\tMolpro 2012 (.out): 0.00000000 0.00000000 0.81739430
\tGaussian 09 (.fchk): -4.58602321E-17 -3.46944695E-18 -8.17393478E-01
\tTurbomole 6.5 (dscf.log): 0.000000 0.000000 0.817391
'''
ref_dip = {'molpro': [ 0.00000000, 0.00000000, 0.81739430],
'gaussian': [-4.58602321E-17, -3.46944695E-18, -8.17393478E-01],
'turbomole': [0.000000, 0.000000, 0.817391],
}
options.quiet = True
tests_home = os.path.dirname(inspect.getfile(inspect.currentframe()))
output_folder = os.path.join(tests_home, '../outputs_for_testing')
tests = ['h2o_rhf_cart','h2o_rhf_sph','h2o_uhf_cart','h2o_uhf_sph']
ok_opt = ['molden',
'gaussian.log',
'cclib',
'gaussian.fchk',
'aomix']
folder = ['molpro',
'gaussian',
'gaussian',
'gaussian',
'turbomole']
fileext = ['.molden',
'.inp.log',
'.inp.log',
'.fchk',
'/aomix.in']
for i in range(len(tests)):
for j in range(len(folder)):
skip = False
if ok_opt[j] == 'cclib':
try:
__import__(ok_opt[j])
except ImportError:
skip = True
if not skip:
fid = os.path.join(output_folder,'%s/%s%s'%(folder[j],tests[i],fileext[j]))
if 'uhf' in tests[i] and folder[j] == 'molpro':
# Read the alpha input file
qc = read.main_read(fid,itype=ok_opt[j],
all_mo=True,spin=None,i_md=0,interactive=False)
# Read the beta input file and extend the MO list with it
qc_b = read.main_read(fid,itype=ok_opt[j],
all_mo=True,spin=None,i_md=1,interactive=False)
qc.mo_spec.extend(qc_b.mo_spec)
qc.mo_spec.update()
else:
qc = read.main_read(fid ,itype=ok_opt[j],interactive=False,
all_mo=True,cclib_parser='Gaussian')
dip = get_dipole_moment(qc,component=['x','y','z'])
equal(dip, ref_dip[folder[j]])
'''
Old tests
'''
tests_home = os.path.dirname(inspect.getfile(inspect.currentframe()))
folder = os.path.join(tests_home, '../outputs_for_testing/molpro')
filepath = os.path.join(folder, 'h2o_rhf_sph.molden')
qc = read.main_read(filepath, all_mo=True)
dip = get_dipole_moment(qc,component=['x','y','z'])
ref_dip = [0.00000000e+00, -1.01130147e-16, 8.17259184e-01]
equal(dip, ref_dip)
qc.geo_spec += numpy.array([1,1,0])
dip = get_dipole_moment(qc,component=['x','y','z'])
equal(dip, ref_dip)
#Slightly move one atom and calculate dipoles again
qc.geo_spec[1] += numpy.array([1,1,0])
dip = get_dipole_moment(qc,component=['x','y','z'])
equal(dip, [0.25214432, -0.20529275, 1.09887067])
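# Minimal point-charge sketch (not part of the original test) of why the dipole
# above is unchanged by the rigid shift: for a neutral system sum(q_i) = 0, so
# translating every position by t adds sum(q_i)*t = 0 to mu = sum_i q_i*r_i.
# The charges and positions below are hypothetical.
_q = numpy.array([1.0, -0.5, -0.5])
_r = numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
_t = numpy.array([1.0, 1.0, 0.0])
assert numpy.allclose(numpy.dot(_q, _r), numpy.dot(_q, _r + _t))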
| orbkit/orbkit | orbkit/test/analytical_properties/dipole.py | Python | lgpl-3.0 | 3,049 | ["Gaussian", "Molpro", "TURBOMOLE", "cclib"] | 8a11d270d8ca6c30452b335f7e53c2d03cac3e48378d2875a0fa9f7108511ab7 |
"""
=================
Lorentzian Fitter
=================
"""
import numpy
from numpy.ma import median
from numpy import pi
from pyspeckit.mpfit import mpfit
from . import fitter
class LorentzianFitter(fitter.SimpleFitter):
def __init__(self,multisingle='multi'):
self.npars = 3
self.npeaks = 1
self.onepeaklorentzfit = self._fourparfitter(self.onepeaklorentzian)
if multisingle in ('multi','single'):
self.multisingle = multisingle
else:
raise Exception("multisingle must be multi or single")
def __call__(self,*args,**kwargs):
if self.multisingle == 'single':
return self.onepeaklorentzfit(*args,**kwargs)
elif self.multisingle == 'multi':
return self.multilorentzfit(*args,**kwargs)
def onedlorentzian(x,H,A,dx,w):
"""
Returns a 1-dimensional gaussian of form
H+A*numpy.exp(-(x-dx)**2/(2*w**2))
"""
return H+A/(2*pi)*w/((x-dx)**2 + (w/2.0)**2)
def n_lorentzian(pars=None,a=None,dx=None,width=None):
"""
Returns a function that sums over N lorentzians, where N is the length of
a,dx,sigma *OR* N = len(pars) / 3
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 3n, assuming a,dx,sigma repeated
dx - offset (velocity center) values
width - line widths (Lorentzian FWHM)
a - amplitudes
"""
if len(pars) % 3 == 0:
a = [pars[ii] for ii in range(0, len(pars), 3)]
dx = [pars[ii] for ii in range(1, len(pars), 3)]
width = [pars[ii] for ii in range(2, len(pars), 3)]
elif not(len(dx) == len(width) == len(a)):
raise ValueError("Wrong array lengths! dx: %i width %i a: %i" % (len(dx),len(width),len(a)))
def L(x):
v = numpy.zeros(len(x))
for i in range(len(dx)):
v += a[i] / (2*pi) * width[i] / ((x-dx[i])**2 + (width[i]/2.0)**2)
return v
return L
def multilorentzfit(self):
"""
not implemented
"""
print "Not implemented"
| keflavich/pyspeckit-obsolete | pyspeckit/spectrum/models/lorentzian.py | Python | mit | 2,204 | ["Gaussian"] | ad0216cce1601fa939ad00c2212458735c941ef7fed27f3a0911a625658579d3 |
# Copyright 2010 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to use the Closure Compiler CLI from Python."""
import logging
import re
import subprocess
# Pulls a version number from the first line of 'java -version'.
# Versions are in the format of n.n.*. See
# http://www.oracle.com/technetwork/java/javase/versioning-naming-139433.html
_VERSION_REGEX = re.compile(r'"([0-9]\.[0-9]+)')
class JsCompilerError(Exception):
"""Raised if there's an error in calling the compiler."""
pass
def _GetJavaVersionString():
"""Get the version string from the Java VM."""
return subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)
def _ParseJavaVersion(version_string):
"""Returns the string for the current version of Java installed.
Args:
version_string: String of the Java version (e.g. '1.7.2-ea').
Returns:
The major and minor versions, as a float (e.g. 1.7).
"""
match = _VERSION_REGEX.search(version_string)
if match:
return float(match.group(1))
def _GetJsCompilerArgs(compiler_jar_path, java_version, source_paths,
jvm_flags, compiler_flags):
"""Assembles arguments for call to JsCompiler."""
if java_version < 1.6:
raise JsCompilerError('Closure Compiler requires Java 1.6 or higher. '
'Please visit http://www.java.com/getjava')
args = ['java']
# Add JVM flags we believe will produce the best performance. See
# https://groups.google.com/forum/#!topic/closure-library-discuss/7w_O9-vzlj4
# Attempt 32-bit mode if we're <= Java 1.7
if java_version <= 1.7:
args += ['-d32']
# Prefer the "client" VM.
args += ['-client']
# Add JVM flags, if any
if jvm_flags:
args += jvm_flags
# Add the application JAR.
args += ['-jar', compiler_jar_path]
for path in source_paths:
args += ['--js', path]
# Add compiler flags, if any.
if compiler_flags:
args += compiler_flags
return args
def Compile(compiler_jar_path, source_paths,
jvm_flags=None,
compiler_flags=None):
"""Prepares command-line call to Closure Compiler.
Args:
compiler_jar_path: Path to the Closure compiler .jar file.
source_paths: Source paths to build, in order.
jvm_flags: A list of additional flags to pass on to JVM.
compiler_flags: A list of additional flags to pass on to Closure Compiler.
Returns:
The compiled source, as a string.
Raises:
JsCompilerError: if compilation fails.
"""
java_version = _ParseJavaVersion(_GetJavaVersionString())
args = _GetJsCompilerArgs(
compiler_jar_path, java_version, source_paths, jvm_flags, compiler_flags)
logging.info('Compiling with the following command: %s', ' '.join(args))
try:
return subprocess.check_output(args)
except subprocess.CalledProcessError:
raise JsCompilerError('JavaScript compilation failed.')
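# Hypothetical usage sketch (not part of this module); the jar and source paths
# are placeholders, not real files.
if __name__ == '__main__':
    compiled = Compile(
        'compiler.jar',               # assumed path to the Closure Compiler jar
        ['deps.js', 'main.js'],       # sources, in dependency order
        compiler_flags=['--compilation_level', 'ADVANCED_OPTIMIZATIONS'])
    logging.info('Compiled %d bytes of output', len(compiled))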
| gregrperkins/closure-library | closure/bin/build/jscompiler.py | Python | apache-2.0 | 3,423 | ["VisIt"] | 9d65f44fc60b95360379800a64c1bb69d65568b5e2393ab68d184915ae4c3c9e |
import logging
from copy import deepcopy
from . import ModelChecker
from indra.statements import *
from indra.ontology.bio import bio_ontology
from .model_checker import signed_edges_to_signed_nodes
logger = logging.getLogger(__name__)
class PybelModelChecker(ModelChecker):
"""Check a PyBEL model against a set of INDRA statements.
Parameters
----------
model : pybel.BELGraph
A Pybel model to check.
statements : Optional[list[indra.statements.Statement]]
A list of INDRA Statements to check the model against.
do_sampling : bool
Whether to use breadth-first search or weighted sampling to
generate paths. Default is False (breadth-first search).
seed : int
Random seed for sampling (optional, default is None).
nodes_to_agents : dict
A dictionary mapping nodes of intermediate signed edges graph to INDRA
agents.
Attributes
----------
graph : nx.DiGraph
A DiGraph with signed nodes to find paths in.
"""
def __init__(self, model, statements=None, do_sampling=False, seed=None,
nodes_to_agents=None):
super().__init__(model, statements, do_sampling, seed, nodes_to_agents)
def get_graph(self, include_variants=False, symmetric_variant_links=False,
include_components=True, symmetric_component_links=True):
"""Convert a PyBELGraph to a graph with signed nodes."""
# This import is done here rather than at the top level to avoid
# making pybel an implicit dependency of the model checker
from indra.assemblers.pybel.assembler import belgraph_to_signed_graph
if self.graph:
return self.graph
signed_edges = belgraph_to_signed_graph(
self.model,
include_variants=include_variants,
symmetric_variant_links=symmetric_variant_links,
include_components=include_components,
symmetric_component_links=symmetric_component_links,
propagate_annotations=True)
self.graph = signed_edges_to_signed_nodes(
signed_edges, copy_edge_data={'belief'})
self.get_nodes_to_agents()
return self.graph
def process_statement(self, stmt):
# Check if this is one of the statement types that we can check
if not isinstance(stmt, (Modification, RegulateAmount,
RegulateActivity, Influence)):
logger.info('Statement type %s not handled' %
stmt.__class__.__name__)
return (None, None, 'STATEMENT_TYPE_NOT_HANDLED')
subj, obj = stmt.agent_list()
if obj is None:
# Cannot check modifications for statements without object
if isinstance(stmt, Modification):
return (None, None, 'STATEMENT_TYPE_NOT_HANDLED')
obj_nodes = [None]
else:
# Get the polarity for the statement
if isinstance(stmt, Modification):
target_polarity = 1 if isinstance(stmt, RemoveModification) \
else 0
obj_agent = deepcopy(obj)
obj_agent.mods.append(stmt._get_mod_condition())
obj = obj_agent
elif isinstance(stmt, RegulateActivity):
target_polarity = 0 if stmt.is_activation else 1
obj_agent = deepcopy(obj)
obj_agent.activity = stmt._get_activity_condition()
obj_agent.activity.is_active = True
obj = obj_agent
elif isinstance(stmt, RegulateAmount):
target_polarity = 1 if isinstance(stmt, DecreaseAmount) else 0
elif isinstance(stmt, Influence):
target_polarity = 1 if stmt.overall_polarity() == -1 else 0
obj_nodes = self.get_nodes(obj, self.graph, target_polarity)
# Statement has object but it's not in the graph
if not obj_nodes:
return (None, None, 'OBJECT_NOT_FOUND')
return ([subj], obj_nodes, None)
def process_subject(self, subj):
# We will not get here if subject is None
subj_nodes = self.get_nodes(subj, self.graph, 0)
# Statement has subject but it's not in the graph
if not subj_nodes:
return (None, 'SUBJECT_NOT_FOUND')
return subj_nodes, None
def get_nodes(self, agent, graph, target_polarity):
# This import is done here rather than at the top level to avoid
# making pybel an implicit dependency of the model checker
from indra.assemblers.pybel.assembler import _get_agent_node
nodes = set()
# First get exact match
agent_node = _get_agent_node(agent)[0]
if agent_node:
node = (agent_node, target_polarity)
if node in graph.nodes:
nodes.add(node)
# Then try refined versions of the agent
for n, ag in self.nodes_to_agents.items():
if ag is not None and ag.refinement_of(agent, bio_ontology):
node = (n, target_polarity)
if node in graph.nodes:
nodes.add(node)
return nodes
def get_nodes_to_agents(self):
"""Return a dictionary mapping PyBEL nodes to INDRA agents."""
if self.nodes_to_agents:
return self.nodes_to_agents
# This import is done here rather than at the top level to avoid
# making pybel an implicit dependency of the model checker
from indra.sources.bel.processor import get_agent
self.nodes_to_agents = {
node: get_agent(node) for node in self.model.nodes}
return self.nodes_to_agents
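# Hypothetical usage sketch (not part of this module). It assumes an existing
# pybel.BELGraph `bel_graph` and a list of INDRA statements `stmts`;
# check_model() is assumed to be provided by the ModelChecker base class.
def _example_usage(bel_graph, stmts):
    mc = PybelModelChecker(bel_graph, statements=stmts)
    mc.get_graph()           # build the signed-node graph once up front
    return mc.check_model()  # path-search results, one per statement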
| johnbachman/belpy | indra/explanation/model_checker/pybel.py | Python | mit | 5,708 | ["Pybel"] | 3291310a8a640b10ed09ca37aa1652bf62f93dcc886a44f69fbc9d554c7559e4 |
|
from setuptools import setup
from os import path
# Get the long description from the README file
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="CAI",
packages=["CAI"],
version="1.0.3",
description="Python implementation of codon adaptation index",
long_description=long_description,
author="Benjamin Lee",
author_email="benjamin_lee@college.harvard.edu",
url="https://github.com/Benjamin-Lee/CodonAdaptationIndex", # use the URL to the github repo
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Programming Language :: Python",
],
install_requires=["scipy", "biopython", "click>=7"],
tests_require=["pytest"],
setup_requires=["pytest-runner"],
license="MIT",
use_2to3=True,
python_requires=">=3.4",
entry_points={"console_scripts": ["CAI=CAI.cli:cli"]},
)
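# Illustrative note (not part of setup.py): the entry_points declaration above
# means that, after installation, running `CAI` on the command line is
# equivalent to:
#
#     from CAI.cli import cli
#     cli()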
| Benjamin-Lee/CodonAdaptationIndex | setup.py | Python | mit | 1,021 | ["Biopython"] | 0e420a802d78172fab1982902fbccb5d48af6f6d3e3f466c49620bffb8e50da4 |
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Weitian LI <liweitianux@live.com>
# MIT license
"""
Calculate the coordinate of the emission centroid within the image.
The image is smoothed first, and an iterative two-phase procedure is then
applied to determine the emission centroid.
"""
import os
import sys
import argparse
import subprocess
from _context import acispy
from acispy.manifest import get_manifest
from acispy.ciao import setup_pfiles
from acispy.ds9 import ds9_view
from acispy.region import Regions
def smooth_image(infile, outfile=None,
kernelspec="lib:gaus(2,5,1,10,10)", method="fft",
clobber=False):
"""
Smooth the image by a Gaussian kernel using the ``aconvolve`` tool.
Parameters
----------
infile : str
Path to the input image file
outfile : str, optional
Filename/path of the output smoothed image
(default: built as ``<infile_basename>_aconv.fits``)
kernelspec : str, optional
Kernel specification for ``aconvolve``
method : str, optional
Smooth method for ``aconvolve``
Returns
-------
outfile : str
Filename/path of the smoothed image
"""
clobber = "yes" if clobber else "no"
if outfile is None:
outfile = os.path.splitext(infile)[0] + "_aconv.fits"
subprocess.check_call(["punlearn", "aconvolve"])
subprocess.check_call([
"aconvolve", "infile=%s" % infile, "outfile=%s" % outfile,
"kernelspec=%s" % kernelspec, "method=%s" % method,
"clobber=%s" % clobber
])
return outfile
def get_peak(image):
"""
Get the peak coordinate on the image.
Returns
-------
peak : 2-float tuple
(Physical) coordinate of the peak.
"""
subprocess.check_call(["punlearn", "dmstat"])
subprocess.check_call([
"dmstat", "infile=%s" % image,
"centroid=no", "media=no", "sigma=no", "clip=no", "verbose=0"
])
peak = subprocess.check_output([
"pget", "dmstat", "out_max_loc"
]).decode("utf-8").strip()
peak = peak.split(",")
return (float(peak[0]), float(peak[1]))
def get_centroid(image, center, radius=50):
"""
Calculate the centroid on the image within the specified circle.
Parameters
----------
image : str
Path to the image file.
center : 2-float tuple
Central (physical) coordinate of the circle.
radius : float
Radius (pixel) of the circle.
Returns
-------
centroid : 2-float tuple
(Physical) coordinate of the centroid.
"""
x, y = center
region = "circle(%f,%f,%f)" % (x, y, radius)
subprocess.check_call(["punlearn", "dmstat"])
subprocess.check_call([
"dmstat", "infile=%s[sky=%s]" % (image, region),
"centroid=yes", "media=no", "sigma=no", "clip=no", "verbose=0"
])
centroid = subprocess.check_output([
"pget", "dmstat", "out_cntrd_phys"
]).decode("utf-8").strip()
centroid = centroid.split(",")
return (float(centroid[0]), float(centroid[1]))
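def _iterate_centroid_sketch(img, start, radius, niter=10):
    """Pure-numpy analogue (illustration only, not used by this script) of
    the dmstat-based get_centroid() loop: recompute the intensity-weighted
    mean position inside a circle centered on the previous estimate.
    """
    import numpy as np  # assumed available; only needed for this sketch
    yy, xx = np.indices(img.shape)
    cx, cy = float(start[0]), float(start[1])
    for _ in range(niter):
        mask = (xx - cx) ** 2 + (yy - cy) ** 2 <= radius ** 2
        w = img * mask
        cx = (w * xx).sum() / w.sum()
        cy = (w * yy).sum() / w.sum()
    return (cx, cy)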
def main():
parser = argparse.ArgumentParser(
description="Calculate the emission centroid within the image")
parser.add_argument("-i", "--infile", dest="infile", required=True,
help="input image file (e.g., 0.7-2.0 keV)")
parser.add_argument("-o", "--outfile", dest="outfile",
default="centroid.reg",
help="output centroid region file " +
"(default: centroid.reg")
parser.add_argument("-R", "--radius1", dest="radius1",
type=float, default=100,
help="circle radius [pixel] for first phase " +
"centroid calculation (default: 100 pixel)")
parser.add_argument("-r", "--radius2", dest="radius2",
type=float, default=50,
help="circle radius [pixel] for second phase " +
"calculation to tune centroid (default: 50 pixel)")
parser.add_argument("-n", "--niter", dest="niter",
type=int, default=10,
help="iterations for each phase (default: 10)")
parser.add_argument("-s", "--start", dest="start",
help="a region file containing a circle/point " +
"that specifies the starting point " +
"(default: using the peak of the image)")
parser.add_argument("-V", "--view", dest="view", action="store_true",
help="open DS9 to view output centroid")
parser.add_argument("-C", "--clobber", dest="clobber", action="store_true",
help="overwrite existing files")
args = parser.parse_args()
setup_pfiles(["aconvolve", "dmstat"])
print("Smooth input image using 'aconvolve' ...", file=sys.stderr)
img_smoothed = smooth_image(args.infile, clobber=args.clobber)
if args.start:
print("Get starting point from region file: %s" % args.start,
file=sys.stderr)
region = Regions(args.start).regions[0]
center = (region.xc, region.yc)
else:
print("Use peak as the starting point ...", file=sys.stderr)
center = get_peak(img_smoothed)
print("Starting point: (%f, %f)" % center, file=sys.stderr)
centroid = center
for phase, radius in enumerate([args.radius1, args.radius2]):
print("Calculate centroid phase %d (circle radius: %.1f)" %
(phase+1, radius), file=sys.stderr)
for i in range(args.niter):
print("%d..." % (i+1), end="", flush=True, file=sys.stderr)
centroid = get_centroid(img_smoothed, center=centroid,
radius=radius)
print("Done!", file=sys.stderr)
with open(args.outfile, "w") as f:
f.write("point(%f,%f)\n" % centroid)
print("Saved centroid to file:", args.outfile, file=sys.stderr)
if args.view:
ds9_view(img_smoothed, regfile=args.outfile)
# Add calculated centroid region to manifest
manifest = get_manifest()
key = "reg_centroid"
manifest.setpath(key, args.outfile)
print("Added item '%s' to manifest: %s" % (key, manifest.get(key)),
file=sys.stderr)
if __name__ == "__main__":
main()
| liweitianux/chandra-acis-analysis | bin/calc_centroid.py | Python | mit | 6,366 | ["Gaussian"] | 02973f0ae82d8de959d293f014241a5aece976c0555303bbe1951335f47db555 |
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
"""Return true if the given filename is binary.
@raise EnvironmentError: if the file does not exist or cannot be accessed.
@attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
@author: Trent Mick <TrentM@ActiveState.com>
@author: Jorge Orpinel <jorge@orpinel.com>"""
try:
with open(pathname, 'r') as f:
CHUNKSIZE = 1024
while 1:
chunk = f.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
if len(chunk) < CHUNKSIZE:
break # done
except:
return True
return False
def get_all_files(rootdir):
all_files = []
for root, dirs, files in os.walk(rootdir):
# don't visit certain dirs
if 'Godeps' in dirs:
dirs.remove('Godeps')
if 'third_party' in dirs:
dirs.remove('third_party')
if '.git' in dirs:
dirs.remove('.git')
if 'exceptions.txt' in files:
files.remove('exceptions.txt')
if 'known-flags.txt' in files:
files.remove('known-flags.txt')
if 'vendor' in dirs:
dirs.remove('vendor')
for name in files:
if name.endswith(".svg"):
continue
if name.endswith(".gliffy"):
continue
pathname = os.path.join(root, name)
if is_binary(pathname):
continue
all_files.append(pathname)
return all_files
def normalize_files(rootdir, files):
newfiles = []
a = ['Godeps', 'vendor', 'third_party', 'exceptions.txt', 'known-flags.txt']
for f in files:
if any(x in f for x in a):
continue
if f.endswith(".svg"):
continue
if f.endswith(".gliffy"):
continue
newfiles.append(f)
for i, f in enumerate(newfiles):
if not os.path.isabs(f):
newfiles[i] = os.path.join(rootdir, f)
return newfiles
def line_has_bad_flag(line, flagre):
results = flagre.findall(line)
for result in results:
if not "_" in result:
return False
# this should exclude many cases where jinja2 templates use kube flags
# as variables, except it uses _ for the variable name
if "{% set" + result + "= \"" in line:
return False
if "pillar[" + result + "]" in line:
return False
if "grains" + result in line:
return False
# These are usually yaml definitions
if result.endswith(":"):
return False
# something common in juju variables...
if "template_data[" + result + "]" in line:
return False
return True
return False
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to run all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If running the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
# preload the 'known' flags
pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
f = open(pathname, 'r')
flags = set(f.read().splitlines())
f.close()
# preload the 'known' flags which don't follow the - standard
pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
f = open(pathname, 'r')
excluded_flags = set(f.read().splitlines())
f.close()
regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
new_flags = set()
new_excluded_flags = set()
# walk all the files looking for any flags being declared
for pathname in files:
if not pathname.endswith(".go"):
continue
f = open(pathname, 'r')
data = f.read()
f.close()
matches = []
for regex in regexs:
matches = matches + regex.findall(data)
for flag in matches:
if any(x in flag for x in excluded_flags):
continue
if "_" in flag:
new_excluded_flags.add(flag)
if not "-" in flag:
continue
if flag not in flags:
new_flags.add(flag)
if len(new_excluded_flags) != 0:
print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
print("Are you certain this flag should not have been declared with an - instead?")
l = list(new_excluded_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
if len(new_flags) != 0:
print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
l = list(new_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
return list(flags)
def flags_to_re(flags):
"""turn the list of all flags we found into a regex find both - and _ versions"""
dashRE = re.compile('[-_]')
flagREs = []
for flag in flags:
# turn all flag names into regexs which will find both types
newre = dashRE.sub('[-_]', flag)
# only match if there is not a leading or trailing alphanumeric character
flagREs.append("[^\w${]" + newre + "[^\w]")
# turn that list of regex strings into a single large RE
flagRE = "|".join(flagREs)
flagRE = re.compile(flagRE)
return flagRE
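def _flags_to_re_demo():
    # Tiny illustration (not part of the original script): each flag matches
    # both its dash and underscore spelling, guarded by non-word boundaries.
    flagRE = flags_to_re(["log-dir"])
    assert flagRE.search(" --log_dir ") is not None
    assert flagRE.search("mylog-dirx") is None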
def load_exceptions(rootdir):
exceptions = set()
if args.skip_exceptions:
return exceptions
exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
exception_file = open(exception_filename, 'r')
for exception in exception_file.read().splitlines():
out = exception.split(":", 1)
if len(out) != 2:
printf("Invalid line in exceptions file: %s" % exception)
continue
filename = out[0]
line = out[1]
exceptions.add((filename, line))
return exceptions
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
exceptions = load_exceptions(rootdir)
if len(args.filenames) > 0:
files = args.filenames
else:
files = get_all_files(rootdir)
files = normalize_files(rootdir, files)
flags = get_flags(rootdir, files)
flagRE = flags_to_re(flags)
bad_lines = []
# walk all the files looking for any flag that was declared and now has an _
for pathname in files:
relname = os.path.relpath(pathname, rootdir)
f = open(pathname, 'r')
for line in f.read().splitlines():
if line_has_bad_flag(line, flagRE):
if (relname, line) not in exceptions:
bad_lines.append((relname, line))
f.close()
if len(bad_lines) != 0:
if not args.skip_exceptions:
print("Found illegal 'flag' usage. If these are false positives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
bad_lines.sort()
for (relname, line) in bad_lines:
print("%s:%s" % (relname, line))
return 1
if __name__ == "__main__":
sys.exit(main())
| caesarxuchao/contrib | hack/verify-flags-underscore.py | Python | apache-2.0 | 8,944 | ["VisIt"] | 67660fd7cae6cf1de9af6c069e8cdfa07f922b7c8ab0c5cf61463dfdbe2957af |
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
""" Module for translating ONNX operators into Mxnet operatoes"""
# pylint: disable=unused-argument,protected-access
import numpy as np
from . import _translation_utils as translation_utils
from .... import symbol
# Method definitions for the callable objects mapped in the import_helper module
def identity(attrs, inputs, proto_obj):
"""Returns the identity function of the the input."""
return 'identity', attrs, inputs
def random_uniform(attrs, inputs, proto_obj):
"""Draw random samples from a uniform distribtuion."""
new_attr = translation_utils._remove_attributes(attrs, ['seed'])
return 'random_uniform', new_attr, inputs
def random_normal(attrs, inputs, proto_obj):
"""Draw random samples from a Gaussian distribution."""
new_attr = translation_utils._remove_attributes(attrs, ['seed'])
new_attr = translation_utils._fix_attribute_names(new_attr, {'mean' : 'loc'})
return 'random_normal', new_attr, inputs
# Arithmetic Operations
def add(attrs, inputs, proto_obj):
"""Adding two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_add', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_add', new_attr, inputs
def subtract(attrs, inputs, proto_obj):
"""Subtracting two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_sub', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_sub', new_attr, inputs
def multiply(attrs, inputs, proto_obj):
"""Multiply two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_mul', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_mul', new_attr, inputs
def divide(attrs, inputs, proto_obj):
"""Divide two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_div', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_div', new_attr, inputs
def logical_and(attrs, inputs, proto_obj):
"""Logical and of two input arrays."""
return 'broadcast_logical_and', attrs, inputs
def logical_or(attrs, inputs, proto_obj):
"""Logical or of two input arrays."""
return 'broadcast_logical_or', attrs, inputs
def logical_xor(attrs, inputs, proto_obj):
"""Logical xor of two input arrays."""
return 'broadcast_logical_xor', attrs, inputs
def logical_not(attrs, inputs, proto_obj):
"""Logical not of two input arrays."""
return 'logical_not', attrs, inputs
def absolute(attrs, inputs, proto_obj):
"""Returns element-wise absolute value of the input."""
return 'abs', attrs, inputs
def negative(attrs, inputs, proto_obj):
"""Negation of every element in a tensor"""
return 'negative', attrs, inputs
def add_n(attrs, inputs, proto_obj):
"""Elementwise sum of arrays"""
return 'add_n', attrs, inputs
# Sorting and Searching
def argmax(attrs, inputs, proto_obj):
"""Returns indices of the maximum values along an axis"""
return 'argmax', attrs, inputs
def argmin(attrs, inputs, proto_obj):
"""Returns indices of the minimum values along an axis."""
return 'argmin', attrs, inputs
def maximum(attrs, inputs, proto_obj):
"""
Elementwise maximum of arrays.
MXNet maximum compares only two symbols at a time.
ONNX can send more than two to compare.
Breaking into multiple mxnet ops to compare two symbols at a time
"""
if len(inputs) > 1:
mxnet_op = symbol.maximum(inputs[0], inputs[1])
for op_input in inputs[2:]:
mxnet_op = symbol.maximum(mxnet_op, op_input)
else:
mxnet_op = symbol.maximum(inputs[0], inputs[0])
return mxnet_op, attrs, inputs
def minimum(attrs, inputs, proto_obj):
"""Elementwise minimum of arrays."""
# MXNet minimum compares only two symbols at a time.
# ONNX can send more than two to compare.
# Breaking into multiple mxnet ops to compare two symbols at a time
if len(inputs) > 1:
mxnet_op = symbol.minimum(inputs[0], inputs[1])
for op_input in inputs[2:]:
mxnet_op = symbol.minimum(mxnet_op, op_input)
else:
mxnet_op = symbol.minimum(inputs[0], inputs[0])
return mxnet_op, attrs, inputs
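def _pairwise_fold_demo():
    # numpy analogue (illustration only) of the pairwise fold used in
    # maximum()/minimum() above: reduce any number of inputs two at a time.
    arrs = [np.array([1, 5]), np.array([4, 2]), np.array([3, 3])]
    out = arrs[0]
    for arr in arrs[1:]:
        out = np.maximum(out, arr)
    return out  # -> array([4, 5])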
def lesser(attrs, inputs, proto_obj):
"""Logical Lesser operator with broadcasting."""
return 'broadcast_lesser', attrs, inputs
def greater(attrs, inputs, proto_obj):
"""Logical Greater operator with broadcasting."""
return 'broadcast_greater', attrs, inputs
def equal(attrs, inputs, proto_obj):
"""Logical Equal operator with broadcasting."""
return 'broadcast_equal', attrs, inputs
#Hyperbolic functions
def tanh(attrs, inputs, proto_obj):
"""Returns the hyperbolic tangent of the input array."""
return 'tanh', attrs, inputs
# Rounding
def ceil(attrs, inputs, proto_obj):
""" Calculate ceil value for input """
return 'ceil', attrs, inputs
def floor(attrs, inputs, proto_obj):
""" Calculate floor value for input """
return 'floor', attrs, inputs
# Joining and splitting
def concat(attrs, inputs, proto_obj):
""" Joins input arrays along a given axis. """
new_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'})
return 'concat', new_attrs, inputs
# Basic neural network functions
def softsign(attrs, inputs, proto_obj):
"""Computes softsign of x element-wise."""
return 'softsign', attrs, inputs
def sigmoid(attrs, inputs, proto_obj):
"""Computes elementwise sigmoid of the input array"""
return 'sigmoid', attrs, inputs
def relu(attrs, inputs, proto_obj):
"""Computes rectified linear function."""
return 'relu', attrs, inputs
def pad(attrs, inputs, proto_obj):
""" Add padding to input tensor"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width',
'value' : 'constant_value'
})
new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
return 'pad', new_attrs, inputs
def matrix_multiplication(attrs, inputs, proto_obj):
"""Performs general matrix multiplication"""
return 'linalg_gemm2', attrs, inputs
def batch_norm(attrs, inputs, proto_obj):
"""Batch normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps',
'is_test': 'fix_gamma'})
new_attrs = translation_utils._remove_attributes(new_attrs,
['spatial', 'consumed_inputs'])
# Disable cuDNN BN only if epsilon from model is < than minimum cuDNN eps (1e-5)
cudnn_min_eps = 1e-5
cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': cudnn_off})
# in test mode "fix_gamma" should be unset.
new_attrs['fix_gamma'] = not attrs.get('is_test', 1)
return 'BatchNorm', new_attrs, inputs
def instance_norm(attrs, inputs, proto_obj):
"""Instance Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'})
return 'InstanceNorm', new_attrs, inputs
def leaky_relu(attrs, inputs, proto_obj):
"""Leaky Relu function"""
if 'alpha' in attrs:
new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
else:
new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01})
return 'LeakyReLU', new_attrs, inputs
def _elu(attrs, inputs, proto_obj):
"""Elu function"""
if 'alpha' in attrs:
new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
else:
new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'act_type': 'elu'})
return 'LeakyReLU', new_attrs, inputs
def _prelu(attrs, inputs, proto_obj):
"""PRelu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'})
return 'LeakyReLU', new_attrs, inputs
def softmax(attrs, inputs, proto_obj):
"""Softmax function."""
if 'axis' not in attrs:
attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1})
return 'softmax', attrs, inputs
def log_softmax(attrs, inputs, proto_obj):
"""Computes the log softmax of the input. This is equivalent to
computing softmax followed by log."""
return 'log_softmax', attrs, inputs
def softplus(attrs, inputs, proto_obj):
"""Applies the sofplus activation function element-wise to the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'})
return 'Activation', new_attrs, inputs
def conv(attrs, inputs, proto_obj):
"""Compute N-D convolution on (N+2)-D input."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
'strides' : 'stride',
'pads': 'pad',
'dilations': 'dilate',
'group': 'num_group'})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
new_attrs = translation_utils._fix_bias('Convolution', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('Convolution', new_attrs, inputs, proto_obj)
kernel = new_attrs['kernel']
stride = new_attrs['stride'] if 'stride' in new_attrs else []
padding = new_attrs['pad'] if 'pad' in new_attrs else []
dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
num_filter = new_attrs['num_filter']
num_group = new_attrs['num_group']
no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else 0
bias = None if no_bias is True else inputs[2]
# Unlike ONNX, MXNet's convolution operator does not support asymmetric padding, so we first
# use 'Pad' operator, which supports asymmetric padding. Then use the convolution operator.
pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)
conv_op = symbol.Convolution(pad_op, inputs[1], bias,
kernel=kernel, stride=stride, dilate=dilations,
num_filter=num_filter, num_group=num_group, no_bias=no_bias)
return conv_op, new_attrs, inputs
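# Layout note (illustration only): for 4-D NCHW input, symbol.pad expects
# pad_width as (N_before, N_after, C_before, C_after, H_before, H_after,
# W_before, W_after). The (0, 0, 0, 0) prefix above therefore leaves the batch
# and channel axes unpadded, while _pad_sequence_fix reorders the ONNX 'pads'
# values into that per-axis (before, after) form.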
def deconv(attrs, inputs, proto_obj):
"""Computes transposed convolution of the input tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
'strides' : 'stride',
'pads': 'pad',
'dilations': 'dilate',
'group': 'num_group'})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, proto_obj)
kernel = new_attrs['kernel']
stride = new_attrs['stride'] if 'stride' in new_attrs else []
padding = new_attrs['pad'] if 'pad' in new_attrs else []
dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
num_filter = new_attrs['num_filter']
num_group = new_attrs['num_group']
no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else False
bias = None if no_bias is True else inputs[2]
# Unlike ONNX, MXNet's deconvolution operator does not support asymmetric padding, so we first
# use 'Pad' operator, which supports asymmetric padding. Then use the deconvolution operator.
pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)
deconv_op = symbol.Deconvolution(pad_op, inputs[1], bias,
kernel=kernel, stride=stride, dilate=dilations,
num_filter=num_filter, num_group=num_group, no_bias=no_bias)
return deconv_op, new_attrs, inputs
def fully_connected(attrs, inputs, proto_obj):
"""Applies a linear transformation: Y=XWT+b."""
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
new_attrs = translation_utils._fix_bias('FullyConnected', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('FullyConnected', new_attrs, inputs, proto_obj)
return 'FullyConnected', new_attrs, inputs
def global_maxpooling(attrs, inputs, proto_obj):
"""Performs max pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'max'})
return 'Pooling', new_attrs, inputs
def global_avgpooling(attrs, inputs, proto_obj):
"""Performs avg pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'avg'})
return 'Pooling', new_attrs, inputs
def linalg_gemm(attrs, inputs, proto_obj):
"""Performs general matrix multiplication and accumulation"""
trans_a = 0
trans_b = 0
alpha = 1
beta = 1
if 'transA' in attrs:
trans_a = attrs['transA']
if 'transB' in attrs:
trans_b = attrs['transB']
if 'alpha' in attrs:
alpha = attrs['alpha']
if 'beta' in attrs:
beta = attrs['beta']
flatten_a = symbol.flatten(inputs[0])
matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1],
transpose_a=trans_a, transpose_b=trans_b,
alpha=alpha)
gemm_op = symbol.broadcast_add(matmul_op, beta*inputs[2])
new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a',
'transB': 'transpose_b'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
return gemm_op, new_attrs, inputs
def local_response_norm(attrs, inputs, proto_obj):
"""Local Response Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'bias': 'knorm',
'size' : 'nsize'})
return 'LRN', new_attrs, inputs
def dropout(attrs, inputs, proto_obj):
"""Dropout Regularization."""
mode = 'training'
if 'is_test' in attrs and attrs['is_test'] == 0:
mode = 'always'
new_attrs = translation_utils._fix_attribute_names(attrs,
{'ratio': 'p'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
return 'Dropout', new_attrs, inputs
# Changing shape and type.
def reshape(attrs, inputs, proto_obj):
"""Reshape the given array by the shape attribute."""
if len(inputs) == 1:
return 'reshape', attrs, inputs[0]
reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy())
reshape_shape = [int(i) for i in reshape_shape]
new_attrs = {'shape': reshape_shape}
return 'reshape', new_attrs, inputs[:1]
def cast(attrs, inputs, proto_obj):
""" Cast input to a given dtype"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
new_attrs['dtype'] = new_attrs['dtype'].lower()
return 'cast', new_attrs, inputs
def split(attrs, inputs, proto_obj):
"""Splits an array along a particular axis into multiple sub-arrays."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'split' : 'num_outputs'})
return 'split', new_attrs, inputs
def _slice(attrs, inputs, proto_obj):
"""Returns a slice of the input tensor along multiple axes."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
'ends' : 'end',
'starts' : 'begin'})
# onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator
# for multiple axes from mxnet
begin = new_attrs.get('begin')
end = new_attrs.get('end')
axes = new_attrs.get('axis', tuple(range(len(begin))))
slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
if len(axes) > 1:
    # axis 0 was already sliced above; continue from the second axis
    for i, axis in enumerate(axes[1:], start=1):
        slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
return slice_op, new_attrs, inputs
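def _slice_axis_demo():
    # numpy analogue (illustration only) of chaining slice_axis over several
    # axes as _slice() does: each axis is narrowed independently, in turn.
    x = np.arange(24).reshape(2, 3, 4)
    y = x[0:1]         # slice_axis(axis=0, begin=0, end=1)
    y = y[:, 1:3]      # slice_axis(axis=1, begin=1, end=3)
    return y           # shape (1, 2, 4)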
def transpose(attrs, inputs, proto_obj):
"""Transpose the input array."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'perm' : 'axes'})
return 'transpose', new_attrs, inputs
def squeeze(attrs, inputs, proto_obj):
"""Remove single-dimensional entries from the shape of a tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis'})
return 'squeeze', new_attrs, inputs
def unsqueeze(attrs, inputs, cls):
"""Inserts a new axis of size 1 into the array shape"""
# MXNet can only add one axis at a time.
mxnet_op = inputs[0]
for axis in attrs["axes"]:
mxnet_op = symbol.expand_dims(mxnet_op, axis=axis)
return mxnet_op, attrs, inputs
def flatten(attrs, inputs, proto_obj):
"""Flattens the input array into a 2-D array by collapsing the higher dimensions."""
#Mxnet does not have axis support. By default uses axis=1
if 'axis' in attrs and attrs['axis'] != 1:
raise RuntimeError("Flatten operator only supports axis=1")
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
return 'Flatten', new_attrs, inputs
def clip(attrs, inputs, proto_obj):
"""Clips (limits) the values in an array."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min',
'max' : 'a_max'})
if 'a_max' not in new_attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_max' : np.inf})
if 'a_min' not in new_attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_min' : -np.inf})
return 'clip', new_attrs, inputs
#Powers
def reciprocal(attrs, inputs, proto_obj):
"""Returns the reciprocal of the argument, element-wise."""
return 'reciprocal', attrs, inputs
def squareroot(attrs, inputs, proto_obj):
"""Returns element-wise square-root value of the input."""
return 'sqrt', attrs, inputs
def power(attrs, inputs, proto_obj):
"""Returns element-wise result of base element raised to powers from exp element."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'exponent':'exp'})
if 'broadcast' in attrs and attrs['broadcast'] == 1:
new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
return 'broadcast_power', new_attrs, inputs
return 'pow', new_attrs, inputs
def exponent(attrs, inputs, proto_obj):
"""Elementwise exponent of input array."""
return 'exp', attrs, inputs
def _log(attrs, inputs, proto_obj):
"""Elementwise log of input array."""
return 'log', attrs, inputs
# Reduce Functions
def reduce_max(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by maximum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'max', new_attrs, inputs
def reduce_mean(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by mean value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'mean', new_attrs, inputs
def reduce_min(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by minimum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'min', new_attrs, inputs
def reduce_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'sum', new_attrs, inputs
def reduce_prod(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by product value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'prod', new_attrs, inputs
def reduce_log_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs
def reduce_log_sum_exp(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum exp value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
exp_op = symbol.exp(inputs[0])
sum_op = symbol.sum(exp_op, axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs
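def _logsumexp_reference(x, axis=None):
    # Stabilized numpy reference (illustration only) for reduce_log_sum_exp():
    # subtracting the per-axis max before exp() avoids overflow; the symbolic
    # version above computes the same quantity without that safeguard.
    m = np.max(x, axis=axis, keepdims=True)
    return m + np.log(np.sum(np.exp(x - m), axis=axis, keepdims=True))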
def reduce_sum_square(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum square value"""
square_op = symbol.square(inputs[0])
sum_op = symbol.sum(square_op, axis=attrs.get('axes'),
keepdims=attrs.get('keepdims'))
return sum_op, attrs, inputs
def reduce_l2(attrs, inputs, proto_obj):
"""Reduce input tensor by l2 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'norm', new_attrs, inputs
def avg_pooling(attrs, inputs, proto_obj):
""" Average pooling"""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
'pads': 'pad',
})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'pooling_convention': 'valid'
})
new_op = translation_utils._fix_pooling('avg', inputs, new_attrs)
return new_op, new_attrs, inputs
def max_pooling(attrs, inputs, proto_obj):
""" Average pooling"""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
'pads': 'pad',
})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'pooling_convention': 'valid'
})
new_op = translation_utils._fix_pooling('max', inputs, new_attrs)
return new_op, new_attrs, inputs
def max_roi_pooling(attrs, inputs, proto_obj):
"""Max ROI Pooling."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'pooled_shape': 'pooled_size',
'spatial_scale': 'spatial_scale'
})
return 'ROIPooling', new_attrs, inputs
| precedenceguo/mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | Python | apache-2.0 | 26,004 | ["Gaussian"] | 11481e034a3f028234330fd0d8809c75e9192466348b4b3a721ff379956ec41b |
|
"""
Tests for discussion pages
"""
import datetime
from uuid import uuid4
from flaky import flaky
from nose.plugins.attrib import attr
from nose.tools import nottest
from pytz import UTC
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.fixtures.discussion import (
Comment,
Response,
SearchResult,
SearchResultFixture,
SingleThreadViewFixture,
Thread,
UserProfileViewFixture
)
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.discussion import (
DiscussionSortPreferencePage,
DiscussionTabHomePage,
DiscussionTabSingleThreadPage,
DiscussionUserProfilePage,
InlineDiscussionPage
)
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.tests.discussion.helpers import BaseDiscussionMixin, BaseDiscussionTestCase
from common.test.acceptance.tests.helpers import UniqueCourseTest, get_modal_alert, skip_if_browser
THREAD_CONTENT_WITH_LATEX = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n----------\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. (b).\n\n
**(a)** $H_1(e^{j\\omega}) = \\sum_{n=-\\infty}^{\\infty}h_1[n]e^{-j\\omega n} =
\\sum_{n=-\\infty} ^{\\infty}h[n]e^{-j\\omega n}+\\delta_2e^{-j\\omega n_0}$
$= H(e^{j\\omega})+\\delta_2e^{-j\\omega n_0}=A_e (e^{j\\omega}) e^{-j\\omega n_0}
+\\delta_2e^{-j\\omega n_0}=e^{-j\\omega n_0} (A_e(e^{j\\omega})+\\delta_2)
$H_3(e^{j\\omega})=A_e(e^{j\\omega})+\\delta_2$. Dummy $A_e(e^{j\\omega})$ dummy post $.
$A_e(e^{j\\omega}) \\ge -\\delta_2$, it follows that $H_3(e^{j\\omega})$ is real and
$H_3(e^{j\\omega})\\ge 0$.\n\n**(b)** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.\n\n
**Case 1:** If $re^{j\\theta}$ is a Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n**Case 3:** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem $H_3(e^{j\\omega}) = P(cos\\omega)(cos\\omega - cos\\theta)^k$,
Lorem Lorem Lorem Lorem Lorem Lorem $P(cos\\omega)$ has no
$(cos\\omega - cos\\theta)$ factor.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
$P(cos\\theta) \\neq 0$. Since $P(cos\\omega)$ this is a dummy data post $\\omega$,
dummy $\\delta > 0$ such that for all $\\omega$ dummy $|\\omega - \\theta|
< \\delta$, $P(cos\\omega)$ Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
"""
class DiscussionResponsePaginationTestMixin(BaseDiscussionMixin):
"""
A mixin containing tests for response pagination for use by both inline
discussion and the discussion tab
"""
def assert_response_display_correct(self, response_total, displayed_responses):
"""
Assert that various aspects of the display of responses are all correct:
* Text indicating total number of responses
* Presence of "Add a response" button
* Number of responses actually displayed
* Presence and text of indicator of how many responses are shown
* Presence and text of button to load more responses
"""
self.assertEqual(
self.thread_page.get_response_total_text(),
str(response_total) + " responses"
)
self.assertEqual(self.thread_page.has_add_response_button(), response_total != 0)
self.assertEqual(self.thread_page.get_num_displayed_responses(), displayed_responses)
self.assertEqual(
self.thread_page.get_shown_responses_text(),
(
None if response_total == 0 else
"Showing all responses" if response_total == displayed_responses else
"Showing first {} responses".format(displayed_responses)
)
)
self.assertEqual(
self.thread_page.get_load_responses_button_text(),
(
None if response_total == displayed_responses else
"Load all responses" if response_total - displayed_responses < 100 else
"Load next 100 responses"
)
)
def test_pagination_no_responses(self):
self.setup_thread(0)
self.assert_response_display_correct(0, 0)
def test_pagination_few_responses(self):
self.setup_thread(5)
self.assert_response_display_correct(5, 5)
def test_pagination_two_response_pages(self):
self.setup_thread(50)
self.assert_response_display_correct(50, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(50, 50)
def test_pagination_exactly_two_response_pages(self):
self.setup_thread(125)
self.assert_response_display_correct(125, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(125, 125)
def test_pagination_three_response_pages(self):
self.setup_thread(150)
self.assert_response_display_correct(150, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 125)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 150)
def test_add_response_button(self):
self.setup_thread(5)
self.assertTrue(self.thread_page.has_add_response_button())
self.thread_page.click_add_response_button()
def test_add_response_button_closed_thread(self):
self.setup_thread(5, closed=True)
self.assertFalse(self.thread_page.has_add_response_button())
@attr(shard=2)
class DiscussionHomePageTest(BaseDiscussionTestCase):
"""
Tests for the discussion home page.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionHomePageTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
@attr(shard=2)
def test_new_post_button(self):
"""
Scenario: I can create new posts from the Discussion home page.
Given that I am on the Discussion home page
When I click on the 'New Post' button
Then I should be shown the new post form
"""
self.assertIsNotNone(self.page.new_post_button)
self.page.click_new_post_button()
self.assertIsNotNone(self.page.new_post_form)
def test_receive_update_checkbox(self):
"""
Scenario: I can save the receive update email notification checkbox
on Discussion home page.
Given that I am on the Discussion home page
When I click on the 'Receive update' checkbox
        Then it should always be shown as selected.
"""
receive_updates_selector = '.email-setting'
receive_updates_checkbox = self.page.is_element_visible(receive_updates_selector)
self.assertTrue(receive_updates_checkbox)
self.assertFalse(self.page.is_checkbox_selected(receive_updates_selector))
self.page.click_element(receive_updates_selector)
self.assertTrue(self.page.is_checkbox_selected(receive_updates_selector))
self.page.refresh_and_wait_for_load()
self.assertTrue(self.page.is_checkbox_selected(receive_updates_selector))
@attr('a11y')
def test_page_accessibility(self):
self.page.a11y_audit.config.set_rules({
"ignore": [
'section', # TODO: AC-491
'aria-required-children', # TODO: AC-534
]
})
self.page.a11y_audit.check_for_accessibility_errors()
@attr(shard=2)
class DiscussionNavigationTest(BaseDiscussionTestCase):
"""
Tests for breadcrumbs navigation in the Discussions page nav bar
"""
def setUp(self):
super(DiscussionNavigationTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(
id=thread_id,
body=THREAD_CONTENT_WITH_LATEX,
commentable_id=self.discussion_id
)
)
thread_fixture.push()
self.thread_page = DiscussionTabSingleThreadPage(
self.browser,
self.course_id,
self.discussion_id,
thread_id
)
self.thread_page.visit()
def test_breadcrumbs_push_topic(self):
topic_button = self.thread_page.q(
css=".forum-nav-browse-menu-item[data-discussion-id='{}']".format(self.discussion_id)
)
self.assertTrue(topic_button.visible)
topic_button.click()
# Verify the thread's topic has been pushed to breadcrumbs
breadcrumbs = self.thread_page.q(css=".breadcrumbs .nav-item")
self.assertEqual(len(breadcrumbs), 3)
self.assertEqual(breadcrumbs[2].text, "Topic-Level Student-Visible Label")
def test_breadcrumbs_back_to_all_topics(self):
topic_button = self.thread_page.q(
css=".forum-nav-browse-menu-item[data-discussion-id='{}']".format(self.discussion_id)
)
self.assertTrue(topic_button.visible)
topic_button.click()
# Verify clicking the first breadcrumb takes you back to all topics
self.thread_page.q(css=".breadcrumbs .nav-item")[0].click()
self.assertEqual(len(self.thread_page.q(css=".breadcrumbs .nav-item")), 1)
def test_breadcrumbs_clear_search(self):
self.thread_page.q(css=".search-input").fill("search text")
self.thread_page.q(css=".search-button").click()
# Verify that clicking the first breadcrumb clears your search
self.thread_page.q(css=".breadcrumbs .nav-item")[0].click()
self.assertEqual(self.thread_page.q(css=".search-input").text[0], "")
def test_navigation_and_sorting(self):
"""
        Test that after adding a post, the user's sorting preference changes properly
        and the recently added post is shown.
"""
topic_button = self.thread_page.q(
css=".forum-nav-browse-menu-item[data-discussion-id='{}']".format(self.discussion_id)
)
self.assertTrue(topic_button.visible)
topic_button.click()
sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
for sort_type in ["votes", "comments", "activity"]:
sort_page.change_sort_preference(sort_type)
# Verify that recently added post titled "dummy thread title" is shown in each sorting preference
self.assertEqual(self.thread_page.q(css=".forum-nav-thread-title").text[0], 'dummy thread title')
@attr(shard=2)
class DiscussionTabSingleThreadTest(BaseDiscussionTestCase, DiscussionResponsePaginationTestMixin):
"""
Tests for the discussion page displaying a single thread
"""
def setUp(self):
super(DiscussionTabSingleThreadTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.tab_nav = TabNavPage(self.browser)
def setup_thread_page(self, thread_id):
self.thread_page = self.create_single_thread_page(thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.visit()
def test_mathjax_rendering(self):
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(
id=thread_id,
body=THREAD_CONTENT_WITH_LATEX,
commentable_id=self.discussion_id,
thread_type="discussion"
)
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertTrue(self.thread_page.is_discussion_body_visible())
self.thread_page.verify_mathjax_preview_available()
self.thread_page.verify_mathjax_rendered()
def test_markdown_reference_link(self):
"""
        Check that the markdown editor renders a reference link correctly
        and that a colon (:) in the reference link is not converted to %3a
"""
sample_link = "http://example.com/colon:test"
thread_content = """[enter link description here][1]\n[1]: http://example.com/colon:test"""
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(
id=thread_id,
body=thread_content,
commentable_id=self.discussion_id,
thread_type="discussion"
)
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertEqual(self.thread_page.get_link_href(), sample_link)
def test_marked_answer_comments(self):
thread_id = "test_thread_{}".format(uuid4().hex)
response_id = "test_response_{}".format(uuid4().hex)
comment_id = "test_comment_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, thread_type="question")
)
thread_fixture.addResponse(
Response(id=response_id, endorsed=True),
[Comment(id=comment_id)]
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertFalse(self.thread_page.is_comment_visible(comment_id))
self.assertFalse(self.thread_page.is_add_comment_visible(response_id))
self.assertTrue(self.thread_page.is_show_comments_visible(response_id))
self.thread_page.show_comments(response_id)
self.assertTrue(self.thread_page.is_comment_visible(comment_id))
self.assertTrue(self.thread_page.is_add_comment_visible(response_id))
self.assertFalse(self.thread_page.is_show_comments_visible(response_id))
def test_discussion_blackout_period(self):
"""
        Verify that a new discussion cannot be started during the course blackout period.
        The blackout period is the window during which students cannot create new
        posts or contribute to existing discussions.
"""
now = datetime.datetime.now(UTC)
        # Update course advanced settings with a valid blackout period.
self.course_fixture.add_advanced_settings(
{
u"discussion_blackouts": {
"value": [
[
(now - datetime.timedelta(days=14)).isoformat(),
(now + datetime.timedelta(days=2)).isoformat()
]
]
}
}
)
self.course_fixture._add_advanced_settings() # pylint: disable=protected-access
self.browser.refresh()
thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.addResponse(
Response(id="response1"),
[Comment(id="comment1")])
thread_fixture.push()
self.setup_thread_page(thread.get("id")) # pylint: disable=no-member
# Verify that `Add a Post` is not visible on course tab nav.
self.assertFalse(self.tab_nav.has_new_post_button_visible_on_tab())
# Verify that `Add a response` button is not visible.
self.assertFalse(self.thread_page.has_add_response_button())
# Verify user can not add new responses or modify existing responses.
self.assertFalse(self.thread_page.has_discussion_reply_editor())
self.assertFalse(self.thread_page.is_response_editable("response1"))
self.assertFalse(self.thread_page.is_response_deletable("response1"))
# Verify that user can not add new comment to a response or modify existing responses.
self.assertFalse(self.thread_page.is_add_comment_visible("response1"))
self.assertFalse(self.thread_page.is_comment_editable("comment1"))
self.assertFalse(self.thread_page.is_comment_deletable("comment1"))
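    # Hedged sketch: the blackout payload built above could be factored into a
    # small helper. `_make_blackout_window` is a hypothetical name, not edx
    # API; it returns the [start, end] ISO-8601 pair used in the settings
    # dict, with the window straddling "now" so the blackout is active.
    @staticmethod
    def _make_blackout_window(days_before=14, days_after=2):
        """Return an active [start, end] ISO-8601 blackout window."""
        now = datetime.datetime.now(UTC)
        return [
            (now - datetime.timedelta(days=days_before)).isoformat(),
            (now + datetime.timedelta(days=days_after)).isoformat(),
        ]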
class DiscussionTabMultipleThreadTest(BaseDiscussionTestCase, BaseDiscussionMixin):
"""
Tests for the discussion page with multiple threads
"""
def setUp(self):
super(DiscussionTabMultipleThreadTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.thread_count = 2
self.thread_ids = []
self.setup_multiple_threads(thread_count=self.thread_count)
self.thread_page_1 = DiscussionTabSingleThreadPage(
self.browser,
self.course_id,
self.discussion_id,
self.thread_ids[0]
)
self.thread_page_2 = DiscussionTabSingleThreadPage(
self.browser,
self.course_id,
self.discussion_id,
self.thread_ids[1]
)
self.thread_page_1.visit()
@attr('a11y')
def test_page_accessibility(self):
self.thread_page_1.a11y_audit.config.set_rules({
"ignore": [
'section', # TODO: AC-491
'aria-required-children', # TODO: AC-534
]
})
self.thread_page_1.a11y_audit.check_for_accessibility_errors()
self.thread_page_2.a11y_audit.config.set_rules({
"ignore": [
'section', # TODO: AC-491
'aria-required-children', # TODO: AC-534
]
})
self.thread_page_2.a11y_audit.check_for_accessibility_errors()
class DiscussionOpenClosedThreadTest(BaseDiscussionTestCase):
"""
Tests for checking the display of attributes on open and closed threads
"""
def setUp(self):
super(DiscussionOpenClosedThreadTest, self).setUp()
self.thread_id = "test_thread_{}".format(uuid4().hex)
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self, **thread_kwargs):
thread_kwargs.update({'commentable_id': self.discussion_id})
view = SingleThreadViewFixture(
Thread(id=self.thread_id, **thread_kwargs)
)
view.addResponse(Response(id="response1"))
view.push()
def setup_openclosed_thread_page(self, closed=False):
self.setup_user(roles=['Moderator'])
if closed:
self.setup_view(closed=True)
else:
self.setup_view()
page = self.create_single_thread_page(self.thread_id)
page.visit()
page.close_open_thread()
return page
@attr(shard=2)
def test_originally_open_thread_vote_display(self):
page = self.setup_openclosed_thread_page()
self.assertFalse(page.is_element_visible('.thread-main-wrapper .action-vote'))
self.assertTrue(page.is_element_visible('.thread-main-wrapper .display-vote'))
self.assertFalse(page.is_element_visible('.response_response1 .action-vote'))
self.assertTrue(page.is_element_visible('.response_response1 .display-vote'))
@attr(shard=2)
def test_originally_closed_thread_vote_display(self):
page = self.setup_openclosed_thread_page(True)
self.assertTrue(page.is_element_visible('.thread-main-wrapper .action-vote'))
self.assertFalse(page.is_element_visible('.thread-main-wrapper .display-vote'))
self.assertTrue(page.is_element_visible('.response_response1 .action-vote'))
self.assertFalse(page.is_element_visible('.response_response1 .display-vote'))
@attr('a11y')
def test_page_accessibility(self):
page = self.setup_openclosed_thread_page()
page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'aria-required-children', # TODO: AC-534
'color-contrast', # Commented out for now because they reproducibly fail on Jenkins but not locally
]
})
page.a11y_audit.check_for_accessibility_errors()
page = self.setup_openclosed_thread_page(True)
page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'aria-required-children', # TODO: AC-534
'color-contrast', # Commented out for now because they reproducibly fail on Jenkins but not locally
]
})
page.a11y_audit.check_for_accessibility_errors()
@attr(shard=2)
class DiscussionCommentDeletionTest(BaseDiscussionTestCase):
"""
Tests for deleting comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response1"), [
Comment(id="comment_other_author"),
Comment(id="comment_self_author", user_id=self.user_id, thread_id="comment_deletion_test_thread")
]
)
view.push()
def test_comment_deletion_as_student(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
def test_comment_deletion_as_moderator(self):
self.setup_user(roles=['Moderator'])
self.setup_view()
page = self.create_single_thread_page("comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
page.delete_comment("comment_other_author")
class DiscussionResponseEditTest(BaseDiscussionTestCase):
"""
Tests for editing responses displayed beneath thread in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="response_edit_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response_other_author", user_id="other", thread_id="response_edit_test_thread"),
)
view.addResponse(
Response(id="response_self_author", user_id=self.user_id, thread_id="response_edit_test_thread"),
)
view.push()
def edit_response(self, page, response_id):
self.assertTrue(page.is_response_editable(response_id))
page.start_response_edit(response_id)
new_response = "edited body"
page.set_response_editor_value(response_id, new_response)
page.submit_response_edit(response_id, new_response)
@attr(shard=2)
def test_edit_response_add_link(self):
"""
Scenario: User submits valid input to the 'add link' form
Given I am editing a response on a discussion page
When I click the 'add link' icon in the editor toolbar
And enter a valid url to the URL input field
And enter a valid string in the Description input field
And click the 'OK' button
Then the edited response should contain the new link
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
response_id = "response_self_author"
url = "http://example.com"
description = "example"
page.start_response_edit(response_id)
page.set_response_editor_value(response_id, "")
page.add_content_via_editor_button(
"link", response_id, url, description)
page.submit_response_edit(response_id, description)
expected_response_html = (
'<p><a href="{}">{}</a></p>'.format(url, description)
)
actual_response_html = page.q(
css=".response_{} .response-body".format(response_id)
).html[0]
self.assertEqual(expected_response_html, actual_response_html)
@attr(shard=2)
def test_edit_response_add_image(self):
"""
Scenario: User submits valid input to the 'add image' form
Given I am editing a response on a discussion page
When I click the 'add image' icon in the editor toolbar
And enter a valid url to the URL input field
And enter a valid string in the Description input field
And click the 'OK' button
Then the edited response should contain the new image
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
response_id = "response_self_author"
url = "http://www.example.com/something.png"
description = "image from example.com"
page.start_response_edit(response_id)
page.set_response_editor_value(response_id, "")
page.add_content_via_editor_button(
"image", response_id, url, description)
page.submit_response_edit(response_id, '')
expected_response_html = (
'<p><img src="{}" alt="{}" title=""></p>'.format(url, description)
)
actual_response_html = page.q(
css=".response_{} .response-body".format(response_id)
).html[0]
self.assertEqual(expected_response_html, actual_response_html)
@attr(shard=2)
def test_edit_response_add_image_error_msg(self):
"""
Scenario: User submits invalid input to the 'add image' form
Given I am editing a response on a discussion page
When I click the 'add image' icon in the editor toolbar
And enter an invalid url to the URL input field
And enter an empty string in the Description input field
And click the 'OK' button
Then I should be shown 2 error messages
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
page.start_response_edit("response_self_author")
page.add_content_via_editor_button(
"image", "response_self_author", '', '')
page.verify_link_editor_error_messages_shown()
@attr(shard=2)
def test_edit_response_add_decorative_image(self):
"""
Scenario: User submits invalid input to the 'add image' form
Given I am editing a response on a discussion page
When I click the 'add image' icon in the editor toolbar
And enter a valid url to the URL input field
And enter an empty string in the Description input field
And I check the 'image is decorative' checkbox
And click the 'OK' button
Then the edited response should contain the new image
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
response_id = "response_self_author"
url = "http://www.example.com/something.png"
description = ""
page.start_response_edit(response_id)
page.set_response_editor_value(response_id, "Some content")
page.add_content_via_editor_button(
"image", response_id, url, description, is_decorative=True)
page.submit_response_edit(response_id, "Some content")
expected_response_html = (
'<p>Some content<img src="{}" alt="{}" title=""></p>'.format(
url, description)
)
actual_response_html = page.q(
css=".response_{} .response-body".format(response_id)
).html[0]
self.assertEqual(expected_response_html, actual_response_html)
@attr(shard=2)
def test_edit_response_add_link_error_msg(self):
"""
Scenario: User submits invalid input to the 'add link' form
Given I am editing a response on a discussion page
When I click the 'add link' icon in the editor toolbar
And enter an invalid url to the URL input field
And enter an empty string in the Description input field
And click the 'OK' button
Then I should be shown 2 error messages
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
page.start_response_edit("response_self_author")
page.add_content_via_editor_button(
"link", "response_self_author", '', '')
page.verify_link_editor_error_messages_shown()
@attr(shard=2)
def test_edit_response_as_student(self):
"""
        Scenario: Students should be able to edit the responses they created, but not responses of other users
        Given that I am on the discussion page with a student logged in
        When I try to edit the response created by the student
        Then the response should be edited and rendered successfully
        And responses from other users should still be shown
        And the student should not be able to edit the responses of other people
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.assertTrue(page.is_response_visible("response_other_author"))
self.assertFalse(page.is_response_editable("response_other_author"))
self.edit_response(page, "response_self_author")
@attr(shard=2)
def test_edit_response_as_moderator(self):
"""
        Scenario: A moderator should be able to edit both their own responses and responses of other users
        Given that I am on the discussion page with a moderator logged in
        When I try to edit the response created by the moderator
        Then the response should be edited and rendered successfully
        And when I try to edit a response created by another user
        Then the response should be edited and rendered successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
@attr(shard=2)
@flaky # TODO fix this, see TNL-5453
def test_vote_report_endorse_after_edit(self):
"""
        Scenario: A moderator should be able to vote, report, or endorse after editing a response.
        Given that I am on the discussion page with a moderator logged in
        When I edit the response created by the moderator
        Then the response should be edited and rendered successfully
        And when I edit a response created by another user
        Then the response should be edited and rendered successfully
        And I should not be able to vote on the moderator's own response
        But I should be able to vote on another user's response
        And I should not be able to report the moderator's own response
        But I should be able to report another user's response
        And I should be able to endorse the moderator's own response
        And I should be able to endorse another user's response
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
page.cannot_vote_response('response_self_author')
page.vote_response('response_other_author')
page.cannot_report_response('response_self_author')
page.report_response('response_other_author')
page.endorse_response('response_self_author')
page.endorse_response('response_other_author')
@attr('a11y')
def test_page_accessibility(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'aria-required-children', # TODO: AC-534
]
})
page.visit()
page.a11y_audit.check_for_accessibility_errors()
class DiscussionCommentEditTest(BaseDiscussionTestCase):
"""
Tests for editing comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def edit_comment(self, page, comment_id):
page.start_comment_edit(comment_id)
new_comment = "edited body"
page.set_comment_editor_value(comment_id, new_comment)
page.submit_comment_edit(comment_id, new_comment)
@attr(shard=2)
def test_edit_comment_as_student(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
@attr(shard=2)
def test_edit_comment_as_moderator(self):
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
self.edit_comment(page, "comment_other_author")
@attr(shard=2)
def test_cancel_comment_edit(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
page.set_comment_editor_value("comment_self_author", "edited body")
page.cancel_comment_edit("comment_self_author", original_body)
@attr(shard=2)
def test_editor_visibility(self):
"""Only one editor should be visible at a time within a single response"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_add_comment_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.set_comment_editor_value("comment_self_author", "edited body")
page.start_comment_edit("comment_other_author")
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
page.start_response_edit("response1")
self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
self.assertTrue(page.is_response_editor_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_response_editor_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.cancel_comment_edit("comment_self_author", original_body)
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
@attr('a11y')
def test_page_accessibility(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'aria-required-children', # TODO: AC-534
]
})
page.a11y_audit.check_for_accessibility_errors()
@attr(shard=2)
class DiscussionEditorPreviewTest(UniqueCourseTest):
def setUp(self):
super(DiscussionEditorPreviewTest, self).setUp()
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
self.page.click_new_post_button()
def test_text_rendering(self):
"""When I type plain text into the editor, it should be rendered as plain text in the preview box"""
self.page.set_new_post_editor_value("Some plain text")
self.assertEqual(self.page.get_new_post_preview_value(), "<p>Some plain text</p>")
def test_markdown_rendering(self):
"""When I type Markdown into the editor, it should be rendered as formatted Markdown in the preview box"""
self.page.set_new_post_editor_value(
"Some markdown\n"
"\n"
"- line 1\n"
"- line 2"
)
self.assertEqual(self.page.get_new_post_preview_value(), (
"<p>Some markdown</p>\n"
"\n"
"<ul>\n"
"<li>line 1</li>\n"
"<li>line 2</li>\n"
"</ul>"
))
def test_mathjax_rendering_in_order(self):
"""
        Test that MathJax expressions are rendered in the order in which the
        user types them into the discussion editor.
"""
self.page.set_new_post_editor_value(
'Text line 1 \n'
'$$e[n]=d_1$$ \n'
'Text line 2 \n'
'$$e[n]=d_2$$'
)
self.assertEqual(self.page.get_new_post_preview_text(), 'Text line 1\nText line 2')
def test_mathjax_not_rendered_after_post_cancel(self):
"""
        Test that MathJax is not rendered after the post is cancelled.
        When the user types a MathJax expression into the discussion editor, it
        appears in the preview box; after the user cancels the post and clicks
        the new post button again, the MathJax should not appear in the preview box.
"""
self.page.set_new_post_editor_value(
'\\begin{equation}'
'\\tau_g(\omega) = - \\frac{d}{d\omega}\phi(\omega) \hspace{2em} (1) '
'\\end{equation}'
)
self.assertIsNotNone(self.page.get_new_post_preview_text())
self.page.click_element(".cancel")
alert = get_modal_alert(self.browser)
alert.accept()
self.assertIsNotNone(self.page.new_post_button)
self.page.click_new_post_button()
self.assertEqual(self.page.get_new_post_preview_value('.wmd-preview'), "")
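    # Hedged note: the two tests above exercise both MathJax delimiter styles
    # the preview must handle -- `$$ ... $$` display math and the
    # `\begin{equation} ... \end{equation}` LaTeX environment.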
@attr(shard=2)
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
"""
Tests for inline discussions
"""
def setUp(self):
super(InlineDiscussionTest, self).setUp()
self.thread_ids = []
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.additional_discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fix = CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
metadata={"discussion_id": self.discussion_id}
),
XBlockFixtureDesc(
"discussion",
"Test Discussion 1",
metadata={"discussion_id": self.additional_discussion_id}
)
)
)
)
).install()
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.courseware_page.visit()
self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
self.additional_discussion_page = InlineDiscussionPage(self.browser, self.additional_discussion_id)
def setup_thread_page(self, thread_id):
self.discussion_page.expand_discussion()
self.discussion_page.show_thread(thread_id)
self.thread_page = self.discussion_page.thread_page # pylint: disable=attribute-defined-outside-init
# This test is too flaky to run at all. TNL-6215
@attr('a11y')
@nottest
def test_inline_a11y(self):
"""
Tests Inline Discussion for accessibility issues.
"""
self.setup_multiple_threads(thread_count=3)
# First test the a11y of the expanded list of threads
self.discussion_page.expand_discussion()
self.discussion_page.a11y_audit.config.set_rules({
'ignore': [
'section'
]
})
self.discussion_page.a11y_audit.check_for_accessibility_errors()
# Now show the first thread and test the a11y again
self.discussion_page.show_thread(self.thread_ids[0])
self.discussion_page.a11y_audit.check_for_accessibility_errors()
# Finally show the new post form and test its a11y
self.discussion_page.click_new_post_button()
self.discussion_page.a11y_audit.check_for_accessibility_errors()
def test_add_a_post_is_present_if_can_create_thread_when_expanded(self):
self.discussion_page.expand_discussion()
# Add a Post link is present
self.assertTrue(self.discussion_page.q(css='.new-post-btn').present)
def test_add_post_not_present_if_discussion_blackout_period_started(self):
"""
        If the discussion blackout period has started, the 'Add a post' button should not appear.
"""
self.start_discussion_blackout_period()
self.browser.refresh()
self.discussion_page.expand_discussion()
self.assertFalse(self.discussion_page.is_new_post_button_visible())
def test_initial_render(self):
self.assertFalse(self.discussion_page.is_discussion_expanded())
def test_expand_discussion_empty(self):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)
def check_anonymous_to_peers(self, is_staff):
thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.push()
self.setup_thread_page(thread.get("id")) # pylint: disable=no-member
self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)
def test_anonymous_to_peers_threads_as_staff(self):
AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
self.courseware_page.visit()
self.check_anonymous_to_peers(True)
def test_anonymous_to_peers_threads_as_peer(self):
self.check_anonymous_to_peers(False)
def test_discussion_blackout_period(self):
self.start_discussion_blackout_period()
self.browser.refresh()
thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.addResponse(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)])
thread_fixture.push()
self.setup_thread_page(thread.get("id")) # pylint: disable=no-member
self.assertFalse(self.thread_page.has_add_response_button())
self.assertFalse(self.thread_page.is_element_visible("action-more"))
def test_dual_discussion_xblock(self):
"""
        Scenario: Two discussion XBlocks in one unit should not interfere with each other's actions
        Given that I'm on a courseware page that contains two inline discussions
When I click on the first discussion block's new post button
Then I should be shown only the new post form for the first block
When I click on the second discussion block's new post button
Then I should be shown both new post forms
When I cancel the first form
Then I should be shown only the new post form for the second block
When I cancel the second form
And I click on the first discussion block's new post button
Then I should be shown only the new post form for the first block
When I cancel the first form
Then I should be shown none of the forms
"""
self.discussion_page.wait_for_page()
self.additional_discussion_page.wait_for_page()
# Expand the first discussion, click to add a post
self.discussion_page.expand_discussion()
self.discussion_page.click_new_post_button()
# Verify that only the first discussion's form is shown
self.assertIsNotNone(self.discussion_page.new_post_form)
self.assertIsNone(self.additional_discussion_page.new_post_form)
# Expand the second discussion, click to add a post
self.additional_discussion_page.expand_discussion()
self.additional_discussion_page.click_new_post_button()
# Verify that both discussion's forms are shown
self.assertIsNotNone(self.discussion_page.new_post_form)
self.assertIsNotNone(self.additional_discussion_page.new_post_form)
# Cancel the first form
self.discussion_page.click_cancel_new_post()
# Verify that only the second discussion's form is shown
self.assertIsNone(self.discussion_page.new_post_form)
self.assertIsNotNone(self.additional_discussion_page.new_post_form)
# Cancel the second form and click to show the first one
self.additional_discussion_page.click_cancel_new_post()
self.discussion_page.click_new_post_button()
# Verify that only the first discussion's form is shown
self.assertIsNotNone(self.discussion_page.new_post_form)
self.assertIsNone(self.additional_discussion_page.new_post_form)
# Cancel the first form
self.discussion_page.click_cancel_new_post()
        # Verify that neither discussion's form is shown
self.assertIsNone(self.discussion_page.new_post_form)
self.assertIsNone(self.additional_discussion_page.new_post_form)
def start_discussion_blackout_period(self):
"""
        Start the discussion blackout period, running from 14 days before now until 2 days from now.
"""
now = datetime.datetime.now(UTC)
self.course_fix.add_advanced_settings(
{
u"discussion_blackouts": {
"value": [
[
(now - datetime.timedelta(days=14)).isoformat(),
(now + datetime.timedelta(days=2)).isoformat()
]
]
}
}
)
self.course_fix._add_advanced_settings() # pylint: disable=protected-access
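        # Hedged note: this payload mirrors the blackout window built in
        # DiscussionTabSingleThreadTest above; both place "now" inside the
        # window so the blackout is active for the duration of the test.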
@attr(shard=2)
class DiscussionUserProfileTest(UniqueCourseTest):
"""
Tests for user profile page in discussion tab.
"""
PAGE_SIZE = 20 # discussion.views.THREADS_PER_PAGE
PROFILED_USERNAME = "profiled-user"
def setUp(self):
super(DiscussionUserProfileTest, self).setUp()
self.setup_course()
# The following line creates a user enrolled in our course, whose
# threads will be viewed, but not the one who will view the page.
# It isn't necessary to log them in, but using the AutoAuthPage
# saves a lot of code.
self.profiled_user_id = self.setup_user(username=self.PROFILED_USERNAME)
# now create a second user who will view the profile.
self.user_id = self.setup_user()
def setup_course(self):
"""
        Set up the course for the discussion user-profile tests.
"""
return CourseFixture(**self.course_info).install()
def setup_user(self, roles=None, **user_info):
"""
Helper method to create and authenticate a user.
"""
roles_str = ''
if roles:
roles_str = ','.join(roles)
return AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str, **user_info).visit().get_user_id()
def test_redirects_to_learner_profile(self):
"""
        Scenario: Verify that the learner-profile link is present on the forum discussions page and that we can navigate to it.
        Given that I am on the discussion forum user's profile page.
        And I can see a username on the page
        When I click on my username.
        Then I will be navigated to the Learner Profile page.
        And I can see my username on the Learner Profile page
"""
learner_profile_page = LearnerProfilePage(self.browser, self.PROFILED_USERNAME)
page = DiscussionUserProfilePage(
self.browser,
self.course_id,
self.profiled_user_id,
self.PROFILED_USERNAME
)
page.visit()
page.click_on_sidebar_username()
learner_profile_page.wait_for_page()
self.assertTrue(learner_profile_page.field_is_visible('username'))
def test_learner_profile_roles(self):
"""
Test that on the learner profile page user roles are correctly listed according to the course.
"""
# Setup a learner with roles in a Course-A.
expected_student_roles = ['Administrator', 'Community TA', 'Moderator', 'Student']
self.profiled_user_id = self.setup_user(
roles=expected_student_roles,
username=self.PROFILED_USERNAME
)
# Visit the page and verify the roles are listed correctly.
page = DiscussionUserProfilePage(
self.browser,
self.course_id,
self.profiled_user_id,
self.PROFILED_USERNAME
)
page.visit()
student_roles = page.get_user_roles()
self.assertEqual(student_roles, ', '.join(expected_student_roles))
# Save the course_id of Course-A before setting up a new course.
old_course_id = self.course_id
        # Set up Course-B where the user has no additional roles and test that roles are displayed correctly.
self.course_info['number'] = self.unique_id
self.setup_course()
new_course_id = self.course_id
# Set the user to have no extra role in the Course-B and verify the existing
# user is updated.
profiled_student_user_id = self.setup_user(roles=None, username=self.PROFILED_USERNAME)
self.assertEqual(self.profiled_user_id, profiled_student_user_id)
self.assertNotEqual(old_course_id, new_course_id)
# Visit the user profile in course discussion page of Course-B. Make sure the
# roles are listed correctly.
page = DiscussionUserProfilePage(
self.browser,
self.course_id,
self.profiled_user_id,
self.PROFILED_USERNAME
)
page.visit()
self.assertEqual(page.get_user_roles(), u'Student')
class DiscussionSearchAlertTest(UniqueCourseTest):
"""
Tests for spawning and dismissing alerts related to user search actions and their results.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionSearchAlertTest, self).setUp()
CourseFixture(**self.course_info).install()
# first auto auth call sets up a user that we will search for in some tests
self.searched_user_id = AutoAuthPage(
self.browser,
username=self.SEARCHED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# this auto auth call creates the actual session user
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def setup_corrected_text(self, text):
SearchResultFixture(SearchResult(corrected_text=text)).push()
def check_search_alert_messages(self, expected):
actual = self.page.get_search_alert_messages()
self.assertTrue(all(map(lambda msg, sub: msg.lower().find(sub.lower()) >= 0, actual, expected)))
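    # Hedged note: `map` with a two-argument lambda pairs the two lists
    # element-wise. An equivalent, arguably clearer spelling (assuming both
    # lists are meant to be the same length) would be:
    #
    #     self.assertEqual(len(actual), len(expected))
    #     for msg, sub in zip(actual, expected):
    #         self.assertIn(sub.lower(), msg.lower())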
@attr(shard=2)
def test_no_rewrite(self):
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no posts"])
@attr(shard=2)
def test_rewrite_dismiss(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.page.dismiss_alert_message("foo")
self.check_search_alert_messages([])
@attr(shard=2)
def test_new_search(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.setup_corrected_text("bar")
self.page.perform_search()
self.check_search_alert_messages(["bar"])
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no posts"])
@attr(shard=2)
def test_rewrite_and_user(self):
self.setup_corrected_text("foo")
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])
@attr(shard=2)
def test_user_only(self):
self.setup_corrected_text(None)
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["no posts", self.SEARCHED_USERNAME])
# make sure clicking the link leads to the user profile page
UserProfileViewFixture([]).push()
self.page.get_search_alert_links().first.click()
DiscussionUserProfilePage(
self.browser,
self.course_id,
self.searched_user_id,
self.SEARCHED_USERNAME
).wait_for_page()
@attr('a11y')
def test_page_accessibility(self):
self.page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'aria-required-children', # TODO: AC-534
]
})
self.page.a11y_audit.check_for_accessibility_errors()
@attr(shard=2)
class DiscussionSortPreferenceTest(UniqueCourseTest):
"""
Tests for the discussion page displaying a single thread.
"""
def setUp(self):
super(DiscussionSortPreferenceTest, self).setUp()
# Create a course to register for.
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
self.sort_page.visit()
self.sort_page.show_all_discussions()
def test_default_sort_preference(self):
"""
        Test the default sorting preference of the user. (Default = activity)
"""
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, "activity")
@skip_if_browser('chrome') # TODO TE-1542 and TE-1543
def test_change_sort_preference(self):
"""
        Test that the user's sorting preference changes properly.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "activity"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
@skip_if_browser('chrome') # TODO TE-1542 and TE-1543
def test_last_preference_saved(self):
"""
        Test that the user's last sorting preference is saved.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "activity"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
self.sort_page.refresh_page()
self.sort_page.show_all_discussions()
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
|
pepeportela/edx-platform
|
common/test/acceptance/tests/discussion/test_discussion.py
|
Python
|
agpl-3.0
| 64,335
|
[
"VisIt"
] |
f4137748aef83566c0d65dfa2d456263227063edc079ef1e135cc4d0901265b3
|
import os
from docker_build import __version__
from setuptools import setup, find_packages
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
setup(
name='docker-build-tool',
version=__version__,
description='Build tool for creating Docker Images',
url='https://github.com/brian-bason/docker-build-tool',
author='Brian Bason',
author_email='brianbason@gmail.com',
classifiers=[],
packages=find_packages(exclude=['test*']),
include_package_data=True,
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'docker~=2.0',
'pyYAML~=3.11',
'enum34~=1.1'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'test': [
'coverage==4.0.1',
'mock==1.3.0',
'nose==1.3.7',
'testfixtures==4.3.3'
]
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'docker-build=docker_build.__main__:main'
]
}
)
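# Hedged usage sketch (not part of setup.py): once the package is installed,
# the console_scripts entry point above exposes the `docker-build` command.
# Assuming a conventional argparse-based CLI:
#     pip install docker-build-tool
#     docker-build --help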
|
brian-bason/docker-build-tool
|
setup.py
|
Python
|
mit
| 1,672
|
[
"Brian"
] |
54573df71abcc327809d207022f380f4f610c6bf254fb1c0bab3076fff6953fd
|
# -*- coding: utf-8 -*-
"""Functions to plot evoked M/EEG data (besides topographies)."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Daniel McCloy <dan.mccloy@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
from functools import partial
from numbers import Integral
import numpy as np
from ..fixes import _is_last_row
from ..io.pick import (channel_type,
_VALID_CHANNEL_TYPES, channel_indices_by_type,
_DATA_CH_TYPES_SPLIT, _pick_inst, _get_channel_types,
_PICK_TYPES_DATA_DICT, _picks_to_idx, pick_info)
from ..defaults import _handle_default
from .utils import (_draw_proj_checkbox, tight_layout, _check_delayed_ssp,
plt_show, _process_times, DraggableColorbar, _setup_cmap,
_setup_vmin_vmax, _check_cov, _make_combine_callable,
_validate_if_list_of_axes, _triage_rank_sss,
_connection_line, _get_color_list, _setup_ax_spines,
_setup_plot_projector, _prepare_joint_axes, _check_option,
_set_title_multiple_electrodes, _check_time_unit,
_plot_masked_image, _trim_ticks, _set_window_title,
_prop_kw)
from ..utils import (logger, _clean_names, warn, _pl, verbose, _validate_type,
_check_if_nan, _check_ch_locs, fill_doc, _is_numeric,
_to_rgb)
from .topo import _plot_evoked_topo
from .topomap import (_prepare_topomap_plot, plot_topomap, _get_pos_outlines,
_draw_outlines, _prepare_topomap, _set_contour_locator,
_check_sphere, _make_head_outlines)
from ..channels.layout import _pair_grad_sensors, find_layout
def _butterfly_onpick(event, params):
"""Add a channel name on click."""
params['need_draw'] = True
ax = event.artist.axes
ax_idx = np.where([ax is a for a in params['axes']])[0]
if len(ax_idx) == 0: # this can happen if ax param is used
return # let the other axes handle it
else:
ax_idx = ax_idx[0]
lidx = np.where([
line is event.artist for line in params['lines'][ax_idx]])[0][0]
ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
text = params['texts'][ax_idx]
x = event.artist.get_xdata()[event.ind[0]]
y = event.artist.get_ydata()[event.ind[0]]
text.set_x(x)
text.set_y(y)
text.set_text(ch_name)
text.set_color(event.artist.get_color())
text.set_alpha(1.)
text.set_zorder(len(ax.lines)) # to make sure it goes on top of the lines
text.set_path_effects(params['path_effects'])
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use on_button_press (happens once per click)
# to do the drawing
def _butterfly_on_button_press(event, params):
"""Only draw once for picking."""
if params['need_draw']:
event.canvas.draw()
else:
idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
if len(idx) == 1:
text = params['texts'][idx[0]]
text.set_alpha(0.)
text.set_path_effects([])
event.canvas.draw()
params['need_draw'] = False
def _line_plot_onselect(xmin, xmax, ch_types, info, data, times, text=None,
psd=False, time_unit='s', sphere=None):
"""Draw topomaps from the selected area."""
import matplotlib.pyplot as plt
ch_types = [type_ for type_ in ch_types if type_ in ('eeg', 'grad', 'mag')]
if len(ch_types) == 0:
raise ValueError('Interactive topomaps only allowed for EEG '
'and MEG channels.')
if ('grad' in ch_types and
len(_pair_grad_sensors(info, topomap_coords=False,
raise_error=False)) < 2):
ch_types.remove('grad')
if len(ch_types) == 0:
return
vert_lines = list()
if text is not None:
text.set_visible(True)
ax = text.axes
vert_lines.append(ax.axvline(xmin, zorder=0, color='red'))
vert_lines.append(ax.axvline(xmax, zorder=0, color='red'))
fill = ax.axvspan(xmin, xmax, alpha=0.2, color='green')
evoked_fig = plt.gcf()
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
minidx = np.abs(times - xmin).argmin()
maxidx = np.abs(times - xmax).argmin()
fig, axarr = plt.subplots(1, len(ch_types), squeeze=False,
figsize=(3 * len(ch_types), 3))
for idx, ch_type in enumerate(ch_types):
if ch_type not in ('eeg', 'grad', 'mag'):
continue
picks, pos, merge_channels, _, ch_type, this_sphere, clip_origin = \
_prepare_topomap_plot(info, ch_type, sphere=sphere)
outlines = _make_head_outlines(this_sphere, pos, 'head', clip_origin)
if len(pos) < 2:
fig.delaxes(axarr[0][idx])
continue
this_data = data[picks, minidx:maxidx]
if merge_channels:
from ..channels.layout import _merge_ch_data
method = 'mean' if psd else 'rms'
this_data, _ = _merge_ch_data(this_data, ch_type, [],
method=method)
title = '%s %s' % (ch_type, method.upper())
else:
title = ch_type
this_data = np.average(this_data, axis=1)
axarr[0][idx].set_title(title)
vmin = min(this_data) if psd else None
vmax = max(this_data) if psd else None # All negative for dB psd.
cmap = 'Reds' if psd else None
plot_topomap(this_data, pos, cmap=cmap, vmin=vmin, vmax=vmax,
axes=axarr[0][idx], show=False, sphere=this_sphere,
outlines=outlines)
unit = 'Hz' if psd else time_unit
fig.suptitle('Average over %.2f%s - %.2f%s' % (xmin, unit, xmax, unit),
y=0.1)
tight_layout(pad=2.0, fig=fig)
plt_show()
if text is not None:
text.set_visible(False)
close_callback = partial(_topo_closed, ax=ax, lines=vert_lines,
fill=fill)
fig.canvas.mpl_connect('close_event', close_callback)
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
def _topo_closed(events, ax, lines, fill):
"""Remove lines from evoked plot as topomap is closed."""
for line in lines:
ax.lines.remove(line)
ax.patches.remove(fill)
ax.get_figure().canvas.draw()
def _rgb(x, y, z):
"""Transform x, y, z values into RGB colors."""
rgb = np.array([x, y, z]).T
rgb -= rgb.min(0)
rgb /= np.maximum(rgb.max(0), 1e-16) # avoid div by zero
return rgb
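# Hedged usage sketch for _rgb (illustrative values): 3-D sensor positions
# map to colors, so spatially close channels receive similar colors.
#     pos = np.array([[0.00, 0.10, 0.20], [0.05, 0.12, 0.25]])
#     colors = _rgb(pos[:, 0], pos[:, 1], pos[:, 2])  # shape (2, 3), in [0, 1]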
def _plot_legend(pos, colors, axis, bads, outlines, loc, size=30):
"""Plot (possibly colorized) channel legends for evoked plots."""
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
axis.get_figure().canvas.draw()
bbox = axis.get_window_extent() # Determine the correct size.
ratio = bbox.width / bbox.height
ax = inset_axes(axis, width=str(size / ratio) + '%',
height=str(size) + '%', loc=loc)
ax.set_adjustable('box')
ax.set_aspect('equal')
_prepare_topomap(pos, ax, check_nonzero=False)
pos_x, pos_y = pos.T
ax.scatter(pos_x, pos_y, color=colors, s=size * .8, marker='.', zorder=1)
if bads:
bads = np.array(bads)
ax.scatter(pos_x[bads], pos_y[bads], s=size / 6, marker='.',
color='w', zorder=1)
_draw_outlines(ax, outlines)
def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline,
units, scalings, titles, axes, plot_type, cmap=None,
gfp=False, window_title=None, spatial_colors=False,
selectable=True, zorder='unsorted',
noise_cov=None, colorbar=True, mask=None, mask_style=None,
mask_cmap=None, mask_alpha=.25, time_unit='s',
show_names=False, group_by=None, sphere=None):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings).
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude of each channel at a given time point
(x axis: time, y axis: channel). In 'image' mode, the plot is not
interactive.
"""
import matplotlib.pyplot as plt
# For evoked.plot_image ...
# First input checks for group_by and axes if any of them is not None.
# Either both must be dicts, or neither.
# If the former, the two dicts provide picks and axes to plot them to.
# Then, we call this function recursively for each entry in `group_by`.
if plot_type == "image" and isinstance(group_by, dict):
if axes is None:
axes = dict()
for sel in group_by:
plt.figure()
axes[sel] = plt.axes()
if not isinstance(axes, dict):
raise ValueError("If `group_by` is a dict, `axes` must be "
"a dict of axes or None.")
_validate_if_list_of_axes(list(axes.values()))
remove_xlabels = any([_is_last_row(ax) for ax in axes.values()])
for sel in group_by: # ... we loop over selections
if sel not in axes:
raise ValueError(sel + " present in `group_by`, but not "
"found in `axes`")
ax = axes[sel]
# the unwieldy dict comp below defaults the title to the sel
titles = ({channel_type(evoked.info, idx): sel
for idx in group_by[sel]} if titles is None else titles)
_plot_evoked(evoked, group_by[sel], exclude, unit, show, ylim,
proj, xlim, hline, units, scalings, titles,
ax, plot_type, cmap=cmap, gfp=gfp,
window_title=window_title,
selectable=selectable, noise_cov=noise_cov,
colorbar=colorbar, mask=mask,
mask_style=mask_style, mask_cmap=mask_cmap,
mask_alpha=mask_alpha, time_unit=time_unit,
show_names=show_names,
sphere=sphere)
if remove_xlabels and not _is_last_row(ax):
ax.set_xticklabels([])
ax.set_xlabel("")
ims = [ax.images[0] for ax in axes.values()]
clims = np.array([im.get_clim() for im in ims])
        clim_min, clim_max = clims.min(), clims.max()  # avoid shadowing builtins
        for im in ims:
            im.set_clim(clim_min, clim_max)
figs = [ax.get_figure() for ax in axes.values()]
if len(set(figs)) == 1:
return figs[0]
else:
return figs
elif isinstance(axes, dict):
raise ValueError("If `group_by` is not a dict, "
"`axes` must not be a dict either.")
time_unit, times = _check_time_unit(time_unit, evoked.times)
evoked = evoked.copy() # we modify info
info = evoked.info
if axes is not None and proj == 'interactive':
raise RuntimeError('Currently only single axis figures are supported'
' for interactive SSP selection.')
_check_option('gfp', gfp, [True, False, 'only'])
scalings = _handle_default('scalings', scalings)
titles = _handle_default('titles', titles)
units = _handle_default('units', units)
if plot_type == "image":
if ylim is not None and not isinstance(ylim, dict):
# The user called Evoked.plot_image() or plot_evoked_image(), the
# clim parameters of those functions end up to be the ylim here.
raise ValueError("`clim` must be a dict. "
"E.g. clim = dict(eeg=[-20, 20])")
picks = _picks_to_idx(info, picks, none='all', exclude=())
if len(picks) != len(set(picks)):
raise ValueError("`picks` are not unique. Please remove duplicates.")
bad_ch_idx = [info['ch_names'].index(ch) for ch in info['bads']
if ch in info['ch_names']]
if len(exclude) > 0:
if isinstance(exclude, str) and exclude == 'bads':
exclude = bad_ch_idx
elif (isinstance(exclude, list) and
all(isinstance(ch, str) for ch in exclude)):
exclude = [info['ch_names'].index(ch) for ch in exclude]
else:
raise ValueError(
'exclude has to be a list of channel names or "bads"')
picks = np.array([pick for pick in picks if pick not in exclude])
types = np.array(_get_channel_types(info, picks), str)
ch_types_used = list()
for this_type in _VALID_CHANNEL_TYPES:
if this_type in types:
ch_types_used.append(this_type)
fig = None
if axes is None:
fig, axes = plt.subplots(len(ch_types_used), 1)
fig.subplots_adjust(left=0.125, bottom=0.1, right=0.975, top=0.92,
hspace=0.63)
if isinstance(axes, plt.Axes):
axes = [axes]
fig.set_size_inches(6.4, 2 + len(axes))
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if fig is None:
fig = axes[0].get_figure()
if window_title is not None:
_set_window_title(fig, window_title)
if len(axes) != len(ch_types_used):
raise ValueError('Number of axes (%g) must match number of channel '
'types (%d: %s)' % (len(axes), len(ch_types_used),
sorted(ch_types_used)))
_check_option('proj', proj, (True, False, 'interactive', 'reconstruct'))
noise_cov = _check_cov(noise_cov, info)
if proj == 'reconstruct' and noise_cov is not None:
raise ValueError('Cannot use proj="reconstruct" when noise_cov is not '
'None')
projector, whitened_ch_names = _setup_plot_projector(
info, noise_cov, proj=proj is True, nave=evoked.nave)
if len(whitened_ch_names) > 0:
unit = False
if projector is not None:
evoked.data[:] = np.dot(projector, evoked.data)
if proj == 'reconstruct':
evoked = evoked._reconstruct_proj()
if plot_type == 'butterfly':
_plot_lines(evoked.data, info, picks, fig, axes, spatial_colors, unit,
units, scalings, hline, gfp, types, zorder, xlim, ylim,
times, bad_ch_idx, titles, ch_types_used, selectable,
False, line_alpha=1., nave=evoked.nave,
time_unit=time_unit, sphere=sphere)
plt.setp(axes, xlabel='Time (%s)' % time_unit)
elif plot_type == 'image':
for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
use_nave = evoked.nave if ai == 0 else None
this_picks = list(picks[types == this_type])
_plot_image(evoked.data, ax, this_type, this_picks, cmap, unit,
units, scalings, times, xlim, ylim, titles,
colorbar=colorbar, mask=mask, mask_style=mask_style,
mask_cmap=mask_cmap, mask_alpha=mask_alpha,
nave=use_nave, time_unit=time_unit,
show_names=show_names, ch_names=evoked.ch_names)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(evoked=evoked, fig=fig, projs=info['projs'], axes=axes,
types=types, units=units, scalings=scalings, unit=unit,
ch_types_used=ch_types_used, picks=picks,
plot_update_proj_callback=_plot_update_evoked,
plot_type=plot_type)
_draw_proj_checkbox(None, params)
plt.setp(fig.axes[:len(ch_types_used) - 1], xlabel='')
fig.canvas.draw() # for axes plots update axes.
plt_show(show)
return fig
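# Illustrative sketch (not part of the API): both public entry points funnel
# into ``_plot_evoked`` above and differ only in ``plot_type``. The file name
# is a placeholder -- substitute any evoked file of your own.
def _sketch_plot_evoked_dispatch():  # pragma: no cover
    """Show how the butterfly and image plotters share one code path."""
    import mne
    evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # placeholder
    fig_butterfly = evoked.plot(show=False)    # -> plot_type='butterfly'
    fig_image = evoked.plot_image(show=False)  # -> plot_type='image'
    return fig_butterfly, fig_image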
def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units,
scalings, hline, gfp, types, zorder, xlim, ylim, times,
bad_ch_idx, titles, ch_types_used, selectable, psd,
line_alpha, nave, time_unit, sphere):
"""Plot data as butterfly plot."""
from matplotlib import patheffects, pyplot as plt
from matplotlib.widgets import SpanSelector
assert len(axes) == len(ch_types_used)
texts = list()
idxs = list()
lines = list()
sphere = _check_sphere(sphere, info)
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w",
alpha=0.75)]
if selectable:
selectables = np.ones(len(ch_types_used), dtype=bool)
for type_idx, this_type in enumerate(ch_types_used):
idx = picks[types == this_type]
if len(idx) < 2 or (this_type == 'grad' and len(idx) < 4):
# prevent unnecessary warnings for e.g. EOG
if this_type in _DATA_CH_TYPES_SPLIT:
logger.info('Need more than one channel to make '
'topography for %s. Disabling interactivity.'
% (this_type,))
selectables[type_idx] = False
if selectable:
# Parameters for butterfly interactive plots
params = dict(axes=axes, texts=texts, lines=lines,
ch_names=info['ch_names'], idxs=idxs, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
line_list = list() # 'line_list' contains the lines for this axes
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
else:
this_scaling = 1. if scalings is None else scalings[this_type]
ch_unit = units[this_type]
idx = list(picks[types == this_type])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * data[idx, :]
_check_if_nan(D)
gfp_only = gfp == 'only'
if not gfp_only:
chs = [info['chs'][i] for i in idx]
locs3d = np.array([ch['loc'][:3] for ch in chs])
if (spatial_colors is True and
not _check_ch_locs(info=info, picks=idx)):
warn('Channel locations not available. Disabling spatial '
'colors.')
spatial_colors = selectable = False
if spatial_colors is True and len(idx) != 1:
x, y, z = locs3d.T
colors = _rgb(x, y, z)
_handle_spatial_colors(colors, info, idx, this_type, psd,
ax, sphere)
else:
if isinstance(spatial_colors, (tuple, str)):
col = [spatial_colors]
else:
col = ['k']
colors = col * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = 'r'
if zorder == 'std':
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == 'unsorted':
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = ('`zorder` must be a function, "std" '
'or "unsorted", not {0}.')
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(times, D[ch_idx], picker=True,
zorder=z + 1 if spatial_colors is True else 1,
color=colors[ch_idx], alpha=line_alpha,
linewidth=0.5)[0])
line_list[-1].set_pickradius(3.)
if gfp:
if gfp in [True, 'only']:
if this_type == 'eeg':
this_gfp = D.std(axis=0, ddof=0)
label = 'GFP'
else:
this_gfp = np.linalg.norm(D, axis=0) / np.sqrt(len(D))
label = 'RMS'
gfp_color = 3 * (0.,) if spatial_colors is True else (0., 1.,
0.)
this_ylim = ax.get_ylim() if (ylim is None or this_type not in
ylim.keys()) else ylim[this_type]
if gfp_only:
y_offset = 0.
else:
y_offset = this_ylim[0]
this_gfp += y_offset
ax.fill_between(times, y_offset, this_gfp, color='none',
facecolor=gfp_color, zorder=1, alpha=0.2)
line_list.append(ax.plot(times, this_gfp, color=gfp_color,
zorder=3, alpha=line_alpha)[0])
ax.text(times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
label, zorder=4, color=gfp_color,
path_effects=gfp_path_effects)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors is True:
line.set_linestyle("--")
ax.set_ylabel(ch_unit)
texts.append(ax.text(0, 0, '', zorder=3,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0,
clip_on=True))
if xlim is not None:
if xlim == 'tight':
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and this_type in ylim:
ax.set_ylim(ylim[this_type])
ax.set(title=r'%s (%d channel%s)'
% (titles[this_type], len(D), _pl(len(D))))
if ai == 0:
_add_nave(ax, nave)
if hline is not None:
for h in hline:
c = ('grey' if spatial_colors is True else 'r')
ax.axhline(h, linestyle='--', linewidth=2, color=c)
lines.append(line_list)
if selectable:
for ax in np.array(axes)[selectables]:
if len(ax.lines) == 1:
continue
text = ax.annotate('Loading...', xy=(0.01, 0.1),
xycoords='axes fraction', fontsize=20,
color='green', zorder=3)
text.set_visible(False)
callback_onselect = partial(_line_plot_onselect,
ch_types=ch_types_used, info=info,
data=data, times=times, text=text,
psd=psd, time_unit=time_unit,
sphere=sphere)
blit = False if plt.get_backend() == 'MacOSX' else True
minspan = 0 if len(times) < 2 else times[1] - times[0]
rect_kw = _prop_kw('rect', dict(alpha=0.5, facecolor='red'))
ax._span_selector = SpanSelector(
ax, callback_onselect, 'horizontal', minspan=minspan,
useblit=blit, **rect_kw)
def _add_nave(ax, nave):
"""Add nave to axes."""
if nave is not None:
ax.annotate(
r'N$_{\mathrm{ave}}$=%d' % nave, ha='left', va='bottom',
xy=(0, 1), xycoords='axes fraction',
xytext=(0, 5), textcoords='offset pixels')
def _handle_spatial_colors(colors, info, idx, ch_type, psd, ax, sphere):
"""Set up spatial colors."""
used_nm = np.array(_clean_names(info['ch_names']))[idx]
# find indices for bads
bads = [np.where(used_nm == bad)[0][0] for bad in info['bads'] if bad in
used_nm]
pos, outlines = _get_pos_outlines(info, idx, sphere=sphere)
loc = 1 if psd else 2 # Legend in top right for psd plot.
_plot_legend(pos, colors, ax, bads, outlines, loc)
def _plot_image(data, ax, this_type, picks, cmap, unit, units, scalings, times,
xlim, ylim, titles, colorbar=True, mask=None, mask_cmap=None,
mask_style=None, mask_alpha=.25, nave=None,
time_unit='s', show_names=False, ch_names=None):
"""Plot images."""
import matplotlib.pyplot as plt
assert time_unit is not None
if show_names == "auto":
if picks is not None:
show_names = "all" if len(picks) < 25 else True
else:
show_names = False
cmap = _setup_cmap(cmap)
ch_unit = units[this_type]
this_scaling = scalings[this_type]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
if picks is not None:
data = data[picks]
if mask is not None:
mask = mask[picks]
# Show the image
# Set amplitude scaling
data = this_scaling * data
if ylim is None or this_type not in ylim:
vmax = np.abs(data).max()
vmin = -vmax
else:
vmin, vmax = ylim[this_type]
_check_if_nan(data)
im, t_end = _plot_masked_image(
ax, data, times, mask, yvals=None, cmap=cmap[0],
vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
mask_cmap=mask_cmap)
# ignore xlim='tight'; happens automatically with `extent` in imshow
xlim = None if xlim == 'tight' else xlim
if xlim is not None:
ax.set_xlim(xlim)
if colorbar:
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
ylabel = 'Channels' if show_names else 'Channel (index)'
t = titles[this_type] + ' (%d channel%s' % (len(data), _pl(data)) + t_end
ax.set(ylabel=ylabel, xlabel='Time (%s)' % (time_unit,), title=t)
_add_nave(ax, nave)
yticks = np.arange(len(picks))
if show_names != 'all':
yticks = np.intersect1d(np.round(ax.get_yticks()).astype(int), yticks)
yticklabels = np.array(ch_names)[picks] if show_names else np.array(picks)
ax.set(yticks=yticks, yticklabels=yticklabels[yticks])
@verbose
def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
ylim=None, xlim='tight', proj=False, hline=None, units=None,
scalings=None, titles=None, axes=None, gfp=False,
window_title=None, spatial_colors=False, zorder='unsorted',
selectable=True, noise_cov=None, time_unit='s', sphere=None,
verbose=None):
"""Plot evoked data using butterfly plots.
Left click to a line shows the channel name. Selecting an area by clicking
and holding left mouse button plots a topographic map of the painted area.
.. note:: If bad channels are not excluded they are shown in red.
Parameters
----------
evoked : instance of Evoked
The evoked data.
%(picks_all)s
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
ylim : dict | None
Y limits for plots (after scaling has been applied). e.g.
ylim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
X limits for plots.
%(plot_proj)s
hline : list of float | None
        The values at which to show a horizontal line.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to ``dict(eeg='µV', grad='fT/cm', mag='fT')``.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
titles : dict | None
The titles associated with the channels. If None, defaults to
``dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')``.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
gfp : bool | 'only'
Plot the global field power (GFP) or the root mean square (RMS) of the
data. For MEG data, this will plot the RMS. For EEG, it plots GFP,
i.e. the standard deviation of the signal across channels. The GFP is
equivalent to the RMS of an average-referenced signal.
- ``True``
Plot GFP or RMS (for EEG and MEG, respectively) and traces for all
channels.
- ``'only'``
Plot GFP or RMS (for EEG and MEG, respectively), and omit the
traces for individual channels.
The color of the GFP/RMS trace will be green if
``spatial_colors=False``, and black otherwise.
.. versionchanged:: 0.23
Plot GFP for EEG instead of RMS. Label RMS traces correctly as such.
window_title : str | None
The title to put at the top of the figure.
spatial_colors : bool
If True, the lines are color coded by mapping physical sensor
coordinates into color values. Spatially similar channels will have
similar colors. Bad channels will be dotted. If False, the good
channels are plotted black and bad channels red. Defaults to False.
zorder : str | callable
Which channels to put in the front or back. Only matters if
``spatial_colors`` is used.
If str, must be ``std`` or ``unsorted`` (defaults to ``unsorted``). If
``std``, data with the lowest standard deviation (weakest effects) will
be put in front so that they are not obscured by those with stronger
effects. If ``unsorted``, channels are z-sorted as in the evoked
instance.
        If callable, must take one argument: a numpy array of the same
        dimensionality as the evoked raw data, and return a list of
        unique integers, one per channel, specifying the z-order.
.. versionadded:: 0.13.0
selectable : bool
Whether to use interactive features. If True (default), it is possible
to paint an area to draw topomaps. When False, the interactive features
are disabled. Disabling interactive features reduces memory consumption
and is useful when using ``axes`` parameter to draw multiaxes figures.
.. versionadded:: 0.13.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
magnetometers and gradiometers may introduce differences in scaling,
consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the butterfly plots.
See Also
--------
mne.viz.plot_evoked_white
"""
return _plot_evoked(
evoked=evoked, picks=picks, exclude=exclude, unit=unit, show=show,
ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units,
scalings=scalings, titles=titles, axes=axes, plot_type="butterfly",
gfp=gfp, window_title=window_title, spatial_colors=spatial_colors,
selectable=selectable, zorder=zorder, noise_cov=noise_cov,
time_unit=time_unit, sphere=sphere)
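# Usage sketch for ``plot_evoked`` (illustrative only; the file name is a
# placeholder and the ylim values are arbitrary):
def _sketch_plot_evoked_usage():  # pragma: no cover
    """Plot a butterfly with spatial colors and a GFP/RMS trace."""
    import mne
    evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # placeholder
    return plot_evoked(evoked, spatial_colors=True, gfp=True,
                       ylim=dict(eeg=[-10, 10]), time_unit='ms', show=False)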
def plot_evoked_topo(evoked, layout=None, layout_scale=0.945,
color=None, border='none', ylim=None, scalings=None,
title=None, proj=False, vline=[0.0], fig_background=None,
merge_grads=False, legend=True, axes=None,
background_color='w', noise_cov=None, exclude='bads',
show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
color : list of color | color | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
Matplotlib borders style to be used for each sensor plot.
ylim : dict | None
Y limits for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad, misc.
If None, the ylim parameter for each channel type is determined by
the minimum and maximum peak.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
title : str
Title of the figure.
proj : bool | 'interactive'
        If True, SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of float | None
The values at which to show a vertical line.
fig_background : None | ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
merge_grads : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
legend : bool | int | str | tuple
If True, create a legend based on evoked.comment. If False, disable the
legend. Otherwise, the legend is created and the parameter value is
passed as the location parameter to the matplotlib legend call. It can
be an integer (e.g. 0 corresponds to upper right corner of the plot),
a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
lower left corner of the legend in the axes coordinate system).
See matplotlib documentation for more details.
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
background_color : color
Background color. Typically 'k' (black) or 'w' (white; default).
.. versionadded:: 0.15.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channel names are shown in italic.
Can be a string to load a covariance from disk.
.. versionadded:: 0.16.0
exclude : list of str | 'bads'
Channels names to exclude from the plot. If 'bads', the
bad channels are excluded. By default, exclude is set to 'bads'.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations.
"""
    if not isinstance(evoked, (tuple, list)):
evoked = [evoked]
background_color = _to_rgb(background_color, name='background_color')
dark_background = np.mean(background_color) < 0.5
    fig_facecolor = background_color
    axis_facecolor = background_color
    font_color = 'w' if dark_background else 'k'
if color is None:
if dark_background:
color = ['w'] + _get_color_list()
else:
color = _get_color_list()
    # repeat the color cycle so at least len(evoked) colors are available
    color = color * ((len(evoked) // len(color)) + 1)
    color = color[:len(evoked)]
return _plot_evoked_topo(evoked=evoked, layout=layout,
layout_scale=layout_scale, color=color,
border=border, ylim=ylim, scalings=scalings,
title=title, proj=proj, vline=vline,
fig_facecolor=fig_facecolor,
fig_background=fig_background,
axis_facecolor=axis_facecolor,
font_color=font_color,
merge_channels=merge_grads,
legend=legend, axes=axes, exclude=exclude,
show=show, noise_cov=noise_cov)
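# Usage sketch for ``plot_evoked_topo``: overlay two conditions on a dark
# background (file and condition names are placeholders):
def _sketch_plot_evoked_topo_usage():  # pragma: no cover
    import mne
    aud = mne.read_evokeds('sample-ave.fif', condition='aud')  # placeholder
    vis = mne.read_evokeds('sample-ave.fif', condition='vis')  # placeholder
    return plot_evoked_topo([aud, vis], background_color='k',
                            color=['w', 'y'], show=False)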
@fill_doc
def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True,
show=True, clim=None, xlim='tight', proj=False,
units=None, scalings=None, titles=None, axes=None,
cmap='RdBu_r', colorbar=True, mask=None,
mask_style=None, mask_cmap="Greys", mask_alpha=.25,
time_unit='s', show_names="auto", group_by=None,
sphere=None):
"""Plot evoked data as images.
Parameters
----------
evoked : instance of Evoked
The evoked data.
%(picks_all)s
This parameter can also be used to set the order the channels
are shown in, as the channel image is sorted by the order of picks.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
clim : dict | None
Color limits for plots (after scaling has been applied). e.g.
``clim = dict(eeg=[-20, 20])``.
Valid keys are eeg, mag, grad, misc. If None, the clim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
X limits for plots.
proj : bool | 'interactive'
        If True, SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to ``dict(eeg='µV', grad='fT/cm', mag='fT')``.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
titles : dict | None
The titles associated with the channels. If None, defaults to
``dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')``.
axes : instance of Axes | list | dict | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
If ``group_by`` is a dict, this cannot be a list, but it can be a dict
of lists of axes, with the keys matching those of ``group_by``. In that
case, the provided axes will be used for the corresponding groups.
Defaults to ``None``.
cmap : matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ``('RdBu_r', True)``.
Defaults to ``'RdBu_r'``.
colorbar : bool
If True, plot a colorbar. Defaults to True.
.. versionadded:: 0.16
mask : ndarray | None
An array of booleans of the same shape as the data. Entries of the
data that correspond to ``False`` in the mask are masked (see
        ``mask_style`` below). Useful for, e.g., masking for statistical
significance.
.. versionadded:: 0.16
mask_style : None | 'both' | 'contour' | 'mask'
If ``mask`` is not None: if 'contour', a contour line is drawn around
the masked areas (``True`` in ``mask``). If 'mask', entries not
``True`` in ``mask`` are shown transparently. If 'both', both a contour
and transparency are used.
If ``None``, defaults to 'both' if ``mask`` is not None, and is ignored
otherwise.
.. versionadded:: 0.16
mask_cmap : matplotlib colormap | (colormap, bool) | 'interactive'
        The colormap chosen for masked parts of the image, used only if
        ``mask`` is not ``None``. If None, ``cmap`` is reused. Defaults to
        ``'Greys'``. Behaves like ``cmap`` otherwise, except that it is never
        interactive.
mask_alpha : float
A float between 0 and 1. If ``mask`` is not None, this sets the
alpha level (degree of transparency) for the masked-out segments.
I.e., if 0, masked-out segments are not visible at all.
Defaults to .25.
.. versionadded:: 0.16
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
show_names : bool | 'auto' | 'all'
Determines if channel names should be plotted on the y axis. If False,
no names are shown. If True, ticks are set automatically by matplotlib
and the corresponding channel names are shown. If "all", all channel
names are shown. If "auto", is set to False if ``picks`` is ``None``,
to ``True`` if ``picks`` contains 25 or more entries, or to "all"
if ``picks`` contains fewer than 25 entries.
group_by : None | dict
If a dict, the values must be picks, and ``axes`` must also be a dict
with matching keys, or None. If ``axes`` is None, one figure and one
        axis will be created for each entry in ``group_by``. Then, for each
entry, the picked channels will be plotted to the corresponding axis.
If ``titles`` are None, keys will become plot titles. This is useful
for e.g. ROIs. Each entry must contain only one channel type.
For example::
group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])
If None, all picked channels are plotted to the same axis.
%(topomap_sphere_auto)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the images.
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=clim, proj=proj, xlim=xlim, hline=None,
units=units, scalings=scalings, titles=titles,
axes=axes, plot_type="image", cmap=cmap,
colorbar=colorbar, mask=mask, mask_style=mask_style,
mask_cmap=mask_cmap, mask_alpha=mask_alpha,
time_unit=time_unit, show_names=show_names,
group_by=group_by, sphere=sphere)
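# Usage sketch for ``plot_evoked_image`` with ``group_by``: one image per
# ROI. The channel indices are arbitrary placeholders; each group must
# contain a single channel type.
def _sketch_plot_evoked_image_usage():  # pragma: no cover
    import mne
    evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # placeholder
    rois = dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])
    return plot_evoked_image(evoked, group_by=rois, axes=None,
                             clim=dict(eeg=[-10, 10]), show=False)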
def _plot_update_evoked(params, bools):
"""Update the plot evoked lines."""
picks, evoked = [params[k] for k in ('picks', 'evoked')]
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = evoked.copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
for ax, t in zip(params['axes'], params['ch_types_used']):
this_scaling = params['scalings'][t]
idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
D = this_scaling * new_evoked.data[idx, :]
if params['plot_type'] == 'butterfly':
for line, di in zip(ax.lines, D):
line.set_ydata(di)
else:
ax.images[0].set_data(D)
params['fig'].canvas.draw()
@verbose
def plot_evoked_white(evoked, noise_cov, show=True, rank=None, time_unit='s',
sphere=None, axes=None, verbose=None):
"""Plot whitened evoked response.
Plots the whitened evoked response and the whitened GFP as described in
:footcite:`EngemannGramfort2015`. This function is especially useful for
investigating noise covariance properties to determine if data are
properly whitened (e.g., achieving expected values in line with model
assumptions, see Notes below).
Parameters
----------
evoked : instance of mne.Evoked
The evoked response.
noise_cov : list | instance of Covariance | str
The noise covariance. Can be a string to load a covariance from disk.
show : bool
Show figure if True.
%(rank_None)s
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
%(topomap_sphere_auto)s
axes : list | None
List of axes to plot into.
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
See Also
--------
mne.Evoked.plot
Notes
-----
If baseline signals match the assumption of Gaussian white noise,
values should be centered at 0, and be within 2 standard deviations
(±1.96) for 95%% of the time points. For the global field power (GFP),
we expect it to fluctuate around a value of 1.
If one single covariance object is passed, the GFP panel (bottom)
will depict different sensor types. If multiple covariance objects are
passed as a list, the left column will display the whitened evoked
responses for each channel based on the whitener from the noise covariance
that has the highest log-likelihood. The left column will depict the
whitened GFPs based on each estimator separately for each sensor type.
Instead of numbers of channels the GFP display shows the estimated rank.
Note. The rank estimation will be printed by the logger
(if ``verbose=True``) for each noise covariance estimator that is passed.
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG
signals, vol. 108, 328-342, NeuroImage.
"""
from ..cov import whiten_evoked, read_cov # recursive import
import matplotlib.pyplot as plt
time_unit, times = _check_time_unit(time_unit, evoked.times)
if isinstance(noise_cov, str):
noise_cov = read_cov(noise_cov)
if not isinstance(noise_cov, (list, tuple)):
noise_cov = [noise_cov]
evoked = evoked.copy() # handle ref meg
passive_idx = [idx for idx, proj in enumerate(evoked.info['projs'])
if not proj['active']]
    # drop inactive projectors: they are either already applied or unused
for idx in passive_idx[::-1]: # reverse order so idx does not change
evoked.del_proj(idx)
evoked.pick_types(ref_meg=False, exclude='bads', **_PICK_TYPES_DATA_DICT)
n_ch_used, rank_list, picks_list, has_sss = _triage_rank_sss(
evoked.info, noise_cov, rank, scalings=None)
if has_sss:
logger.info('SSS has been applied to data. Showing mag and grad '
'whitening jointly.')
# get one whitened evoked per cov
evokeds_white = [whiten_evoked(evoked, cov, picks=None, rank=r)
for cov, r in zip(noise_cov, rank_list)]
def whitened_gfp(x, rank=None):
"""Whitened Global Field Power.
The MNE inverse solver assumes zero mean whitened data as input.
Therefore, a chi^2 statistic will be best to detect model violations.
"""
return np.sum(x ** 2, axis=0) / (len(x) if rank is None else rank)
# prepare plot
if len(noise_cov) > 1:
n_columns = 2
n_extra_row = 0
else:
n_columns = 1
n_extra_row = 1
n_rows = n_ch_used + n_extra_row
want_shape = (n_rows, n_columns) if len(noise_cov) > 1 else (n_rows,)
_validate_type(axes, (list, tuple, np.ndarray, None), 'axes')
if axes is None:
_, axes = plt.subplots(n_rows,
n_columns, sharex=True, sharey=False,
figsize=(8.8, 2.2 * n_rows))
else:
axes = np.array(axes)
for ai, ax in enumerate(axes.flat):
_validate_type(ax, plt.Axes, 'axes.flat[%d]' % (ai,))
if axes.shape != want_shape:
raise ValueError(f'axes must have shape {want_shape}, got '
f'{axes.shape}')
fig = axes.flat[0].figure
if n_columns > 1:
suptitle = ('Whitened evoked (left, best estimator = "%s")\n'
'and global field power '
'(right, comparison of estimators)' %
noise_cov[0].get('method', 'empirical'))
fig.suptitle(suptitle)
if any(((n_columns == 1 and n_ch_used >= 1),
(n_columns == 2 and n_ch_used == 1))):
axes_evoked = axes[:n_ch_used]
ax_gfp = axes[-1:]
elif n_columns == 2 and n_ch_used > 1:
axes_evoked = axes[:n_ch_used, 0]
ax_gfp = axes[:, 1]
else:
raise RuntimeError('Wrong axes inputs')
titles_ = _handle_default('titles')
if has_sss:
titles_['meg'] = 'MEG (combined)'
colors = [plt.cm.Set1(i) for i in np.linspace(0, 0.5, len(noise_cov))]
ch_colors = _handle_default('color', None)
iter_gfp = zip(evokeds_white, noise_cov, rank_list, colors)
    # the first covariance is by construction the best one (highest
    # log-likelihood); its whitened evoked is the one plotted on the left
if not has_sss:
evokeds_white[0].plot(unit=False, axes=axes_evoked,
hline=[-1.96, 1.96], show=False,
time_unit=time_unit)
else:
for ((ch_type, picks), ax) in zip(picks_list, axes_evoked):
ax.plot(times, evokeds_white[0].data[picks].T, color='k',
lw=0.5)
for hline in [-1.96, 1.96]:
ax.axhline(hline, color='red', linestyle='--', lw=2)
ax.set(title='%s (%d channel%s)'
% (titles_[ch_type], len(picks), _pl(len(picks))))
# Now plot the GFP for all covs if indicated.
for evoked_white, noise_cov, rank_, color in iter_gfp:
i = 0
for ch, sub_picks in picks_list:
this_rank = rank_[ch]
title = '{0} ({2}{1})'.format(
titles_[ch] if n_columns > 1 else ch,
this_rank, 'rank ' if n_columns > 1 else '')
label = noise_cov.get('method', 'empirical')
ax = ax_gfp[i]
ax.set_title(title if n_columns > 1 else
'Whitened GFP, method = "%s"' % label)
data = evoked_white.data[sub_picks]
gfp = whitened_gfp(data, rank=this_rank)
            # Map SSS-processed (combined 'meg') data to the mag color
color_ch = 'mag' if ch == 'meg' else ch
ax.plot(times, gfp,
label=label if n_columns > 1 else title,
color=color if n_columns > 1 else ch_colors[color_ch],
lw=0.5)
ax.set(xlabel='Time (%s)' % (time_unit,), ylabel=r'GFP ($\chi^2$)',
xlim=[times[0], times[-1]], ylim=(0, 10))
ax.axhline(1, color='red', linestyle='--', lw=2.)
if n_columns > 1:
i += 1
ax = ax_gfp[0]
if n_columns == 1:
ax.legend( # mpl < 1.2.1 compatibility: use prop instead of fontsize
loc='upper right', bbox_to_anchor=(0.98, 0.9), prop=dict(size=12))
else:
ax.legend(loc='upper right', prop=dict(size=10))
params = dict(top=[0.69, 0.82, 0.87][n_rows - 1],
bottom=[0.22, 0.13, 0.09][n_rows - 1])
if has_sss:
params['hspace'] = 0.49
fig.subplots_adjust(**params)
fig.canvas.draw()
plt_show(show)
return fig
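# Usage sketch for ``plot_evoked_white``: pass several covariance estimates
# to compare them in the GFP panel (file names are placeholders):
def _sketch_plot_evoked_white_usage():  # pragma: no cover
    import mne
    evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # placeholder
    cov_emp = mne.read_cov('sample-emp-cov.fif')              # placeholder
    cov_shrunk = mne.read_cov('sample-shrunk-cov.fif')        # placeholder
    return plot_evoked_white(evoked, [cov_emp, cov_shrunk], show=False)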
@verbose
def plot_snr_estimate(evoked, inv, show=True, axes=None, verbose=None):
"""Plot a data SNR estimate.
Parameters
----------
evoked : instance of Evoked
The evoked instance. This should probably be baseline-corrected.
inv : instance of InverseOperator
The minimum-norm inverse operator.
show : bool
Show figure if True.
axes : instance of Axes | None
The axes to plot into.
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
    The bluish green line ('Inverse' in the legend) is the SNR estimated from
    the mismatch between the data and the data re-estimated from the
    regularized inverse. The orange line ('Whitened GFP') is the SNR
    determined by the GFP of the whitened evoked data.
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
from ..minimum_norm import estimate_snr
snr, snr_est = estimate_snr(evoked, inv)
_validate_type(axes, (None, plt.Axes))
if axes is None:
_, ax = plt.subplots(1, 1)
else:
ax = axes
del axes
fig = ax.figure
lims = np.concatenate([evoked.times[[0, -1]], [-1, snr_est.max()]])
ax.axvline(0, color='k', ls=':', lw=1)
ax.axhline(0, color='k', ls=':', lw=1)
# Colors are "bluish green" and "vermilion" taken from:
# http://bconnelly.net/2013/10/creating-colorblind-friendly-figures/
hs = list()
labels = ('Inverse', 'Whitened GFP')
hs.append(ax.plot(
evoked.times, snr_est, color=[0.0, 0.6, 0.5])[0])
hs.append(ax.plot(
evoked.times, snr - 1, color=[0.8, 0.4, 0.0])[0])
ax.set(xlim=lims[:2], ylim=lims[2:], ylabel='SNR',
xlabel='Time (s)')
if evoked.comment is not None:
ax.set_title(evoked.comment)
ax.legend(hs, labels, title='Estimation method')
plt_show(show)
return fig
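# Usage sketch for ``plot_snr_estimate`` (file names are placeholders):
def _sketch_plot_snr_estimate_usage():  # pragma: no cover
    import mne
    from mne.minimum_norm import read_inverse_operator
    evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # placeholder
    inv = read_inverse_operator('sample-inv.fif')             # placeholder
    return plot_snr_estimate(evoked, inv, show=False)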
@fill_doc
def plot_evoked_joint(evoked, times="peaks", title='', picks=None,
exclude=None, show=True, ts_args=None,
topomap_args=None):
"""Plot evoked data as butterfly plot and add topomaps for time points.
    .. note:: Axes to plot in can be passed by the user through ``ts_args`` or
              ``topomap_args``. In that case, axes have to be provided in both
              ``ts_args`` and ``topomap_args``. Be aware that when the axes
              are provided, their position may be slightly modified.
Parameters
----------
evoked : instance of Evoked
The evoked instance.
times : float | array of float | "auto" | "peaks"
The time point(s) to plot. If ``"auto"``, 5 evenly spaced topographies
between the first and last time instant will be shown. If ``"peaks"``,
finds time points automatically by checking for 3 local maxima in
Global Field Power. Defaults to ``"peaks"``.
title : str | None
The title. If ``None``, suppress printing channel type title. If an
empty string, a default title is created. Defaults to ''. If custom
axes are passed make sure to set ``title=None``, otherwise some of your
axes may be removed during placement of the title axis.
%(picks_all)s
exclude : None | list of str | 'bads'
Channels names to exclude from being shown. If ``'bads'``, the
bad channels are excluded. Defaults to ``None``.
show : bool
Show figure if ``True``. Defaults to ``True``.
ts_args : None | dict
A dict of ``kwargs`` that are forwarded to :meth:`mne.Evoked.plot` to
style the butterfly plot. If they are not in this dict, the following
defaults are passed: ``spatial_colors=True``, ``zorder='std'``.
``show`` and ``exclude`` are illegal.
If ``None``, no customizable arguments will be passed.
Defaults to ``None``.
topomap_args : None | dict
A dict of ``kwargs`` that are forwarded to
:meth:`mne.Evoked.plot_topomap` to style the topomaps.
If it is not in this dict, ``outlines='skirt'`` will be passed.
``show``, ``times``, ``colorbar`` are illegal.
If ``None``, no customizable arguments will be passed.
Defaults to ``None``.
Returns
-------
fig : instance of matplotlib.figure.Figure | list
The figure object containing the plot. If ``evoked`` has multiple
channel types, a list of figures, one for each channel type, is
returned.
Notes
-----
.. versionadded:: 0.12.0
"""
import matplotlib.pyplot as plt
if ts_args is not None and not isinstance(ts_args, dict):
raise TypeError('ts_args must be dict or None, got type %s'
% (type(ts_args),))
ts_args = dict() if ts_args is None else ts_args.copy()
ts_args['time_unit'], _ = _check_time_unit(
ts_args.get('time_unit', 's'), evoked.times)
topomap_args = dict() if topomap_args is None else topomap_args.copy()
got_axes = False
illegal_args = {"show", 'times', 'exclude'}
for args in (ts_args, topomap_args):
if any((x in args for x in illegal_args)):
raise ValueError("Don't pass any of {} as *_args.".format(
", ".join(list(illegal_args))))
if ("axes" in ts_args) or ("axes" in topomap_args):
if not (("axes" in ts_args) and ("axes" in topomap_args)):
raise ValueError("If one of `ts_args` and `topomap_args` contains "
"'axes', the other must, too.")
_validate_if_list_of_axes([ts_args["axes"]], 1)
        if times is None or (isinstance(times, str) and times == 'peaks'):
            n_topomaps = 3 + 1  # 3 GFP peaks + 1 colorbar axes
        elif isinstance(times, str):
            assert times == 'auto'  # 5 evenly spaced time points
            n_topomaps = 5 + 1
        else:
            n_topomaps = len(np.atleast_1d(times)) + 1
_validate_if_list_of_axes(list(topomap_args["axes"]), n_topomaps)
got_axes = True
# channel selection
# simply create a new evoked object with the desired channel selection
# Need to deal with proj before picking to avoid bad projections
proj = topomap_args.get('proj', True)
proj_ts = ts_args.get('proj', True)
if proj_ts != proj:
raise ValueError(
f'topomap_args["proj"] (default True, got {proj}) must match '
f'ts_args["proj"] (default True, got {proj_ts})')
_check_option('topomap_args["proj"]', proj, (True, False, 'reconstruct'))
evoked = evoked.copy()
if proj:
evoked.apply_proj()
if proj == 'reconstruct':
evoked._reconstruct_proj()
topomap_args['proj'] = ts_args['proj'] = False # don't reapply
evoked = _pick_inst(evoked, picks, exclude, copy=False)
info = evoked.info
ch_types = _get_channel_types(info, unique=True, only_data_chs=True)
# if multiple sensor types: one plot per channel type, recursive call
if len(ch_types) > 1:
if got_axes:
raise NotImplementedError(
"Currently, passing axes manually (via `ts_args` or "
"`topomap_args`) is not supported for multiple channel types.")
figs = list()
for this_type in ch_types: # pick only the corresponding channel type
ev_ = evoked.copy().pick_channels(
[info['ch_names'][idx] for idx in range(info['nchan'])
if channel_type(info, idx) == this_type])
if len(_get_channel_types(ev_.info, unique=True)) > 1:
raise RuntimeError('Possibly infinite loop due to channel '
'selection problem. This should never '
'happen! Please check your channel types.')
figs.append(
plot_evoked_joint(
ev_, times=times, title=title, show=show, ts_args=ts_args,
exclude=list(), topomap_args=topomap_args))
return figs
# set up time points to show topomaps for
times_sec = _process_times(evoked, times, few=True)
del times
_, times_ts = _check_time_unit(ts_args['time_unit'], times_sec)
# prepare axes for topomap
if not got_axes:
fig, ts_ax, map_ax, cbar_ax = _prepare_joint_axes(len(times_sec),
figsize=(8.0, 4.2))
else:
ts_ax = ts_args["axes"]
del ts_args["axes"]
map_ax = topomap_args["axes"][:-1]
cbar_ax = topomap_args["axes"][-1]
del topomap_args["axes"]
fig = cbar_ax.figure
# butterfly/time series plot
# most of this code is about passing defaults on demand
ts_args_def = dict(picks=None, unit=True, ylim=None, xlim='tight',
proj=False, hline=None, units=None, scalings=None,
titles=None, gfp=False, window_title=None,
spatial_colors=True, zorder='std',
sphere=None)
ts_args_def.update(ts_args)
_plot_evoked(evoked, axes=ts_ax, show=False, plot_type='butterfly',
exclude=[], **ts_args_def)
# handle title
# we use a new axis for the title to handle scaling of plots
old_title = ts_ax.get_title()
ts_ax.set_title('')
# XXX BUG destroys ax -> fig assignment if title & axes are passed
if title is not None:
title_ax = plt.subplot(4, 3, 2)
if title == '':
title = old_title
title_ax.text(.5, .5, title, transform=title_ax.transAxes,
horizontalalignment='center',
verticalalignment='center')
title_ax.axis('off')
# topomap
contours = topomap_args.get('contours', 6)
ch_type = ch_types.pop() # set should only contain one element
# Since the data has all the ch_types, we get the limits from the plot.
vmin, vmax = ts_ax.get_ylim()
norm = ch_type == 'grad'
vmin = 0 if norm else vmin
vmin, vmax = _setup_vmin_vmax(evoked.data, vmin, vmax, norm)
if not isinstance(contours, (list, np.ndarray)):
locator, contours = _set_contour_locator(vmin, vmax, contours)
else:
locator = None
topomap_args_pass = (dict(extrapolate='local') if ch_type == 'seeg'
else dict())
topomap_args_pass.update(topomap_args)
topomap_args_pass['outlines'] = topomap_args.get('outlines', 'skirt')
topomap_args_pass['contours'] = contours
evoked.plot_topomap(times=times_sec, axes=map_ax, show=False,
colorbar=False, **topomap_args_pass)
if topomap_args.get('colorbar', True):
from matplotlib import ticker
cbar_ax.grid(False) # auto-removal deprecated as of 2021/10/05
cbar = plt.colorbar(map_ax[0].images[0], cax=cbar_ax)
if isinstance(contours, (list, np.ndarray)):
cbar.set_ticks(contours)
else:
if locator is None:
locator = ticker.MaxNLocator(nbins=5)
cbar.locator = locator
cbar.update_ticks()
if not got_axes:
plt.subplots_adjust(left=.1, right=.93, bottom=.14,
top=1. if title is not None else 1.2)
# connection lines
# draw the connection lines between time series and topoplots
lines = [_connection_line(timepoint, fig, ts_ax, map_ax_)
for timepoint, map_ax_ in zip(times_ts, map_ax)]
    for line in lines:
        # Figure.lines is no longer directly mutable in recent Matplotlib;
        # Figure.add_artist is the version-safe way to attach the lines
        fig.add_artist(line)
# mark times in time series plot
for timepoint in times_ts:
ts_ax.axvline(timepoint, color='grey', linestyle='-',
linewidth=1.5, alpha=.66, zorder=0)
# show and return it
plt_show(show)
return fig
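# Usage sketch for ``plot_evoked_joint``: style the butterfly and the
# topomaps independently via ``ts_args`` and ``topomap_args`` (file name is
# a placeholder):
def _sketch_plot_evoked_joint_usage():  # pragma: no cover
    import mne
    evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # placeholder
    return plot_evoked_joint(evoked, times='peaks',
                             ts_args=dict(gfp=True, time_unit='ms'),
                             topomap_args=dict(outlines='head'), show=False)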
###############################################################################
# The following functions are all helpers for plot_compare_evokeds. #
###############################################################################
def _check_loc_legal(loc, what='your choice', default=1):
"""Check if loc is a legal location for MPL subordinate axes."""
true_default = {"legend": 2, "show_sensors": 1}.get(what, default)
if isinstance(loc, (bool, np.bool_)) and loc:
loc = true_default
loc_dict = {'upper right': 1, 'upper left': 2, 'lower left': 3,
'lower right': 4, 'right': 5, 'center left': 6,
'center right': 7, 'lower center': 8, 'upper center': 9,
'center': 10}
loc_ = loc_dict.get(loc, loc)
if loc_ not in range(11):
        raise ValueError(str(loc) + " is not a legal MPL loc, please supply "
                         "another value for " + what + ".")
return loc_
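# For example, ``_check_loc_legal('upper right', 'legend')`` returns ``1``,
# and ``_check_loc_legal(True, 'show_sensors')`` falls back to that use's
# default (here ``1``); values outside 0-10 raise a ValueError.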
def _validate_style_keys_pce(styles, conditions, tags):
"""Validate styles dict keys for plot_compare_evokeds."""
styles = deepcopy(styles)
if not set(styles).issubset(tags.union(conditions)):
raise ValueError('The keys in "styles" ({}) must match the keys in '
'"evokeds" ({}).'.format(list(styles), conditions))
# make sure all the keys are in there
for cond in conditions:
        if cond not in styles:
            styles[cond] = dict()
        # deal with matplotlib's synonymous handling of "c" and "color" /
        # "ls" and "linestyle" / "lw" and "linewidth"; each alias is
        # normalized independently (one style dict may contain several)
        if 'c' in styles[cond]:
            styles[cond]['color'] = styles[cond].pop('c')
        if 'ls' in styles[cond]:
            styles[cond]['linestyle'] = styles[cond].pop('ls')
        if 'lw' in styles[cond]:
            styles[cond]['linewidth'] = styles[cond].pop('lw')
# transfer styles from partial-matched entries
for tag in cond.split('/'):
if tag in styles:
styles[cond].update(styles[tag])
# remove the (now transferred) partial-matching style entries
for key in list(styles):
if key not in conditions:
del styles[key]
return styles
def _validate_colors_pce(colors, cmap, conditions, tags):
"""Check and assign colors for plot_compare_evokeds."""
err_suffix = ''
if colors is None:
if cmap is None:
colors = _get_color_list()
err_suffix = ' in the default color cycle'
else:
colors = list(range(len(conditions)))
# convert color list to dict
if isinstance(colors, (list, tuple, np.ndarray)):
if len(conditions) > len(colors):
raise ValueError('Trying to plot {} conditions, but there are only'
' {} colors{}. Please specify colors manually.'
.format(len(conditions), len(colors), err_suffix))
colors = dict(zip(conditions, colors))
# should be a dict by now...
if not isinstance(colors, dict):
raise TypeError('"colors" must be a dict, list, or None; got {}.'
.format(type(colors).__name__))
# validate color dict keys
if not set(colors).issubset(tags.union(conditions)):
raise ValueError('If "colors" is a dict its keys ({}) must '
'match the keys/conditions in "evokeds" ({}).'
.format(list(colors), conditions))
# validate color dict values
color_vals = list(colors.values())
all_numeric = all(_is_numeric(_color) for _color in color_vals)
if cmap is not None and not all_numeric:
        raise TypeError('if "cmap" is specified, then "colors" must be '
                        'None or a (list or dict) of (ints or floats); got '
                        '{}.'.format(', '.join(str(v) for v in color_vals)))
# convert provided ints to sequential, rank-ordered ints
all_int = all([isinstance(_color, Integral) for _color in color_vals])
if all_int:
colors = deepcopy(colors)
ranks = {val: ix for ix, val in enumerate(sorted(set(color_vals)))}
for key, orig_int in colors.items():
colors[key] = ranks[orig_int]
# if no cmap, convert color ints to real colors
if cmap is None:
color_list = _get_color_list()
for cond, color_int in colors.items():
colors[cond] = color_list[color_int]
# recompute color_vals as a sorted set (we'll need it that way later)
color_vals = set(colors.values())
if all_numeric:
color_vals = sorted(color_vals)
return colors, color_vals
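# For example, with ``colors=dict(A=10, B=30, C=20)`` and no ``cmap``, the
# ints are rank-ordered to 0/2/1, so A, B and C receive the first, third and
# second entries of the default color cycle, respectively.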
def _validate_cmap_pce(cmap, colors, color_vals):
"""Check and assign colormap for plot_compare_evokeds."""
from matplotlib.cm import get_cmap
from matplotlib.colors import Colormap
all_int = all([isinstance(_color, Integral) for _color in color_vals])
lut = len(color_vals) if all_int else None
colorbar_title = ''
if isinstance(cmap, (list, tuple, np.ndarray)) and len(cmap) == 2:
colorbar_title, cmap = cmap
if isinstance(cmap, str):
cmap = get_cmap(cmap, lut=lut)
    elif isinstance(cmap, Colormap) and all_int:
        # Colormap._resample was made public as ``resampled`` in
        # Matplotlib 3.6; support both spellings
        resample = getattr(cmap, 'resampled', None)
        if resample is None:  # Matplotlib < 3.6
            resample = cmap._resample
        cmap = resample(lut)
return cmap, colorbar_title
def _validate_linestyles_pce(linestyles, conditions, tags):
"""Check and assign linestyles for plot_compare_evokeds."""
# make linestyles a list if it's not defined
if linestyles is None:
linestyles = [None] * len(conditions) # will get changed to defaults
# convert linestyle list to dict
if isinstance(linestyles, (list, tuple, np.ndarray)):
if len(conditions) > len(linestyles):
raise ValueError('Trying to plot {} conditions, but there are '
'only {} linestyles. Please specify linestyles '
'manually.'
.format(len(conditions), len(linestyles)))
linestyles = dict(zip(conditions, linestyles))
# should be a dict by now...
if not isinstance(linestyles, dict):
raise TypeError('"linestyles" must be a dict, list, or None; got {}.'
.format(type(linestyles).__name__))
# validate linestyle dict keys
if not set(linestyles).issubset(tags.union(conditions)):
raise ValueError('If "linestyles" is a dict its keys ({}) must '
'match the keys/conditions in "evokeds" ({}).'
.format(list(linestyles), conditions))
# normalize linestyle values (so we can accurately count unique linestyles
# later). See https://github.com/matplotlib/matplotlib/blob/master/matplotlibrc.template#L131-L133 # noqa
linestyle_map = {'solid': (0, ()),
'dotted': (0, (1., 1.65)),
'dashed': (0, (3.7, 1.6)),
'dashdot': (0, (6.4, 1.6, 1., 1.6)),
'-': (0, ()),
':': (0, (1., 1.65)),
'--': (0, (3.7, 1.6)),
'-.': (0, (6.4, 1.6, 1., 1.6))}
for cond, _ls in linestyles.items():
linestyles[cond] = linestyle_map.get(_ls, _ls)
return linestyles
def _populate_style_dict_pce(condition, condition_styles, style_name,
style_dict, cmap):
"""Transfer styles into condition_styles dict for plot_compare_evokeds."""
defaults = dict(color='gray', linestyle=(0, ())) # (0, ()) == 'solid'
# if condition X doesn't yet have style Y defined:
if condition_styles.get(style_name, None) is None:
# check the style dict for the full condition name
try:
condition_styles[style_name] = style_dict[condition]
# if it's not in there, try the slash-separated condition tags
except KeyError:
for tag in condition.split('/'):
try:
condition_styles[style_name] = style_dict[tag]
# if the tag's not in there, assign a default value (but also
# continue looping in search of a tag that *is* in there)
except KeyError:
condition_styles[style_name] = defaults[style_name]
# if we found a valid tag, keep track of it for colorbar
# legend purposes, and also stop looping (so we don't overwrite
# a valid tag's style with an invalid tag → default style)
else:
if style_name == 'color' and cmap is not None:
condition_styles['cmap_label'] = tag
break
return condition_styles
def _handle_styles_pce(styles, linestyles, colors, cmap, conditions):
"""Check and assign styles for plot_compare_evokeds."""
styles = deepcopy(styles)
# validate style dict structure (doesn't check/assign values yet)
tags = set(tag for cond in conditions for tag in cond.split('/'))
if styles is None:
styles = {cond: dict() for cond in conditions}
styles = _validate_style_keys_pce(styles, conditions, tags)
# validate color dict
colors, color_vals = _validate_colors_pce(colors, cmap, conditions, tags)
all_int = all([isinstance(_color, Integral) for _color in color_vals])
# instantiate cmap
cmap, colorbar_title = _validate_cmap_pce(cmap, colors, color_vals)
# validate linestyles
linestyles = _validate_linestyles_pce(linestyles, conditions, tags)
# prep for colorbar tick handling
colorbar_ticks = None if cmap is None else dict()
# array mapping color integers (indices) to tick locations (array values)
tick_locs = np.linspace(0, 1, 2 * len(color_vals) + 1)[1::2]
# transfer colors/linestyles dicts into styles dict; fall back on defaults
color_and_linestyle = dict(color=colors, linestyle=linestyles)
for cond, cond_styles in styles.items():
for _name, _style in color_and_linestyle.items():
cond_styles = _populate_style_dict_pce(cond, cond_styles, _name,
_style, cmap)
# convert numeric colors into cmap color values; store colorbar ticks
if cmap is not None:
color_number = cond_styles['color']
cond_styles['color'] = cmap(color_number)
tick_loc = tick_locs[color_number] if all_int else color_number
key = cond_styles.pop('cmap_label', cond)
colorbar_ticks[key] = tick_loc
return styles, linestyles, colors, cmap, colorbar_title, colorbar_ticks
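# Illustrative sketch: for slash-separated conditions, per-tag color and
# linestyle entries are merged into full-condition styles (condition names
# below are arbitrary).
def _sketch_handle_styles_pce():  # pragma: no cover
    conditions = ['Aud/L', 'Aud/R', 'Vis/L', 'Vis/R']
    colors = dict(Aud='r', Vis='b')  # keyed by tag, not full condition
    linestyles = dict(L='-', R=':')  # ditto
    styles, *_ = _handle_styles_pce(styles=None, linestyles=linestyles,
                                    colors=colors, cmap=None,
                                    conditions=conditions)
    # e.g. styles['Aud/L'] == dict(color='r', linestyle=(0, ()))
    return styles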
def _evoked_sensor_legend(info, picks, ymin, ymax, show_sensors, ax,
sphere):
"""Show sensor legend (location of a set of sensors on the head)."""
if show_sensors is True:
ymin, ymax = np.abs(ax.get_ylim())
show_sensors = "lower right" if ymin > ymax else "upper right"
pos, outlines = _get_pos_outlines(info, picks, sphere=sphere)
show_sensors = _check_loc_legal(show_sensors, "show_sensors")
_plot_legend(pos, ["k"] * len(picks), ax, list(), outlines,
show_sensors, size=25)
def _draw_colorbar_pce(ax, colors, cmap, colorbar_title, colorbar_ticks):
"""Draw colorbar for plot_compare_evokeds."""
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colorbar import ColorbarBase
from matplotlib.transforms import Bbox
# create colorbar axes
orig_bbox = ax.get_position()
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.1)
cax.yaxis.tick_right()
cb = ColorbarBase(cax, cmap=cmap, norm=None, orientation='vertical')
cb.set_label(colorbar_title)
# handle ticks
ticks = sorted(set(colorbar_ticks.values()))
ticklabels = [''] * len(ticks)
for label, tick in colorbar_ticks.items():
idx = ticks.index(tick)
if len(ticklabels[idx]): # handle labels with the same color/location
ticklabels[idx] = '\n'.join([ticklabels[idx], label])
else:
ticklabels[idx] = label
assert all(len(label) for label in ticklabels)
cb.set_ticks(ticks)
cb.set_ticklabels(ticklabels)
# shrink colorbar if discrete colors
color_vals = set(colors.values())
if all([isinstance(_color, Integral) for _color in color_vals]):
fig = ax.get_figure()
fig.canvas.draw()
fig_aspect = np.divide(*fig.get_size_inches())
new_bbox = ax.get_position()
cax_width = 0.75 * (orig_bbox.xmax - new_bbox.xmax)
# add extra space for multiline colorbar labels
h_mult = max(2, max([len(label.split('\n')) for label in ticklabels]))
cax_height = len(color_vals) * h_mult * cax_width / fig_aspect
x0 = orig_bbox.xmax - cax_width
y0 = (new_bbox.ymax + new_bbox.ymin - cax_height) / 2
x1 = orig_bbox.xmax
y1 = y0 + cax_height
new_bbox = Bbox([[x0, y0], [x1, y1]])
cax.set_axes_locator(None)
cax.set_position(new_bbox)
def _draw_legend_pce(legend, split_legend, styles, linestyles, colors, cmap,
do_topo, ax):
"""Draw legend for plot_compare_evokeds."""
import matplotlib.lines as mlines
lines = list()
# triage
if split_legend is None:
split_legend = cmap is not None
n_colors = len(set(colors.values()))
n_linestyles = len(set(linestyles.values()))
draw_styles = cmap is None and not split_legend
draw_colors = cmap is None and split_legend and n_colors > 1
draw_linestyles = (cmap is None or split_legend) and n_linestyles > 1
# create the fake lines for the legend
if draw_styles:
for label, cond_styles in styles.items():
line = mlines.Line2D([], [], label=label, **cond_styles)
lines.append(line)
else:
if draw_colors:
for label, color in colors.items():
line = mlines.Line2D([], [], label=label, linestyle='solid',
color=color)
lines.append(line)
if draw_linestyles:
for label, linestyle in linestyles.items():
line = mlines.Line2D([], [], label=label, linestyle=linestyle,
color='black')
lines.append(line)
# legend params
ncol = 1 + (len(lines) // 5)
loc = _check_loc_legal(legend, 'legend')
legend_params = dict(loc=loc, frameon=True, ncol=ncol)
# special placement (above dedicated legend axes) in topoplot
if do_topo and isinstance(legend, bool):
legend_params.update(loc='lower right', bbox_to_anchor=(1, 1))
# draw the legend
if any([draw_styles, draw_colors, draw_linestyles]):
labels = [line.get_label() for line in lines]
ax.legend(lines, labels, **legend_params)
def _draw_axes_pce(ax, ymin, ymax, truncate_yaxis, truncate_xaxis, invert_y,
vlines, tmin, tmax, unit, skip_axlabel=True):
"""Position, draw, and truncate axes for plot_compare_evokeds."""
# avoid matplotlib errors
if ymin == ymax:
ymax += 1e-15
if tmin == tmax:
tmax += 1e-9
ax.set_xlim(tmin, tmax)
# for dark backgrounds:
ax.patch.set_alpha(0)
if not np.isfinite([ymin, ymax]).all(): # nothing plotted
return
ax.set_ylim(ymin, ymax)
ybounds = (ymin, ymax)
# determine ymin/ymax for spine truncation
trunc_y = True if truncate_yaxis == 'auto' else truncate_yaxis
if truncate_yaxis:
if isinstance(truncate_yaxis, bool):
# truncate to half the max abs. value and round to a nice-ish
# number. ylims are already symmetric about 0 or have a lower bound
# of 0, so div. by 2 should suffice.
ybounds = np.array([ymin, ymax]) / 2.
precision = 0.25
ybounds = np.round(ybounds / precision) * precision
elif truncate_yaxis == 'auto':
# truncate to existing max/min ticks
ybounds = _trim_ticks(ax.get_yticks(), ymin, ymax)[[0, -1]]
else:
raise ValueError('"truncate_yaxis" must be bool or '
'"auto", got {}'.format(truncate_yaxis))
_setup_ax_spines(ax, vlines, tmin, tmax, ybounds[0], ybounds[1], invert_y,
unit, truncate_xaxis, trunc_y, skip_axlabel)
def _get_data_and_ci(evoked, combine, combine_func, picks, scaling=1,
ci_fun=None):
"""Compute (sensor-aggregated, scaled) time series and possibly CI."""
picks = np.array(picks).flatten()
# apply scalings
data = np.array([evk.data[picks] * scaling for evk in evoked])
# combine across sensors
if combine is not None:
logger.info('combining channels using "{}"'.format(combine))
data = combine_func(data)
# get confidence band
if ci_fun is not None:
ci = ci_fun(data)
# get grand mean across evokeds
data = np.mean(data, axis=0)
_check_if_nan(data)
return (data,) if ci_fun is None else (data, ci)
def _get_ci_function_pce(ci, do_topo=False):
"""Get confidence interval function for plot_compare_evokeds."""
if ci is None:
return None
elif callable(ci):
return ci
elif isinstance(ci, bool) and not ci:
return None
elif isinstance(ci, bool):
ci = 0.95
if isinstance(ci, float):
from ..stats import _ci
method = 'parametric' if do_topo else 'bootstrap'
return partial(_ci, ci=ci, method=method)
else:
raise TypeError('"ci" must be None, bool, float or callable, got {}'
.format(type(ci).__name__))
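# For example, ``_get_ci_function_pce(True)`` yields a bootstrap 95% CI
# function, ``_get_ci_function_pce(0.9, do_topo=True)`` uses the (faster)
# parametric method, and ``ci=False`` or ``ci=None`` disables the band.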
def _plot_compare_evokeds(ax, data_dict, conditions, times, ci_dict, styles,
title, all_positive, topo):
"""Plot evokeds (to compare them; with CIs) based on a data_dict."""
for condition in conditions:
# plot the actual data ('dat') as a line
dat = data_dict[condition].T
ax.plot(times, dat, zorder=1000, label=condition, clip_on=False,
**styles[condition])
# plot the confidence interval if available
if ci_dict.get(condition, None) is not None:
ci_ = ci_dict[condition]
ax.fill_between(times, ci_[0].flatten(), ci_[1].flatten(),
zorder=9, color=styles[condition]['color'],
alpha=0.3, clip_on=False)
if topo:
ax.text(-.1, 1, title, transform=ax.transAxes)
else:
ax.set_title(title)
def _title_helper_pce(title, picked_types, picks, ch_names, combine):
"""Format title for plot_compare_evokeds."""
if title is None:
title = (_handle_default('titles').get(picks, None) if picked_types
else _set_title_multiple_electrodes(title, combine, ch_names))
# add the `combine` modifier
do_combine = picked_types or len(ch_names) > 1
if (title is not None and len(title) and isinstance(combine, str) and
do_combine):
_comb = combine.upper() if combine == 'gfp' else combine
_comb = 'std. dev.' if _comb == 'std' else _comb
title += ' ({})'.format(_comb)
return title
def _ascii_minus_to_unicode(s):
"""Replace ASCII-encoded "minus-hyphen" characters with Unicode minus.
Aux function for ``plot_compare_evokeds`` to prettify ``Evoked.comment``.
"""
if s is None:
return
# replace ASCII minus operators with Unicode minus characters
s = s.replace(' - ', ' − ')
# replace leading minus operator if present
if s.startswith('-'):
s = f'−{s[1:]}'
return s
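# Behaviour sketch (added; the example strings are hypothetical):
#     _ascii_minus_to_unicode('aud - vis')  ->  'aud − vis'
#     _ascii_minus_to_unicode('-baseline')  ->  '−baseline'
#     _ascii_minus_to_unicode(None)         ->  None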
@fill_doc
def plot_compare_evokeds(evokeds, picks=None, colors=None,
linestyles=None, styles=None, cmap=None,
vlines='auto', ci=True, truncate_yaxis='auto',
truncate_xaxis=True, ylim=None, invert_y=False,
show_sensors=None, legend=True,
split_legend=None, axes=None, title=None, show=True,
combine=None, sphere=None):
"""Plot evoked time courses for one or more conditions and/or channels.
Parameters
----------
evokeds : instance of mne.Evoked | list | dict
If a single Evoked instance, it is plotted as a time series.
If a list of Evokeds, the contents are plotted with their
``.comment`` attributes used as condition labels. If no comment is set,
the index of the respective Evoked in the list will be used instead,
starting with ``1`` for the first Evoked.
If a dict whose values are Evoked objects, the contents are plotted as
single time series each and the keys are used as labels.
If a dict or list of lists, the unweighted mean is plotted as a time
series and the parametric confidence interval is plotted as a shaded
area. All instances must have the same shape - channel numbers, time
points etc.
If dict, keys must be of type str.
%(picks_all_data)s
* If picks is None or a (collection of) data channel types, the
global field power will be plotted for all data channels.
Otherwise, picks will be averaged.
* If multiple channel types are selected, one
figure will be returned for each channel type.
* If the selected channels are gradiometers, the signal from
corresponding (gradiometer) pairs will be combined.
colors : list | dict | None
Colors to use when plotting the ERP/F lines and confidence bands. If
``cmap`` is not ``None``, ``colors`` must be a :class:`list` or
:class:`dict` of :class:`ints <int>` or :class:`floats <float>`
indicating steps or percentiles (respectively) along the colormap. If
``cmap`` is ``None``, list elements or dict values of ``colors`` must
be :class:`ints <int>` or valid :doc:`matplotlib colors
<matplotlib:tutorials/colors/colors>`; lists are cycled through
sequentially,
while dicts must have keys matching the keys or conditions of an
``evokeds`` dict (see Notes for details). If ``None``, the current
:doc:`matplotlib color cycle
<matplotlib:gallery/color/color_cycle_default>`
is used. Defaults to ``None``.
linestyles : list | dict | None
Styles to use when plotting the ERP/F lines. If a :class:`list` or
:class:`dict`, elements must be valid :doc:`matplotlib linestyles
<matplotlib:gallery/lines_bars_and_markers/linestyles>`. Lists are
cycled through sequentially; dictionaries must have keys matching the
keys or conditions of an ``evokeds`` dict (see Notes for details). If
``None``, all lines will be solid. Defaults to ``None``.
styles : dict | None
Dictionary of styles to use when plotting ERP/F lines. Keys must match
keys or conditions of ``evokeds``, and values must be a :class:`dict`
of legal inputs to :func:`matplotlib.pyplot.plot`. Those values will be
passed as parameters to the line plot call of the corresponding
condition, overriding defaults (e.g.,
``styles={"Aud/L": {"linewidth": 3}}`` will set the linewidth for
"Aud/L" to 3). As with ``colors`` and ``linestyles``, keys matching
conditions in ``/``-separated ``evokeds`` keys are supported (see Notes
for details).
cmap : None | str | tuple | instance of matplotlib.colors.Colormap
Colormap from which to draw color values when plotting the ERP/F lines
and confidence bands. If not ``None``, ints or floats in the ``colors``
parameter are mapped to steps or percentiles (respectively) along the
colormap. If ``cmap`` is a :class:`str`, it will be passed to
:func:`matplotlib.cm.get_cmap`; if ``cmap`` is a tuple, its first
element will be used as a string to label the colorbar, and its
second element will be passed to :func:`matplotlib.cm.get_cmap` (unless
it is already an instance of :class:`~matplotlib.colors.Colormap`).
.. versionchanged:: 0.19
Support for passing :class:`~matplotlib.colors.Colormap` instances.
vlines : "auto" | list of float
A list of time points (in seconds) at which to plot dashed vertical lines.
If "auto" and the supplied data includes time 0, it is set to [0.]
and a vertical line is plotted at time 0. If an empty list is passed,
no vertical lines are plotted.
ci : float | bool | callable | None
Confidence band around each ERP/F time series. If ``False`` or ``None``
no confidence band is drawn. If :class:`float`, ``ci`` must be between
0 and 1, and will set the threshold for a bootstrap
(single plot)/parametric (when ``axes=='topo'``) estimation of the
confidence band; ``True`` is equivalent to setting a threshold of 0.95
(i.e., the 95%% confidence band is drawn). If a callable, it must take
a single array (n_observations × n_times) as input and return upper and
lower confidence margins (2 × n_times). Defaults to ``True``.
truncate_yaxis : bool | 'auto'
Whether to shorten the y-axis spine. If 'auto', the spine is truncated
at the minimum and maximum ticks. If ``True``, it is truncated at the
multiple of 0.25 nearest to half the maximum absolute value of the
data. If ``truncate_xaxis=False``, only the far bound of the y-axis
will be truncated. Defaults to 'auto'.
truncate_xaxis : bool
Whether to shorten the x-axis spine. If ``True``, the spine is
truncated at the minimum and maximum ticks. If
``truncate_yaxis=False``, only the far bound of the x-axis will be
truncated. Defaults to ``True``.
ylim : dict | None
Y-axis limits for plots (after scaling has been applied). :class:`dict`
keys should match channel types; valid keys are eeg, mag, grad, misc
(example: ``ylim=dict(eeg=[-20, 20])``). If ``None``, the y-axis limits
will be set automatically by matplotlib. Defaults to ``None``.
invert_y : bool
Whether to plot negative values upward (as is sometimes done
for ERPs out of tradition). Defaults to ``False``.
show_sensors : bool | int | str | None
Whether to display an inset showing sensor locations on a head outline.
If :class:`int` or :class:`str`, indicates position of the inset (see
:func:`mpl_toolkits.axes_grid1.inset_locator.inset_axes`). If ``None``,
treated as ``True`` if there is only one channel in ``picks``. If
``True``, location is upper or lower right corner, depending on data
values. Defaults to ``None``.
legend : bool | int | str
Whether to show a legend for the colors/linestyles of the conditions
plotted. If :class:`int` or :class:`str`, indicates position of the
legend (see :func:`mpl_toolkits.axes_grid1.inset_locator.inset_axes`).
If ``True``, equivalent to ``'upper left'``. Defaults to ``True``.
split_legend : bool | None
Whether to separate color and linestyle in the legend. If ``None``,
a separate linestyle legend will still be shown if ``cmap`` is
specified. Defaults to ``None``.
axes : None | Axes instance | list of Axes | 'topo'
:class:`~matplotlib.axes.Axes` object to plot into. If plotting
multiple channel types (or multiple channels when ``combine=None``),
``axes`` should be a list of appropriate length containing
:class:`~matplotlib.axes.Axes` objects. If ``'topo'``, a new
:class:`~matplotlib.figure.Figure` is created with one axis for each
channel, in a topographical layout. If ``None``, a new
:class:`~matplotlib.figure.Figure` is created for each channel type.
Defaults to ``None``.
title : str | None
Title printed above the plot. If ``None``, a title will be
automatically generated based on channel name(s) or type(s) and the
value of the ``combine`` parameter. Defaults to ``None``.
show : bool
Whether to show the figure. Defaults to ``True``.
%(combine)s
If callable, the callable must accept one positional input (data of
shape ``(n_evokeds, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_evokeds, n_times)``. For
example::
combine = lambda data: np.median(data, axis=1)
If ``combine`` is ``None``, channels are combined by computing GFP,
unless ``picks`` is a single channel (not channel type) or
``axes='topo'``, in which cases no combining is performed. Defaults to
``None``.
%(topomap_sphere_auto)s
Returns
-------
fig : list of Figure instances
A list of the figure(s) generated.
Notes
-----
If the parameters ``styles``, ``colors``, or ``linestyles`` are passed as
:class:`dicts <python:dict>`, then ``evokeds`` must also be a
:class:`python:dict`, and
the keys of the plot-style parameters must either match the keys of
``evokeds``, or match a ``/``-separated partial key ("condition") of
``evokeds``. For example, if evokeds has keys "Aud/L", "Aud/R", "Vis/L",
and "Vis/R", then ``linestyles=dict(L='--', R='-')`` will plot both Aud/L
and Vis/L conditions with dashed lines and both Aud/R and Vis/R conditions
with solid lines. Similarly, ``colors=dict(Aud='r', Vis='b')`` will plot
Aud/L and Aud/R conditions red and Vis/L and Vis/R conditions blue.
Color specification depends on whether a colormap has been provided in the
``cmap`` parameter. The following table summarizes how the ``colors``
parameter is interpreted:
.. cssclass:: table-bordered
.. rst-class:: midvalign
+-------------+----------------+------------------------------------------+
| ``cmap`` | ``colors`` | result |
+=============+================+==========================================+
| | None | matplotlib default color cycle; unique |
| | | color for each condition |
| +----------------+------------------------------------------+
| | | matplotlib default color cycle; lowest |
| | list or dict | integer mapped to first cycle color; |
| | of integers | conditions with same integer get same |
| None | | color; unspecified conditions are "gray" |
| +----------------+------------------------------------------+
| | list or dict | ``ValueError`` |
| | of floats | |
| +----------------+------------------------------------------+
| | list or dict | the specified hex colors; unspecified |
| | of hexadecimal | conditions are "gray" |
| | color strings | |
+-------------+----------------+------------------------------------------+
| | None | equally spaced colors on the colormap; |
| | | unique color for each condition |
| +----------------+------------------------------------------+
| | | equally spaced colors on the colormap; |
| | list or dict | lowest integer mapped to first cycle |
| string or | of integers | color; conditions with same integer |
| instance of | | get same color |
| matplotlib +----------------+------------------------------------------+
| Colormap | list or dict | floats mapped to corresponding colormap |
| | of floats | values |
| +----------------+------------------------------------------+
| | list or dict | |
| | of hexadecimal | ``TypeError`` |
| | color strings | |
+-------------+----------------+------------------------------------------+
"""
import matplotlib.pyplot as plt
from ..evoked import Evoked, _check_evokeds_ch_names_times
# build up evokeds into a dict, if it's not already
if isinstance(evokeds, Evoked):
evokeds = [evokeds]
if isinstance(evokeds, (list, tuple)):
evokeds_copy = evokeds.copy()
evokeds = dict()
comments = [_ascii_minus_to_unicode(getattr(_evk, 'comment', None))
for _evk in evokeds_copy]
for idx, (comment, _evoked) in enumerate(zip(comments, evokeds_copy)):
key = str(idx + 1)
if comment: # only update key if comment is non-empty
if comments.count(comment) == 1: # comment is unique
key = comment
else: # comment is non-unique: prepend index
key = f'{key}: {comment}'
evokeds[key] = _evoked
del evokeds_copy
if not isinstance(evokeds, dict):
raise TypeError('"evokeds" must be a dict, list, or instance of '
'mne.Evoked; got {}'.format(type(evokeds).__name__))
evokeds = deepcopy(evokeds) # avoid modifying dict outside function scope
for cond, evoked in evokeds.items():
_validate_type(cond, 'str', 'Conditions')
if isinstance(evoked, Evoked):
evokeds[cond] = [evoked] # wrap singleton evokeds in a list
for evk in evokeds[cond]:
_validate_type(evk, Evoked, 'All evokeds entries ', 'Evoked')
# ensure same channels and times across all evokeds
all_evoked = sum(evokeds.values(), [])
_check_evokeds_ch_names_times(all_evoked)
del all_evoked
# get some representative info
conditions = list(evokeds)
one_evoked = evokeds[conditions[0]][0]
times = one_evoked.times
info = one_evoked.info
sphere = _check_sphere(sphere, info)
tmin, tmax = times[0], times[-1]
# set some defaults
if ylim is None:
ylim = dict()
if vlines == 'auto':
vlines = [0.] if (tmin < 0 < tmax) else []
_validate_type(vlines, (list, tuple), 'vlines', 'list or tuple')
# is picks a channel type (or None)?
orig_picks = deepcopy(picks)
picks, picked_types = _picks_to_idx(info, picks, return_kind=True)
# some things that depend on picks:
ch_names = np.array(one_evoked.ch_names)[picks].tolist()
ch_types = list(_get_channel_types(info, picks=picks, unique=True)
.intersection(_DATA_CH_TYPES_SPLIT + ('misc',)))  # 'misc' e.g. for ICA sources
picks_by_type = channel_indices_by_type(info, picks)
# discard picks from non-data channels (e.g., ref_meg)
good_picks = sum([picks_by_type[ch_type] for ch_type in ch_types], [])
picks = np.intersect1d(picks, good_picks)
if show_sensors is None:
show_sensors = (len(picks) == 1)
_validate_type(combine, types=(None, 'callable', str), item_name='combine')
# cannot combine a single channel
if (len(picks) < 2) and combine is not None:
warn('Only {} channel in "picks"; cannot combine by method "{}".'
.format(len(picks), combine))
# `combine` defaults to GFP unless picked a single channel or axes='topo'
do_topo = isinstance(axes, str) and axes == 'topo'
if combine is None and len(picks) > 1 and not do_topo:
combine = 'gfp'
# convert `combine` into callable (if None or str)
combine_func = _make_combine_callable(combine)
# title
title = _title_helper_pce(title, picked_types, picks=orig_picks,
ch_names=ch_names, combine=combine)
# setup axes
if do_topo:
show_sensors = False
if len(picks) > 70:
logger.info('You are plotting to a topographical layout with >70 '
'sensors. This can be extremely slow. Consider using '
'mne.viz.plot_topo, which is optimized for speed.')
axes = ['topo'] * len(ch_types)
else:
if axes is None:
axes = (plt.subplots(figsize=(8, 6))[1] for _ in ch_types)
elif isinstance(axes, plt.Axes):
axes = [axes]
_validate_if_list_of_axes(axes, obligatory_len=len(ch_types))
if len(ch_types) > 1:
logger.info('Multiple channel types selected, returning one figure '
'per type.')
figs = list()
for ch_type, ax in zip(ch_types, axes):
_picks = picks_by_type[ch_type]
_ch_names = np.array(one_evoked.ch_names)[_picks].tolist()
_picks = ch_type if picked_types else _picks
# don't pass `combine` here; title will run through this helper
# function a second time & it will get added then
_title = _title_helper_pce(title, picked_types, picks=_picks,
ch_names=_ch_names, combine=None)
figs.extend(plot_compare_evokeds(
evokeds, picks=_picks, colors=colors, cmap=cmap,
linestyles=linestyles, styles=styles, vlines=vlines, ci=ci,
truncate_yaxis=truncate_yaxis, ylim=ylim, invert_y=invert_y,
legend=legend, show_sensors=show_sensors,
axes=ax, title=_title, split_legend=split_legend, show=show,
sphere=sphere))
return figs
# colors and colormap. This yields a `styles` dict with one entry per
# condition, specifying at least color and linestyle. THIS MUST BE DONE
# AFTER THE "MULTIPLE CHANNEL TYPES" LOOP
(_styles, _linestyles, _colors, _cmap, colorbar_title,
colorbar_ticks) = _handle_styles_pce(styles, linestyles, colors, cmap,
conditions)
# From now on there is only 1 channel type
assert len(ch_types) == 1
ch_type = ch_types[0]
# some things that depend on ch_type:
units = _handle_default('units')[ch_type]
scalings = _handle_default('scalings')[ch_type]
# prep for topo
pos_picks = picks # need this version of picks for sensor location inset
info = pick_info(info, sel=picks, copy=True)
all_ch_names = info['ch_names']
if not do_topo:
# add vacuous "index" (needed for topo) so same code works for both
axes = [(ax, 0) for ax in axes]
if np.array(picks).ndim < 2:
picks = [picks] # enables zipping w/ axes
else:
from .topo import iter_topography
fig = plt.figure(figsize=(18, 14))
def click_func(
ax_, pick_, evokeds=evokeds, colors=colors,
linestyles=linestyles, styles=styles, cmap=cmap, vlines=vlines,
ci=ci, truncate_yaxis=truncate_yaxis,
truncate_xaxis=truncate_xaxis, ylim=ylim, invert_y=invert_y,
show_sensors=show_sensors, legend=legend,
split_legend=split_legend, picks=picks, combine=combine):
plot_compare_evokeds(
evokeds=evokeds, colors=colors, linestyles=linestyles,
styles=styles, cmap=cmap, vlines=vlines, ci=ci,
truncate_yaxis=truncate_yaxis, truncate_xaxis=truncate_xaxis,
ylim=ylim, invert_y=invert_y, show_sensors=show_sensors,
legend=legend, split_legend=split_legend,
picks=picks[pick_], combine=combine, axes=ax_, show=True,
sphere=sphere)
layout = find_layout(info)
# make sure everything fits nicely. our figsize is (18, 14) so margins
# of 0.25 inch seem OK
w_margin = 0.25 / 18
h_margin = 0.25 / 14
axes_width = layout.pos[0, 2]
axes_height = layout.pos[0, 3]
left_edge = layout.pos[:, 0].min()
right_edge = layout.pos[:, 0].max() + axes_width
bottom_edge = layout.pos[:, 1].min()
top_edge = layout.pos[:, 1].max() + axes_height
# compute scale. Use less of vertical height (leave room for title)
w_scale = (0.95 - 2 * w_margin) / (right_edge - left_edge)
h_scale = (0.9 - 2 * h_margin) / (top_edge - bottom_edge)
# apply transformation
layout.pos[:, 0] = ((layout.pos[:, 0] - left_edge) * w_scale
+ w_margin + 0.025)
layout.pos[:, 1] = ((layout.pos[:, 1] - bottom_edge) * h_scale
+ h_margin + 0.025)
# make sure there is room for a legend axis (sometimes not if only a
# few channels were picked)
data_lefts = layout.pos[:, 0]
data_bottoms = layout.pos[:, 1]
legend_left = data_lefts.max()
legend_bottom = data_bottoms.min()
overlap = np.any(np.logical_and(
np.logical_and(
data_lefts <= legend_left,
legend_left <= (data_lefts + axes_width)),
np.logical_and(
data_bottoms <= legend_bottom,
legend_bottom <= (data_bottoms + axes_height)
)
))
right_edge = legend_left + axes_width
n_columns = (right_edge - data_lefts.min()) / axes_width
scale_factor = n_columns / (n_columns + 1)
if overlap:
layout.pos[:, [0, 2]] *= scale_factor
# `axes` will be a list of (axis_object, channel_index) tuples
axes = list(iter_topography(
info, layout=layout, on_pick=click_func,
fig=fig, fig_facecolor='w', axis_facecolor='w',
axis_spinecolor='k', layout_scale=None, legend=True))
picks = list(picks)
del info
# for each axis, compute the grand average and (maybe) the CI
# (per sensor if topo, otherwise aggregating over sensors)
c_func = None if do_topo else combine_func
all_data = list()
all_cis = list()
for _picks, (ax, idx) in zip(picks, axes):
data_dict = dict()
ci_dict = dict()
for cond in conditions:
this_evokeds = evokeds[cond]
# assign ci_fun first to get arg checking
ci_fun = _get_ci_function_pce(ci, do_topo=do_topo)
# for bootstrap or parametric CIs, skip when only 1 observation
if not callable(ci):
ci_fun = ci_fun if len(this_evokeds) > 1 else None
res = _get_data_and_ci(this_evokeds, combine, c_func, picks=_picks,
scaling=scalings, ci_fun=ci_fun)
data_dict[cond] = res[0]
if ci_fun is not None:
ci_dict[cond] = res[1]
all_data.append(data_dict) # grand means, or indiv. sensors if do_topo
all_cis.append(ci_dict)
del evokeds
# compute ylims
allvalues = list()
for _dict in all_data:
for _array in list(_dict.values()):
allvalues.append(_array[np.newaxis]) # to get same .ndim as CIs
for _dict in all_cis:
allvalues.extend(list(_dict.values()))
allvalues = np.concatenate(allvalues)
norm = np.all(allvalues > 0)
orig_ymin, orig_ymax = ylim.get(ch_type, [None, None])
ymin, ymax = _setup_vmin_vmax(allvalues, orig_ymin, orig_ymax, norm)
del allvalues
# add empty data and title for the legend axis
if do_topo:
all_data.append({cond: np.array([]) for cond in data_dict})
all_cis.append({cond: None for cond in ci_dict})
all_ch_names.append('')
# plot!
for (ax, idx), data, cis in zip(axes, all_data, all_cis):
if do_topo:
title = all_ch_names[idx]
# plot the data
_times = [] if idx == -1 else times
_plot_compare_evokeds(ax, data, conditions, _times, cis, _styles,
title, norm, do_topo)
# draw axes & vlines
skip_axlabel = do_topo and (idx != -1)
_draw_axes_pce(ax, ymin, ymax, truncate_yaxis, truncate_xaxis,
invert_y, vlines, tmin, tmax, units, skip_axlabel)
# add inset scalp plot showing location of sensors picked
if show_sensors:
_validate_type(show_sensors, (np.int64, bool, str, type(None)),
'show_sensors', 'numeric, str, None or bool')
if not _check_ch_locs(info=one_evoked.info, picks=pos_picks):
warn('Cannot find channel coordinates in the supplied Evokeds. '
'Not showing channel locations.')
else:
_evoked_sensor_legend(one_evoked.info, pos_picks, ymin, ymax,
show_sensors, ax, sphere)
# add color/linestyle/colormap legend(s)
if legend:
_draw_legend_pce(legend, split_legend, _styles, _linestyles, _colors,
_cmap, do_topo, ax)
if cmap is not None:
_draw_colorbar_pce(ax, _colors, _cmap, colorbar_title, colorbar_ticks)
# finish
plt_show(show)
return [ax.figure]
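# Hedged usage sketch (added; file names and condition labels are
# assumptions, not part of this module):
#     import mne
#     evokeds = {'aud': mne.read_evokeds('aud-ave.fif'),   # list -> CI drawn
#                'vis': mne.read_evokeds('vis-ave.fif')}
#     mne.viz.plot_compare_evokeds(evokeds, picks='eeg', ci=0.95,
#                                  linestyles={'aud': '-', 'vis': '--'})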
|
wmvanvliet/mne-python
|
mne/viz/evoked.py
|
Python
|
bsd-3-clause
| 110,933
|
[
"Gaussian"
] |
088f41976e11ea4093237efbdb663ad7b98ba540b4a97429ac1c77c396a8546f
|
from __future__ import absolute_import
from .walk import IRWalker
class ConstantsCollection(object):
'''Special collection that compares objects not just by
equality but also by type. Hashing is not used, so
unhashable objects can be stored.
'''
def __init__(self, seq=None):
self.ops = []
if seq is not None:
self.extend(seq)
def add(self, op):
key = self.key_op(op)
if key not in self.ops:
self.ops.append(key)
def remove(self, op):
self.ops.remove(self.key_op(op))
def extend(self, seq):
for el in seq:
self.add(el)
def key_op(self, op):
return type(op), op
def __len__(self):
return len(self.ops)
def __contains__(self, op):
return self.key_op(op) in self.ops
def __iter__(self):
for tp, val in self.ops:
yield val
def index(self, op):
return self.ops.index(self.key_op(op))
class ConstantCollector(IRWalker):
def __init__(self, descend_into_functions=True, skip_unused_constants=True):
super(ConstantCollector, self).__init__()
self.descend_into_functions = descend_into_functions
self.constants = ConstantsCollection()
self.skip_unused_constants = skip_unused_constants
def visit_constant(self, node):
if not (self.skip_unused_constants and node.result_ignored):
self.constants.add(node.value)
def collect_constants(node, descend_into_functions=True, skip_unused_constants=True):
col = ConstantCollector(descend_into_functions, skip_unused_constants)
col.visit(node)
return col.constants
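# Behaviour sketch (added; values are illustrative): unlike a set, this
# collection keeps equal values of different types apart and accepts
# unhashable objects.
#     c = ConstantsCollection([1, 1.0, True])
#     len(c)         # -> 3: int 1, float 1.0 and bool True are all kept
#     1.0 in c       # -> True
#     c.index(True)  # -> 2
#     ConstantsCollection([[1, 2]])  # unhashable lists work as well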
|
matthagy/Jamenson
|
jamenson/compiler/constants.py
|
Python
|
apache-2.0
| 1,678
|
[
"VisIt"
] |
9e3c39e2db160ac169ad7ac011ede5a4e44ec62fa8f3a444168e80e2058b4d97
|
"""
@name: Modules/Web/web_server.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: 2012-2020 by D. Brian Kimmel
@note: Created on Apr 3, 2012
@license: MIT License
@summary: This module provides the web server(s) service of PyHouse.
This is a Main Module - always present.
Open 2 web servers:
an open (plain TCP) server on port 8580,
a secure (TLS) server on port 8588 (optional).
Present a Login screen. A successful login is required to get a main menu screen.
Failure to log in will keep the user on a login screen.
On initial startup allow a house to be created
then rooms
then light controllers
and lights
and buttons
and scenes
then schedules
Do not require reloads, auto change PyHouse on the fly.
"""
__updated__ = '2019-12-30'
# Import system type stuff
from twisted.internet import endpoints
# from twisted.web.resource import Resource
from twisted.web.server import Site
# from twisted.web.template import Element, XMLString, renderer
# from werkzeug.contrib.jsrouting import render_template
from klein import Klein # , route
# Import PyMh files and modules.
from Modules.Computer.Web import web_utils
from Modules.Computer.Web.web_mainpage import MainPage
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.WebServer ')
klein_app = Klein()
@klein_app.route('/')
def root(_request):
return MainPage()
class ClientConnections:
"""This class keeps track of all the connected browsers.
We can update the browser via COMET when a controlled device changes.
(Light On/Off, Pool water low, Garage Door open/Close ...)
"""
def __init__(self):
self.ConnectedBrowsers = []
def add_browser(self, p_login):
self.ConnectedBrowsers.append(p_login)
LOG.warning('Connected to: {}'.format(PrettyFormatAny.form(p_login, 'Login')))
class lightingUtilityWs(ClientConnections):
def start_webservers(self, p_pyhouse_obj):
""" Start Kline web server()
We will always start a TCP server (for now)
We may optionally start a TLS server.
"""
self.start_tcp(p_pyhouse_obj, 'localhost', p_pyhouse_obj.Computer.Web.WebPort)
self.start_tls(p_pyhouse_obj, None, p_pyhouse_obj.Computer.Web.SecurePort)
LOG.info("Started Web Server(s)")
def start_tcp(self, p_pyhouse_obj, p_interface, p_port):
""" Start an HTTP server
Supported arguments: port, interface, backlog.
interface and backlog are optional.
interface is an IP address (belonging to the IPv4 address family) to bind to.
For example:
tcp:port=80:interface=192.168.1.1.
"""
def cb_listen(p_arg):
# LOG.debug('{}'.format(PrettyFormatAny.form(p_arg, 'Arg', 190)))
pass
def eb_listen_error(p_reason):
LOG.error(p_reason)
pass
l_reactor = p_pyhouse_obj._Twisted.Reactor
_l_app = p_pyhouse_obj._Twisted.Application
# l_app = Klein()
p_pyhouse_obj._Twisted.Application = klein_app
# LOG.debug('{}'.format(PrettyFormatAny.form(klein_app, 'KleinApp', 190)))
l_endpoint_description = 'tcp'
l_endpoint_description += ':port={}'.format(p_port)
if p_interface is not None:
l_endpoint_description += ':interface={}'.format(p_interface)
LOG.debug("TCP Endpoint: {}".format(l_endpoint_description))
l_endpoint = endpoints.serverFromString(l_reactor, l_endpoint_description)
# LOG.debug('{}'.format(PrettyFormatAny.form(l_endpoint, 'Endpoint', 190)))
l_server = l_endpoint.listen(Site(klein_app.resource()))
l_server.addCallback(cb_listen)
l_server.addErrback(eb_listen_error)
# LOG.debug('{}'.format(PrettyFormatAny.form(l_server, 'Server', 190)))
p_pyhouse_obj.Computer.Web.WebServer = l_server
# print(PrettyFormatAny.form(l_server, 'WebServer'))
LOG.info("Started TCP web server - {}".format(l_endpoint))
def start_tls(self, p_pyhouse_obj, p_host, p_port):
""" Start an HTTPS server (TLS)
All TCP arguments are supported, plus: certKey, privateKey, extraCertChain, sslmethod, and dhParameters.
certKey (optional, defaults to the value of privateKey) gives a filesystem path to a certificate (PEM format).
privateKey gives a filesystem path to a private key (PEM format).
extraCertChain gives a filesystem path to a file with one or more concatenated certificates in PEM format that establish the chain from a root CA to the one that signed your certificate.
sslmethod indicates which SSL/TLS version to use (a value like TLSv1_METHOD).
dhParameters gives a filesystem path to a file in PEM format with parameters that are required for Diffie-Hellman key exchange.
Since this is required for the DHE family of ciphers that offer perfect forward secrecy (PFS), it is recommended to specify one.
Such a file can be created using openssl dhparam -out dh_param_1024.pem -2 1024.
Please refer to OpenSSL’s documentation on dhparam for further details.
For example:
ssl:port=443:privateKey=/etc/ssl/server.pem:extraCertChain=/etc/ssl/chain.pem:sslmethod=SSLv3_METHOD:dhParameters=dh_param_1024.pem.
You can use the endpoint: feature with TCP if you want to connect to a host name;
for example, if your DNS is not working, but you know that the IP address 7.6.5.4 points to awesome.site.example.com, you could specify:
tls:awesome.site.example.com:443:endpoint=tcp\:7.6.5.4\:443.
"""
_l_reactor = p_pyhouse_obj._Twisted.Reactor
_l_app = p_pyhouse_obj._Twisted.Application
l_endpoint_description = 'tls:'
if p_host is not None:
l_endpoint_description += '{}:'.format(p_host)
if p_port is not None:
l_endpoint_description += '{}'.format(p_port)
LOG.debug("TLS Endpoint: {}".format(l_endpoint_description))
# l_certData = getModule(__name__).filePath.sibling('server.pem').getContent()
# l_certificate = ssl.PrivateCertificate.loadPEM(l_certData)
# l_factory = protocol.Factory.forProtocol(echoserv.Echo)
# p_pyhouse_obj._Twisted.Reactor.listenSSL(8000, l_factory, l_certificate.options())
return
class Api(lightingUtilityWs):
def __init__(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
self.State = web_utils.WS_IDLE
self.m_web_running = False
p_pyhouse_obj._Twisted.Application = Klein()
LOG.info('Initialized.')
def LoadXml(self, p_pyhouse_obj):
pass
def Start(self):
LOG.info('Starting web servers.')
self.start_webservers(self.m_pyhouse_obj)
LOG.info('Started.')
def SaveXml(self, p_xml):
pass
def Stop(self):
LOG.info('Stopped.')
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Computer/Web/web_server.py
|
Python
|
mit
| 7,036
|
[
"Brian"
] |
391378546aa883001fcce77710d7bae17b8ecde43c6cd3dff64a7676bda66283
|
## INFO ########################################################################
## ##
## COUBLET ##
## ======= ##
## ##
## Cross-platform desktop client to follow posts from COUB ##
## Version: 0.6.93.172 (20140814) ##
## ##
## File: resources/artwork/source/loader/gif.py ##
## ##
## Designed and written by Peter Varo. Copyright (c) 2014 ##
## License agreement is provided in the LICENSE file ##
## For more info visit: https://github.com/petervaro/coub ##
## ##
## Copyright (c) 2014 Coub Ltd and/or its suppliers and licensors, ##
## 5 Themistokli Dervi Street, Elenion Building, 1066 Nicosia, Cyprus. ##
## All rights reserved. COUB (TM) is a trademark of Coub Ltd. ##
## http://coub.com ##
## ##
######################################################################## INFO ##
from os import system
# Requires 'imagemagick'
system('convert -delay 8 -loop 0 dark/*.png ../../motion/dark_loader.gif')
system('convert -delay 8 -loop 0 light/*.png ../../motion/light_loader.gif')
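# Note (added): ImageMagick's `-delay` is measured in ticks of 1/100 s, so
# `-delay 8` shows each frame for 0.08 s (~12.5 fps); `-loop 0` makes the
# resulting GIF repeat forever.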
|
petervaro/coublet
|
resources/artwork/source/loader/gif.py
|
Python
|
mit
| 1,821
|
[
"VisIt"
] |
2604aae8b0acfabb987b1417dccccfaed4fce86c4d798813c2da39bbdf29409d
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Creates functionals (classes) from a method. """
import sys
if sys.version_info.major == 2:
from inspect import getargspec as func_signature
def __func_name(func):
return func.func_name
def __kwargs(initargs):
return initargs.keywords
else:
from inspect import getfullargspec as func_signature
def __func_name(func):
return func.__name__
def __kwargs(initargs):
return initargs.varkw
def create_initstring(classname, base, method, excludes):
""" Creates a string defining the __init__ method. """
# creates line: def __init__(self, ...):
# keywords are deduced from arguments with defaults.
# others will not be added.
args = func_signature(method)
result = "def __init__(self"
if args.defaults is not None:
nargs = len(args.args) - len(args.defaults)
for key, value in zip(args.args[nargs:], args.defaults):
if key in excludes:
continue
result += ", {0}={1!r}".format(key, value)
result += ", copy=None, **kwargs):\n"
# adds standard doc string.
result +=\
" \"\"\" Initializes {0} instance.\n\n" \
" This function is created automagically from\n" \
" :py:func:`{1.__module__}.{3}`. Please see that function\n" \
" for the description of its parameters.\n\n" \
" :param {2.__name__} copy:\n" \
" Deep-copies attributes from this instance to the new (derived)\n" \
" object. This parameter makes easy to create meta-functional from\n"\
" the most basic wrappers.\n" \
" \"\"\"\n".format(classname, method, base, __func_name(method))
# creates line: from copy import deepcopy
# used by the copy keyword argument below.
result += " from copy import deepcopy\n"
# creates line: super(BASECLASS, self).__init__(...)
# arguments are taken from BASECLASS.__init__
result += " super(self.__class__, self).__init__("
initargs = func_signature(base.__init__)
if initargs.args is not None and len(initargs.args) > 1:
# first add args without defaults.
# fails if not present in method's default arguments.
ninitargs = len(initargs.args) - len(initargs.defaults or ())
for i, key in enumerate(initargs.args[1:ninitargs]):
if key in excludes:
raise Exception('Cannot ignore {1} when synthesizing {0}.'.format(classname, key))
if key not in args.args[nargs:]:
raise Exception(
'Could not synthesize {0}. Missing default argument.'.format(classname))
result += ", {0}".format(key)
if initargs.defaults is not None and args.defaults is not None:
# then add keyword arguments, ignoring those that are not in the method
# (or that are excluded, since those are absent from the new signature)
for key in initargs.args[ninitargs:]:
if key in args.args[nargs:] and key not in excludes:
result += ", {0} = {0}".format(key)
# add a keyword dict if present in initargs
keywords = __kwargs(initargs)
if keywords is not None or initargs.defaults is not None:
result += ', **kwargs'
result += ')\n\n'
# deals with issues on how to print first argument.
result = result.replace('(, ', '(')
# create lines: self.attr = value
# where attr is something in method which is not in baseclass.__init__
if args.defaults is not None:
for key, value in zip(args.args[nargs:], args.defaults):
if key in excludes or key in initargs.args:
continue
result += " self.{0} = {0}\n".format(key)
# create lines which deep-copies base-class attributes to new derived attributes,
# eg, using copy. Does not include previously set parameters and anything in
# excludes.
avoid = set(initargs.args[:ninitargs]) | set(args.args[nargs:]) | set(excludes)
result += " if copy is not None:\n" \
" avoid = {0!r}\n" \
" for key, value in copy.__dict__.items():\n" \
" if key not in avoid and key not in kwargs:\n" \
" setattr(self, key, deepcopy(value))\n" \
.format(avoid)
return result
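# Worked sketch (added; the method name is hypothetical): for
#     def relax(self, structure, outdir=None, comm=None, maxiter=10): ...
# and the default excludes, the synthesized source starts roughly as
#     def __init__(self, maxiter=10, copy=None, **kwargs):
#         super(self.__class__, self).__init__(...)
#         self.maxiter = maxiter
# i.e. only defaulted, non-excluded arguments become instance attributes.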
def create_iter(iter, excludes):
""" Creates the iterator method. """
# make stateless.
result = "from pylada.tools import stateless, assign_attributes\n"\
"@assign_attributes(ignore=['overwrite'])\n@stateless\n"
# creates line: def iter(self, ...):
# keywords are deduced from arguments with defaults.
# others will not be added.
args = func_signature(iter)
result += "def iter(self"
if args.args is not None and len(args.args) > 1:
# first add arguments without default (except for first == self).
nargs = len(args.args) - len(args.defaults)
for key in args.args[1:nargs]:
result += ", {0}".format(key)
if args.args is not None and len(args.args) > 1:
# then add arguments with default
nargs = len(args.args) - len(args.defaults)
for key, value in zip(args.args[nargs:], args.defaults):
if key in excludes:
result += ", {0}={1!r}".format(key, value)
# then add kwargs.,
result += ", **kwargs):\n"
# adds standard doc string.
doc = iter.__doc__
if doc is not None and '\n' in doc:
first_line = doc[:doc.find('\n')].rstrip().lstrip()
result +=\
" \"\"\"{0}\n\n" \
" This function is created automagically from " \
":py:func:`{2} <{1.__module__}.{2}>`.\n" \
" Please see that function for the description of its parameters.\n"\
" \"\"\"\n"\
.format(first_line, iter, __func_name(iter))
# import iterations method
result += " from pylada.tools import SuperCall\n"
result += " from {0.__module__} import {1}\n".format(iter, __func_name(iter))
# add iteration line:
result += " for o in {0}(SuperCall(self.__class__, self)" \
.format(__func_name(iter))
if args.args is not None and len(args.args) > 1:
# first add arguments without default (except for first == self).
nargs = len(args.args) - len(args.defaults)
for key in args.args[1:nargs]:
result += ", {0}".format(key)
if args.args is not None and len(args.args) > 1:
# then add arguments with default
nargs = len(args.args) - len(args.defaults)
for key in args.args[nargs:]:
if key in excludes:
result += ", {0}={0}".format(key)
else:
result += ", {0}=self.{0}".format(key)
# adds arguments to overloaded function.
keywords = __kwargs(args)
if keywords is not None:
result += ", **kwargs"
result += "): yield o\n"
return result
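# Worked sketch (added; names are hypothetical): for
#     def iter_relax(self, structure, outdir=None, comm=None,
#                    maxiter=10, **kwargs): ...
# the synthesized source is roughly
#     @assign_attributes(ignore=['overwrite'])
#     @stateless
#     def iter(self, structure, outdir=None, comm=None, **kwargs):
#         for o in iter_relax(SuperCall(self.__class__, self), structure,
#                             outdir=outdir, comm=comm,
#                             maxiter=self.maxiter, **kwargs): yield o
# i.e. excluded keywords stay call-time arguments, while the remaining
# defaulted ones are read from instance attributes.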
def create_call_from_iter(iter, excludes):
""" Creates a call method relying on existence of iter method. """
# creates line: def call(self, ...):
# keywords are deduced from arguments with defaults.
# others will not be added.
args = func_signature(iter)
callargs = ['self']
if args.args is not None and len(args.args) > 1:
# first add arguments without default (except for first == self).
nargs = len(args.args) - len(args.defaults)
for key in args.args[1:nargs]:
callargs.append(str(key))
if args.args is not None and len(args.args) > 1:
# then add arguments with default
nargs = len(args.args) - len(args.defaults)
for key, value in zip(args.args[nargs:], args.defaults):
if key in excludes:
callargs.append("{0}={1!r}".format(key, value))
# then add kwargs,
if args.args is None or 'comm' not in args.args:
callargs.append('comm=None')
keywords = __kwargs(args)
if keywords is not None:
callargs.append('**' + keywords)
result = "def __call__({0}):\n".format(', '.join(callargs))
# adds standard doc string.
doc = iter.__doc__
if doc is not None and '\n' in doc:
first_line = doc[:doc.find('\n')].rstrip().lstrip()
result += \
" \"\"\"{0}\n\n" \
" This function is created automagically from\n" \
" :py:func:`{1.__module__}.{2}`. Please see that \n" \
" function for the description of its parameters.\n\n" \
" :param comm:\n" \
" Additional keyword argument defining how call external\n" \
" programs.\n" \
" :type comm: :py:class:`~pylada.process.mpi.Communicator`\n\n" \
" \"\"\"\n" \
.format(first_line, iter, __func_name(iter))
# add iteration line:
iterargs = []
if args.args is not None and len(args.args) > 1:
# first add arguments without default (except for first == self).
nargs = len(args.args) - len(args.defaults)
for key in args.args[1:nargs]:
iterargs.append("{0}".format(key))
if args.args is not None and len(args.args) > 1:
# then add arguments with default
nargs = len(args.args) - len(args.defaults)
for key in args.args[nargs:]:
if key in excludes:
iterargs.append("{0}={0}".format(key))
# adds arguments to overloaded function.
if args.args is None or 'comm' not in args.args:
iterargs.append('comm=comm')
keywords = __kwargs(args)
if keywords is not None:
iterargs.append("**" + keywords)
result += " result = None\n" \
" for program in self.iter({0}):\n" \
" if getattr(program, 'success', False):\n" \
" result = program\n" \
" continue\n" \
" if not hasattr(program, 'start'):\n" \
" return program\n" \
" program.start(comm)\n" \
" program.wait()\n" \
" return result".format(', '.join(iterargs))
return result
def create_call(call, excludes):
""" Creates the call method. """
# make stateless.
result = "from pylada.tools import stateless, assign_attributes\n"\
"@assign_attributes(ignore=['overwrite'])\n@stateless\n"
# creates line: def iter(self, ...):
# keywords are deduced from arguments with defaults.
# others will not be added.
args = func_signature(call)
result += "def __call__(self"
if args.args is not None and len(args.args) > 1:
# first add arguments without default (except for first == self).
nargs = len(args.args) - len(args.defaults)
for key in args.args[1:nargs]:
result += ", {0}".format(key)
if args.args is not None and len(args.args) > 1:
# then add arguments with default
nargs = len(args.args) - len(args.defaults)
for key, value in zip(args.args[nargs:], args.defaults):
if key in excludes:
result += ", {0}={1!r}".format(key, value)
# then add kwargs.,
result += ", **kwargs):\n"
# adds standard doc string.
doc = call.__doc__
if doc is not None and '\n' in doc:
first_line = doc[:doc.find('\n')].rstrip().lstrip()
result +=\
" \"\"\"{0}\n\n" \
" This function is created automagically from " \
" {1.__module__}.{2}. Please see that function for the\n"\
" description of its parameters.\n\n" \
" \"\"\"\n" \
.format(first_line, call, __func_name(call))
# import iterations method
result += " from pylada.tools import SuperCall\n".format(call)
result += " from {0.__module__} import {1}\n".format(call, __func_name(call))
# add iteration line:
result += " return {1}(SuperCall(self.__class__, self)".format(call, __func_name(call))
if args.args is not None and len(args.args) > 1:
# first add arguments without default (except for first == self).
nargs = len(args.args) - len(args.defaults)
for key in args.args[1:nargs]:
result += ", {0}".format(key)
if args.args is not None and len(args.args) > 1:
# then add arguments with default
nargs = len(args.args) - len(args.defaults)
for key in args.args[nargs:]:
if key in excludes:
result += ", {0}={0}".format(key)
else:
result += ", {0}=self.{0}".format(key)
result = result.replace('(, ', '(')
# adds arguments to overloaded function.
keywords = __kwargs(args)
if keywords is not None:
result += ", **kwargs"
result += ")\n"
return result
def makeclass(classname, base, iter=None, call=None,
doc=None, excludes=None, module=None):
""" Creates a class from a function.
Makes it easy to create a class which works just like the input method.
This means we don't have to write the boiler plate methods of a class,
such as `__init__`. Instead, one can focus on writing a function which
takes a functional and does something special with it, and then at the
last minute create an actual derived class from the method and the
functional. It is used for instance in :py:class:`vasp.Relax
<pylada.vasp.Relax>`. The parameters from the method which have defaults
become attributes of instances of this class. Instances can be called as
one would call the base functional, except of course the job of the
method is done.
:param str classname:
Name of the resulting class.
:param type base:
Base class, e.g. for a method using VASP, this would be
:py:class:`Vasp <pylada.vasp.Vasp>`.
:param function iter:
The iteration version of the method being wrapped into a class, e.g.
would override :py:meth:`Vasp.iter <pylada.vasp.Vasp.iter>`. Ignored if
None.
:param function call:
The __call__ version of the method being wrapped into a class, e.g.
would override :py:meth:`Vasp.__call__ <pylada.vasp.Vasp.__call__>`.
Ignored if None.
:param str doc:
Docstring of the class. Ignored if None.
:param list excludes:
List of strings indicating arguments (with defaults) of the methods
which should *not* be turned into an attribute. If None, defaults to
``['structure', 'outdir', 'comm']``.
:param str module:
Name of the module within which this class will reside.
:return: A new class derived from ``base`` but implementing the methods
given on input. Furthermore it contains an `Extract` class-attribute
coming from either ``iter``, ``call``, ``base``, in that order.
"""
basemethod = iter if iter is not None else call
if basemethod is None:
raise ValueError('One of iter or call should not be None.')
if excludes is None:
excludes = ['structure', 'outdir', 'comm']
# dictionary which will hold all synthesized functions.
funcs = {}
# creates __init__
exec(create_initstring(classname, base, basemethod, excludes), funcs)
if iter is not None:
exec(create_iter(iter, excludes), funcs)
if call is not None:
exec(create_call(call, excludes), funcs)
elif iter is not None:
exec(create_call_from_iter(iter, excludes), funcs)
d = {'__init__': funcs['__init__']}
if call is not None or iter is not None:
d['__call__'] = funcs['__call__']
if iter is not None:
d['iter'] = funcs['iter']
if doc is not None and len(doc.rstrip().lstrip()) > 0:
d['__doc__'] = doc + "\n\nThis class was automagically generated by "\
":py:func:`pylada.tools.makeclass`."
if hasattr(iter, 'Extract'):
d['Extract'] = iter.Extract
elif hasattr(call, 'Extract'):
d['Extract'] = call.Extract
elif hasattr(base, 'Extract'):
d['Extract'] = base.Extract
if module is not None:
d['__module__'] = module
return type(classname, (base,), d)
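# Hedged usage sketch (added; the method is hypothetical, the base class is
# the one referenced in the docstring above):
#     from pylada.vasp import Vasp
#     def iter_relax(self, structure, outdir=None, comm=None,
#                    maxiter=10, **kwargs):
#         """ Iterates relaxation steps. """
#         ...
#     Relax = makeclass('Relax', Vasp, iter=iter_relax,
#                       doc='Relaxes a structure repeatedly.')
#     relax = Relax(maxiter=20)                  # maxiter became an attribute
#     result = relax(structure, outdir='relax')  # drives iter to completion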
def makefunc(name, iter, module=None):
""" Creates function from iterable. """
# creates header line of function calls.
# keywords are deduced from arguments with defaults.
# others will not be added.
args = func_signature(iter)
funcstring = "def {0}(".format(name)
callargs = []
if args.args is not None and len(args.args) > 0:
# first add arguments without default (except for first == self).
nargs = len(args.args) - len(args.defaults)
for key in args.args[:nargs]:
callargs.append(str(key))
if args.args is not None and len(args.args) > 0:
# then add arguments with default
nargs = len(args.args) - len(args.defaults)
for key, value in zip(args.args[nargs:], args.defaults):
callargs.append("{0}={1!r}".format(key, value))
# adds comm keyword if does not already exist.
if 'comm' not in args.args:
callargs.append('comm=None')
# adds **kwargs keyword if necessary.
keywords = __kwargs(args)
if keywords is not None:
callargs.append('**{0}'.format(keywords))
funcstring = "def {0}({1}):\n".format(name, ', '.join(callargs))
# adds standard doc string.
doc = iter.__doc__
if doc is not None and '\n' in doc:
first_line = doc[:doc.find('\n')].rstrip().lstrip()
funcstring +=\
" \"\"\"{0}\n\n" \
" This function is created automagically from " \
" {1.__module__}.{2}. Please see that function for the\n"\
" description of its parameters.\n\n" \
" :param comm:\n" \
" Additional keyword argument defining how call external\n" \
" programs.\n" \
" :type comm: :py:class:`~pylada.process.mpi.Communicator`\n\n" \
" \"\"\"\n"\
.format(first_line, iter, __func_name(iter))
# create function body...
funcstring += " from {0.__module__} import {1}\n"\
" for program in {1}(".format(iter, __func_name(iter))
# ... including detailed call to iterator function.
iterargs = []
if args.args is not None and len(args.args) > 0:
for key in args.args:
iterargs.append("{0}".format(key))
if args.args is None or 'comm' not in args.args:
iterargs.append('comm=comm')
keywords = __kwargs(args)
if keywords is not None:
iterargs.append('**' + keywords)
funcstring += "{0}):\n" \
" if getattr(program, 'success', False):\n" \
" result = program\n" \
" continue\n" \
" if not hasattr(program, 'start'): return program\n" \
" program.start(comm)\n" \
" program.wait()\n" \
" return result".format(', '.join(iterargs))
funcs = {}
exec(funcstring, funcs)
if module is not None:
funcs[name].__module__ = module
return funcs[name]
|
pylada/pylada-light
|
src/pylada/tools/makeclass.py
|
Python
|
gpl-3.0
| 22,192
|
[
"CRYSTAL",
"VASP"
] |
a59e0b8490a6c51115047f012462dc63755b0ce4368b12d9e579d92c343229a9
|
"""
Actions for MayaVi2 UI
"""
#Author: Martin Weier
#Copyright (C) 2006 California Institute of Technology
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# Standard library imports
from os.path import isfile
# Enthought library imports
from enthought.pyface import FileDialog, OK
# Mayavi plugin imports
from enthought.mayavi.core.common import error
from enthought.mayavi.action.common import WorkbenchAction, get_imayavi
class OpenVTKAction(WorkbenchAction):
"""
Open a VTK file.
"""
def perform(self):
"""Performs the action. """
wildcard = 'VTK files (*.vtk)|*.vtk|' + FileDialog.WILDCARD_ALL
parent = self.window.control
dialog = FileDialog(parent=parent,
title='Open CitcomS VTK file',
action='open',
wildcard=wildcard)
if dialog.open() == OK:
if isfile(dialog.path):
from citcoms_display.plugins.VTKFileReader import VTKFileReader
r = VTKFileReader()
r.initialize(dialog.path)
mv = get_imayavi(self.window)
mv.add_source(r)
else:
error("File '%s' does not exist!" % dialog.path, parent)
return
class OpenHDF5Action(WorkbenchAction):
"""
Open an HDF5 file.
"""
def perform(self):
""" Performs the action. """
wildcard = 'HDF5 files (*.h5)|*.h5|' + FileDialog.WILDCARD_ALL
parent = self.window.control
dialog = FileDialog(parent=parent,
title='Open CitcomS HDF5 file',
action='open',
wildcard=wildcard)
if dialog.open() == OK:
if isfile(dialog.path):
from citcoms_display.plugins.HDF5FileReader import HDF5FileReader
r = HDF5FileReader()
r.initialize(dialog.path)
mv = get_imayavi(self.window)
mv.add_source(r)
else:
error("File '%s' does not exist!" % dialog.path, parent)
return
class ReduceFilterAction(WorkbenchAction):
"""
Add a ReduceFilter to the mayavi pipeline.
"""
def perform(self):
""" Performs the action. """
from citcoms_display.plugins.ReduceFilter import ReduceFilter
f = ReduceFilter()
mv = get_imayavi(self.window)
mv.add_filter(f)
class ShowCapsFilterAction(WorkbenchAction):
"""
Add a ShowCapsFilter to the mayavi pipeline
"""
def perform(self):
""" Performs the action. """
from citcoms_display.plugins.ShowCapsFilter import ShowCapsFilter
f = ShowCapsFilter()
mv = get_imayavi(self.window)
mv.add_filter(f)
|
geodynamics/citcoms
|
visual/Mayavi2/citcoms_display/actions.py
|
Python
|
gpl-2.0
| 3,506
|
[
"Mayavi",
"VTK"
] |
ed4a85ea569697e42bc2953f170417dea1e236126a9493e6f94011fe2cd22d01
|
#!/usr/bin/env python2
#
# Copyright (C) 2013-2018
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
#
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import unittest
class TestAdResS(unittest.TestCase):
def setUp(self):
# set up system
system = espressopp.System()
box = (10, 10, 10)
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = 0.3
system.comm = MPI.COMM_WORLD
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size,box,rc=1.5,skin=system.skin)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc=1.5, skin=system.skin)
system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
self.system = system
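# Note (added): in the tests below, dEx=2.0 and dHy=1.0 define an
# atomistic (explicit) region of half-width 2.0 around the AdResS center
# plus a 1.0-wide hybrid shell; farther out, only the coarse-grained
# potential acts.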
def test_slab(self):
# add some particles
particle_list = [
(1, 1, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 0),
(2, 1, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 0),
(3, 1, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 0),
(4, 1, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 0),
(5, 1, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 0),
(6, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 1),
(7, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 1),
(8, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 1),
(9, 0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 1),
(10, 0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 1),
]
tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'pos', 'mass', 'adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=2.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=False)
# add interaction
interNB = espressopp.interaction.VerletListAdressLennardJones2(vl, ftpl)
potWCA1 = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift='auto', cutoff=1.4)
potWCA2 = espressopp.interaction.LennardJones(epsilon=0.5, sigma=1.0, shift='auto', cutoff=1.4)
interNB.setPotentialAT(type1=0, type2=0, potential=potWCA1) # AT
interNB.setPotentialCG(type1=1, type2=1, potential=potWCA2) # CG
self.system.addInteraction(interNB)
# initialize lambda values
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
adress = espressopp.integrator.Adress(self.system,vl,ftpl)
integrator.addExtension(adress)
espressopp.tools.AdressDecomp(self.system, integrator)
# coordinates and non-bonded energy of particles before integration
before = [self.system.storage.getParticle(i).pos[j] for i in range(1,6) for j in range(3)]
energy_before = interNB.computeEnergy()
# run ten steps
integrator.run(10)
# coordinates and non-bonded energy of particles after integration
after = [self.system.storage.getParticle(i).pos[j] for i in range(1,6) for j in range(3)]
energy_after = interNB.computeEnergy()
# run checks (Particles should move along the x-axis only given their initial configuration. Additionally, check energies)
self.assertAlmostEqual(after[0], 5.413171, places=5)
self.assertEqual(before[1], after[1])
self.assertEqual(before[2], after[2])
self.assertAlmostEqual(after[3], 6.500459, places=5)
self.assertEqual(before[4], after[4])
self.assertEqual(before[5], after[5])
self.assertAlmostEqual(after[6], 7.522099, places=5)
self.assertEqual(before[7], after[7])
self.assertEqual(before[8], after[8])
self.assertAlmostEqual(after[9], 8.512569, places=5)
self.assertEqual(before[10], after[10])
self.assertEqual(before[11], after[11])
self.assertAlmostEqual(after[12], 9.551701, places=5)
self.assertEqual(before[13], after[13])
self.assertEqual(before[14], after[14])
self.assertAlmostEqual(energy_before,1.266889, places=5)
self.assertAlmostEqual(energy_after, -0.209015, places=5)
def test_fixed_sphere(self):
# add some particles
particle_list = [
(1, 1, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 0),
(2, 1, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 0),
(3, 1, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 0),
(4, 1, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 0),
(5, 1, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 0),
(6, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 1),
(7, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 1),
(8, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 1),
(9, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 1),
(10, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 1),
]
tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'pos', 'mass', 'adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=2.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=True)
# add interaction
interNB = espressopp.interaction.VerletListAdressLennardJones2(vl, ftpl)
potWCA1 = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift='auto', cutoff=1.4)
potWCA2 = espressopp.interaction.LennardJones(epsilon=0.5, sigma=1.0, shift='auto', cutoff=1.4)
interNB.setPotentialAT(type1=0, type2=0, potential=potWCA1) # AT
interNB.setPotentialCG(type1=1, type2=1, potential=potWCA2) # CG
self.system.addInteraction(interNB)
# initialize lambda values
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
adress = espressopp.integrator.Adress(self.system,vl,ftpl)
integrator.addExtension(adress)
espressopp.tools.AdressDecomp(self.system, integrator)
# coordinates and non-bonded energy of particles before integration
before = [self.system.storage.getParticle(i).pos[j] for i in range(1,6) for j in range(3)]
energy_before = interNB.computeEnergy()
# run ten steps
integrator.run(10)
# coordinates and non-bonded energy of particles after integration
after = [self.system.storage.getParticle(i).pos[j] for i in range(1,6) for j in range(3)]
energy_after = interNB.computeEnergy()
# run checks (particles should move along the y-axis only, given their initial configuration)
self.assertEqual(before[0], after[0])
self.assertAlmostEqual(after[1], 5.413171, places=5)
self.assertEqual(before[2], after[2])
self.assertEqual(before[3], after[3])
self.assertAlmostEqual(after[4], 6.500459, places=5)
self.assertEqual(before[5], after[5])
self.assertEqual(before[6], after[6])
self.assertAlmostEqual(after[7], 7.522099, places=5)
self.assertEqual(before[8], after[8])
self.assertEqual(before[9], after[9])
self.assertAlmostEqual(after[10], 8.512569, places=5)
self.assertEqual(before[11], after[11])
self.assertEqual(before[12], after[12])
self.assertAlmostEqual(after[13], 9.551701, places=5)
self.assertEqual(before[14], after[14])
self.assertAlmostEqual(energy_before, 1.266890, places=5)
self.assertAlmostEqual(energy_after, -0.209015, places=5)
def test_moving_sphere(self):
# add some particles
particle_list = [
(1, 1, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 0),
(2, 1, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 0),
(3, 1, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 0),
(4, 1, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 0),
(5, 1, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 0),
(6, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 1),
(7, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 1),
(8, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 1),
(9, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 1),
(10, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 1),
]
tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'pos', 'mass','adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=2.0, dHy=1.0, pids=[1], sphereAdr=True)
# add interaction
interNB = espressopp.interaction.VerletListAdressLennardJones2(vl, ftpl)
potWCA1 = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift='auto', cutoff=1.4)
potWCA2 = espressopp.interaction.LennardJones(epsilon=0.5, sigma=1.0, shift='auto', cutoff=1.4)
interNB.setPotentialAT(type1=0, type2=0, potential=potWCA1) # AT
interNB.setPotentialCG(type1=1, type2=1, potential=potWCA2) # CG
self.system.addInteraction(interNB)
# initialize lambda values
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
adress = espressopp.integrator.Adress(self.system,vl,ftpl)
integrator.addExtension(adress)
espressopp.tools.AdressDecomp(self.system, integrator)
# coordinates and non-bonded energy of particles before integration
before = [self.system.storage.getParticle(i).pos[j] for i in range(1,6) for j in range(3)]
energy_before = interNB.computeEnergy()
# run ten steps
integrator.run(10)
# coordinates and non-bonded energy of particles after integration
after = [self.system.storage.getParticle(i).pos[j] for i in range(1,6) for j in range(3)]
energy_after = interNB.computeEnergy()
# run checks (particles should move along the y-axis only, given their initial configuration)
self.assertEqual(before[0], after[0])
self.assertAlmostEqual(after[1], 5.409062, places=5)
self.assertEqual(before[2], after[2])
self.assertEqual(before[3], after[3])
self.assertAlmostEqual(after[4], 6.488613, places=5)
self.assertEqual(before[5], after[5])
self.assertEqual(before[6], after[6])
self.assertAlmostEqual(after[7], 7.533786, places=5)
self.assertEqual(before[8], after[8])
self.assertEqual(before[9], after[9])
self.assertAlmostEqual(after[10], 8.516598, places=5)
self.assertEqual(before[11], after[11])
self.assertEqual(before[12], after[12])
self.assertAlmostEqual(after[13], 9.551941, places=5)
self.assertEqual(before[14], after[14])
self.assertAlmostEqual(energy_before,1.382061, places=5)
self.assertAlmostEqual(energy_after, -0.320432, places=5)
def test_ATATCG_template(self):
# add some particles
particle_list = [
(1, 1, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 0),
(2, 1, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 0),
(3, 1, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 0),
(4, 1, 0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 0),
(5, 1, 0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 0),
(6, 0, 1.0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 1),
(7, 0, 1.0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 1),
(8, 0, 1.0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 1),
(9, 0, 1.0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 1),
(10, 0, 1.0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 1),
]
tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=2.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=False)
# add interactions
interNB = espressopp.interaction.VerletListAdressATLJReacFieldGenHarmonic(vl, ftpl)
potLJ = espressopp.interaction.LennardJones(epsilon=0.650299305951, sigma=0.316549165245, shift='auto', cutoff=1.4)
potQQ = espressopp.interaction.ReactionFieldGeneralized(prefactor=138.935485, kappa=0.0, epsilon1=1.0, epsilon2=80.0, cutoff= 1.4, shift="auto")
potCG = espressopp.interaction.Harmonic(K=500.0, r0=1.4, cutoff=1.4)
interNB.setPotentialAT1(type1=0, type2=0, potential=potLJ)
interNB.setPotentialAT2(type1=0, type2=0, potential=potQQ)
interNB.setPotentialCG(type1=1, type2=1, potential=potCG)
self.system.addInteraction(interNB)
# set up integrator
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
adress = espressopp.integrator.Adress(self.system, vl, ftpl)
integrator.addExtension(adress)
espressopp.tools.AdressDecomp(self.system, integrator)
# coordinates and non-bonded energy of particles before integration
before = [self.system.storage.getParticle(i).pos[j] for i in range(1,6) for j in range(3)]
energy_before = interNB.computeEnergy()
# run ten steps and compute energy
integrator.run(10)
# coordinates and non-bonded energy of particles after integration
after = [self.system.storage.getParticle(i).pos[j] for i in range(1,6) for j in range(3)]
energy_after = interNB.computeEnergy()
# run checks
self.assertAlmostEqual(after[0], 5.004574, places=5)
self.assertEqual(before[1], after[1])
self.assertEqual(before[2], after[2])
self.assertAlmostEqual(after[3], 6.009012, places=5)
self.assertEqual(before[4], after[4])
self.assertEqual(before[5], after[5])
self.assertAlmostEqual(after[6], 7.129601, places=5)
self.assertEqual(before[7], after[7])
self.assertEqual(before[8], after[8])
self.assertAlmostEqual(after[9], 8.787093, places=5)
self.assertEqual(before[10], after[10])
self.assertEqual(before[11], after[11])
self.assertAlmostEqual(after[12], 0.569719, places=5)
self.assertEqual(before[13], after[13])
self.assertEqual(before[14], after[14])
self.assertAlmostEqual(energy_before, 223.764297, places=5)
self.assertAlmostEqual(energy_after, 23.995610, places=5)
if __name__ == '__main__':
unittest.main()
|
MrTheodor/espressopp
|
testsuite/AdResS/ForceAdResS/test_AdResS.py
|
Python
|
gpl-3.0
| 16,081
|
[
"ESPResSo"
] |
5a7786aa82a050ec41a89e9c2e4b06e1012aefe6a066da76bc59740615d5def7
|
'''This module implements classes for generating random data.
'''
# Standard library imports
import numpy as np
# Intra-package imports
from ..ensemble import _largest_power_of_2_leq
from ..spectra.nonparametric import _plot_image
class RandomSignal(object):
'''A class for the creation of 1-dimensional random signals.
Note that `M` >= 1 turbulent branches can be specified simultaneously.
Attributes:
-----------
x - array_like, (`self.Nt`,)
The 1-dimensional random signal with the autospectral density
specified at initialization. The signal is constrained to be real.
[x] = arbitrary units
Fs - float
Temporal sampling rate of signal `self.x`.
[Fs] = arbitrary units
t0 - float
Initial time stamp of signal `self.x`.
[t0] = 1 / [self.Fs]
Nt - int
The number of timestamps in `self.x`. Constrained to be
a power of 2, for fastest FFT computations.
[Nt] = unitless
f0 - array_like, (`M`,)
The dominant temporal frequency of each turbulent branch.
[f0] = [self.Fs]
tau - array_like, (`M`,)
The correlation time of each turbulent branch, where
a Gaussian correlation function has been assumed.
[tau] = 1 / [self.Fs]
G0 - array_like, (`M`,)
The relative peak one-sided autospectral density of each branch.
[G0] = unitless
'''
def __init__(self,
Fs=1., t0=0., T=128.,
f0=[0.], tau=[10.], G0=[1.],
noise_floor=1e-2, seed=None):
'''Create instance of the `RandomSignal` class.
Note that `M` >= 1 turbulent branches can be specified simultaneously.
Input parameters:
-----------------
Fs - float
Temporal sampling rate.
[Fs] = arbitrary units
t0 - float
Initial time stamp.
[t0] = 1 / [Fs]
T - float
Desired time interval over which signal is measured.
Because creation of the random signal relies on the FFT
(which is fastest for powers of two), the realized time
interval `Treal` will be selected such that
Treal * Fs = _largest_power_of_2_leq(T * Fs),
where `_largest_power_of_2_leq(a)` selects the largest
power of 2 that is less than or equal to `a`.
[T] = 1 / [Fs]
f0 - array_like, (`M`,)
The dominant temporal frequency of each turbulent branch.
[f0] = [Fs]
tau - array_like, (`M`,)
The correlation time of each turbulent branch, where
a Gaussian correlation function has been assumed.
[tau] = 1 / [Fs]
G0 - array_like, (`M`,)
The relative peak one-sided autospectral density of each branch.
[G0] = unitless
noise_floor - float
The noise floor of the random process's autospectral density.
[noise_floor] = [self.x]^2 / [Fs], where
`self.x` is the realization of the random process
created at object initialization.
seed - int or None
Random seed used to initialize pseudo-random number generator.
If `None`, generator is seeded from `/dev/urandom` or the clock.
'''
# Grid parameters
self.Fs = Fs
self.t0 = t0
self.Nt = _largest_power_of_2_leq(T * Fs)
# Turbulence parameters
self.f0 = np.array(f0, dtype='float', ndmin=1)
self.tau = np.array(tau, dtype='float', ndmin=1)
self.G0 = np.array(G0, dtype='float', ndmin=1)
# Noise floor of the random process's autospectral density
self._noise_floor = noise_floor
# Get autospectral density of random process
res = self._getAutoSpectralDensity()
self._f = res[0]
self._Gxx = res[1]
# Get a temporal realization of the random process
self.x = self._getSignal(seed=seed)
def _getAutoSpectralDensity(self):
'''Get one-sided autospectral density Gxx(f) of the 1d random process.
Returns:
--------
(f, Gxx) - tuple, where
f - array_like, ((`self.Nt` // 2) + 1,)
The (one-sided) frequency in ascending order.
[f] = [self.Fs]
Gxx - array_like, ((`self.Nt` // 2) + 1,)
The one-sided autospectral density Gxx(f) of the 1d random process.
[Gxx] = [self.x]^2 / [self.Fs], where
`self.x` is the realization of the random process
created at object initialization.
'''
# Construct the spectral grid.
f = np.fft.rfftfreq(self.Nt, d=(1. / self.Fs))
# Initialize autospectral density with zeros
Gxx = np.zeros(f.shape)
# Iteratively incorporate autospectral density of each branch
for branch_ind in np.arange(len(self.f0)):
# Parse turbulence parameters of branch
f0 = self.f0[branch_ind]
tau = self.tau[branch_ind]
G0 = self.G0[branch_ind]
# Shape autospectral density, Gxx.
f_shaping = G0 * np.exp(-((np.pi * tau * (f - f0)) ** 2))
Gxx += f_shaping
# Define peak autospectral density of turbulence to be unity
Gxx /= np.max(Gxx)
# Finally, incorporate noise floor
Gxx += self._noise_floor
return f, Gxx
def _getSignal(self, seed=None):
'''Get a temporal realization of the 1d random process.
Input parameters:
-----------------
seed - int or None
Random seed used to initialize pseudo-random number generator.
If `None`, generator is seeded from `/dev/urandom` or the clock.
Returns:
--------
x - array_like, (`self.Nt`,)
A realization of the 1d random process in time.
For a given random process, the temporal representation
will vary from one realization to the next, but the underlying
autospectral density of each realization will be identical.
[x] = arbitrary units
'''
# Compute *magnitude* of FFT corresponding to autospectral density.
#
# The frequency normalization includes an additional factor of 2
# to account for the one-sided (in frequency) representation of
# the autospectral density.
f_norm = 2. / (self.Nt * self.Fs)
Xmag = np.sqrt(self._Gxx / f_norm)
# To obtain a realization of the random process, we now need
# to add a random phase to each point of the FFT.
if seed is not None:
np.random.seed(seed)
ph = 2 * np.pi * np.random.rand(len(self._f))
# Construct the complex-valued FFT of the realization by
# multiplying the FFT magnitude by the set of random phases.
X = Xmag * np.exp(1j * ph)
return np.fft.irfft(X)
def t(self):
'Get times for points in `self.x`.'
return _uniform_grid(self.Nt, self.t0, 1. / self.Fs)
class RandomSignal2d(object):
'''A class for the creation of 2-dimensional random signals.
Note that `M` >= 1 turbulent branches can be specified simultaneously.
Attributes:
-----------
x - array_like, (`self.Nz`, `self.Nt`)
The 2-dimensional random signal with the autospectral density
specified at initialization. The first axis corresponds to the
spatial dimension with `self.Nz` spatial points; the second axis
corresponds to the temporal dimension with `self.Nt` temporal
points. The signal is constrained to be real.
[x] = arbitrary units
Fs_spatial - float
Spatial sampling rate of signal `self.x`.
[Fs_spatial] = arbitrary units
z0 - float
Initial spatial stamp of signal `self.x`.
[z0] = 1 / [self.Fs_spatial]
Nz - int
The number of spatial stamps in `self.x`. Constrained to be
a power of 2, for fastest FFT computations.
[Nz] = unitless
Fs - float
Temporal sampling rate of signal `self.x`.
[Fs] = arbitrary units
t0 - float
Initial time stamp of signal `self.x`.
[t0] = 1 / [self.Fs]
Nt - int
The number of timestamps in `self.x`. Constrained to be
a power of 2, for fastest FFT computations.
[Nt] = unitless
xi0 - array_like, (`M`,)
The dominant spatial frequency of each turbulent branch.
[xi0] = [self.Fs_spatial]
Lz - array_like, (`M`,)
The spatial correlation length of each turbulent branch, where
a Gaussian correlation function has been assumed.
[Lz] = 1 / [self.Fs_spatial]
f0 - array_like, (`M`,)
The dominant temporal frequency of each turbulent branch
in the medium's rest frame (i.e. `f0` is *not* attributable
to a Doppler shift; see `v` for Doppler-shift effects).
[f0] = [self.Fs]
tau - array_like, (`M`,)
The correlation time of each turbulent branch, where
a Gaussian correlation function has been assumed.
[tau] = 1 / [self.Fs]
v - array_like, (`M`,)
The lab-frame velocity of the medium through which
the turbulent branch is propagating. Note that
non-zero velocity produces a Doppler-shifted
lab-frame frequency
df = xi * v
where `xi` is the spatial frequency.
[v] = [self.Fs] / [self.Fs_spatial]
S0 - array_like, (`M`,)
The relative peak autospectral density of each branch.
[S0] = unitless
'''
def __init__(self,
Fs_spatial=1., z0=0., Z=64.,
Fs=1., t0=0., T=128.,
xi0=[0.], Lz=[5.], f0=[0.], tau=[10.], v=[1.], S0=[1.],
noise_floor=1e-2, seed=None):
'''Create an instance of the `RandomSignal2d` class.
Note that `M` >= 1 turbulent branches can be specified simultaneously.
Input parameters:
-----------------
Fs_spatial - float
Spatial sampling rate.
[Fs_spatial] = arbitrary units
z0 - float
Initial spatial stamp.
[z0] = 1 / [Fs_spatial]
Z - float
Desired spatial interval over which signal is measured.
Because creation of the random signal relies on the FFT
(which is fastest for powers of two), the realized spatial
interval `Zreal` will be selected such that
Zreal * Fs_spatial = _largest_power_of_2_leq(Z * Fs_spatial),
where `_largest_power_of_2_leq(a)` selects the largest
power of 2 that is less than or equal to `a`.
[Z] = 1 / [Fs_spatial]
Fs - float
Temporal sampling rate.
[Fs] = arbitrary units
t0 - float
Initial time stamp.
[t0] = 1 / [Fs]
T - float
Desired time interval over which signal is measured.
Because creation of the random signal relies on the FFT
(which is fastest for powers of two), the realized time
interval `Treal` will be selected such that
Treal * Fs = _largest_power_of_2_leq(T * Fs),
where `_largest_power_of_2_leq(a)` selects the largest
power of 2 that is less than or equal to `a`.
[T] = 1 / [Fs]
xi0 - array_like, (`M`,)
The dominant spatial frequency of each turbulent branch.
[xi0] = [Fs_spatial]
Lz - array_like, (`M`,)
The spatial correlation length of each turbulent branch, where
a Gaussian correlation function has been assumed.
[Lz] = 1 / [Fs_spatial]
f0 - array_like, (`M`,)
The dominant temporal frequency of each turbulent branch
in the medium's rest frame (i.e. `f0` is *not* attributable
to a Doppler shift; see `v` for Doppler-shift effects).
[f0] = [Fs]
tau - array_like, (`M`,)
The correlation time of each turbulent branch, where
a Gaussian correlation function has been assumed.
[tau] = 1 / [Fs]
v - array_like, (`M`,)
The lab-frame velocity of the medium through which
the turbulent branch is propagating. Note that
non-zero velocity produces a Doppler-shifted
lab-frame frequency
df = xi * v
where `xi` is the spatial frequency.
[v] = [Fs] / [Fs_spatial]
S0 - array_like, (`M`,)
The relative peak autospectral density of each branch.
[S0] = unitless
noise_floor - float
The noise floor of the random process's autospectral density.
[noise_floor] = [self.x]^2 / [Fs] / [Fs_spatial], where
`self.x` is the realization of the random process
created at object initialization.
seed - int or None
Random seed used to initialize pseudo-random number generator.
If `None`, generator is seeded from `/dev/urandom` or the clock.
'''
# Spatial-grid parameters
self.Fs_spatial = Fs_spatial
self.z0 = z0
self.Nz = _largest_power_of_2_leq(Z * Fs_spatial)
# Temporal-grid parameters
self.Fs = Fs
self.t0 = t0
self.Nt = _largest_power_of_2_leq(T * Fs)
# Turbulence parameters
self.xi0 = np.array(xi0, dtype='float', ndmin=1)
self.Lz = np.array(Lz, dtype='float', ndmin=1)
self.f0 = np.array(f0, dtype='float', ndmin=1)
self.tau = np.array(tau, dtype='float', ndmin=1)
self.v = np.array(v, dtype='float', ndmin=1)
self.S0 = np.array(S0, dtype='float', ndmin=1)
# Noise floor of the random process's autospectral density
self._noise_floor = noise_floor
# Get autospectral density of 2d random process
res = self._getAutoSpectralDensity()
self._xi = res[0]
self._f = res[1]
self._Sxx = res[2]
# Get a space-time realization of the 2d random process
self.x = self._getSignal(seed=seed)
def _getAutoSpectralDensity(self):
'''Get autospectral density Sxx(xi, f) of the 2d random process.
Returns:
--------
(xi, f, Sxx) - tuple, where
xi - array_like, (`self.Nz`,)
The (two-sided) spatial frequency in ascending order.
Note that the spatial frequency is related to the wavenumber k
via k = 2 * pi * xi.
[xi] = [self.Fs_spatial]
f - array_like, ((`self.Nt` // 2) + 1,)
The (one-sided) frequency in ascending order.
[f] = [self.Fs]
Sxx - array_like, (`self.Nz`, `self.Nt`)
The autospectral density Sxx(xi, f) of the 2d random process.
Note that the autospectral density is one-sided in frequency (f)
and two-sided in spatial frequency (xi).
[Sxx] = [self.x]^2 / [self.Fs] / [self.Fs_spatial], where
`self.x` is the realization of the random process
created at object initialization.
'''
# Construct the spectral grid.
#
# Typically, we present the autospectral density Sxx(xi, f)
# as one-sided in frequency (f) & two-sided in spatial frequency (xi),
# so we will follow that convention here.
f = np.fft.rfftfreq(self.Nt, d=(1. / self.Fs))
xi = np.fft.fftshift(np.fft.fftfreq(self.Nz, d=(1. / self.Fs_spatial)))
ff, xixi = np.meshgrid(f, xi)
# Initialize autospectral density with zeros
Sxx = np.zeros(ff.shape)
# Iteratively incorporate autospectral density of each branch
for branch_ind in np.arange(len(self.xi0)):
# Parse turbulence parameters of branch
xi0 = self.xi0[branch_ind]
Lz = self.Lz[branch_ind]
f0 = self.f0[branch_ind]
tau = self.tau[branch_ind]
v = self.v[branch_ind]
S0 = self.S0[branch_ind]
# Shape auto-spectral density, Sxx.
xi_shaping = np.exp(-((np.pi * Lz * (xixi - xi0)) ** 2))
df = v * xixi
f_shaping = np.exp(-((np.pi * tau * (ff - f0 - df)) ** 2))
Sxx += (S0 * xi_shaping * f_shaping)
# Define peak autospectral density of turbulence to be unity
Sxx /= np.max(Sxx)
# Finally, incorporate noise floor
Sxx += self._noise_floor
return xi, f, Sxx
def _getSignal(self, seed=None):
'''Get a space-time realization of the 2d random process.
Input parameters:
-----------------
seed - int or None
Random seed used to initialize pseudo-random number generator.
If `None`, generator is seeded from `/dev/urandom` or the clock.
Returns:
--------
x - array_like, (`self.Nz`, `self.Nt`)
A realization of the 2d random process in space and time.
For a given random process, the space-time representation
will vary from one realization to the next, but the underlying
autospectral density of each realization will be identical.
[x] = arbitrary units
'''
# Compute *magnitude* of FFT corresponding to autospectral density.
#
# The frequency normalization includes an additional factor of 2
# to account for the one-sided (in frequency) representation of
# the autospectral density.
f_norm = 2. / (self.Nt * self.Fs)
xi_norm = 1. / (self.Nz * self.Fs_spatial)
Xmag = np.sqrt(self._Sxx / f_norm / xi_norm)
# To obtain a realization of the random process, we now need
# to add a random phase to each point of the FFT.
#
# Note that if the random signal is real-valued, as is desired,
# then the FFT must have Hermitian symmetry, i.e.
#
# X(-xi, -f) = [X(xi, f)]*,
#
# where * indicates the complex conjugate.
#
# Perhaps the easiest way to satisfy the above Hermitian-symmetry
# constraint is to steal the phase from a dummy random signal `y`
# with the desired dimensions, as is done below. Note that care
# must be exercised in application of one-sided vs. two-sided FFTs,
# as they do *not* commute; specifically, the forward one-sided FFT
# will "silently discard" any imaginary component of the input signal.
# Thus, in computation of the forward FFT, the one-sided FFT (in time)
# must be applied first, and then the forward two-sided FFT (in space)
# can be applied. When computing the inverse FFTs, the opposite
# ordering must be used.
if seed is not None:
np.random.seed(seed)
y = np.random.randn(self.Nz, self.Nt)
Y = np.fft.fft(np.fft.rfft(y, axis=1), axis=0)
ph = np.angle(Y)
# Shift along spatial axis, as the autospectral density's convention
# is two-sided spatial frequencies in ascending order.
ph = np.fft.fftshift(ph, axes=0)
# Construct the complex-valued FFT of the realization by
# multiplying the FFT magnitude by the set of random phases.
X = Xmag * np.exp(1j * ph)
# Inverse the shift along the spatial axis to bring the FFT
# into the conventional FFT ordering.
X = np.fft.ifftshift(X, axes=0)
# As discussed when computing the phase above, the two-sided and
# one-sided FFTs do *not* commute. Thus, to compute the space-time
# realization of the random process, we need to first compute the
# two-sided inverse FFT in space and then compute the one-sided
# inverse FFT in time.
return np.fft.irfft(np.fft.ifft(X, axis=0), axis=1)
def t(self):
'Get times for points in `self.x`.'
return _uniform_grid(self.Nt, self.t0, 1. / self.Fs)
def z(self):
'Get spatial coordinates for points in `self.x`.'
return _uniform_grid(self.Nz, self.z0, 1. / self.Fs_spatial)
def plotSpectralDensity(
self, xilim=None, flim=None, vlim=None,
cmap='viridis', interpolation='none',
fontsize=16,
title=None, xlabel=r'$\xi$', ylabel=r'$f$',
cblabel=r'$|S_{xx}(\xi, f)|$', cborientation='horizontal',
ax=None, fig=None, geometry=111):
'Plot magnitude of autospectral density on log scale.'
ax = _plot_image(
self._xi, self._f, np.abs(self._Sxx).T,
xlim=xilim, ylim=flim, vlim=vlim,
norm='log', cmap=cmap, interpolation=interpolation,
title=title, xlabel=xlabel, ylabel=ylabel,
cblabel=cblabel, cborientation=cborientation,
fontsize=fontsize,
ax=ax, fig=fig, geometry=geometry)
return ax
def plotSignal(self, cmap='RdBu', interpolation='none', ax=None):
'Plot image of signal as a function of space and time.'
ax = _plot_image(
self.z(), self.t(), self.x.T,
norm=None, cmap=cmap, interpolation=interpolation,
title='', xlabel=r'$z$', ylabel=r'$t$',
cblabel=r'$x(z, t)$', cborientation='vertical',
ax=ax)
return ax
def _uniform_grid(Npts, x0, dx):
'Get uniform grid of `Npts` starting at `x0` and spaced by `dx`.'
return x0 + (np.arange(Npts) * dx)
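# A minimal usage sketch (illustrative only; the parameter values below are
# assumptions, and the relative imports above mean this file must be run as a
# module, e.g. `python -m random_data.signals.random_signal`): build a 1d and
# a 2d realization and confirm they are real-valued with the expected shapes.
if __name__ == '__main__':
    sig1d = RandomSignal(Fs=1., T=128., f0=[0.1], tau=[10.], G0=[1.], seed=0)
    print(sig1d.x.shape)   # (128,); Nt = 128 is already a power of 2
    sig2d = RandomSignal2d(Fs_spatial=1., Z=64., Fs=1., T=128.,
                           xi0=[0.05], Lz=[5.], f0=[0.1], tau=[10.],
                           v=[1.], S0=[1.], seed=0)
    print(sig2d.x.shape)   # (64, 128)
    print(np.isrealobj(sig1d.x) and np.isrealobj(sig2d.x))  # True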
|
emd/random_data
|
random_data/signals/random_signal.py
|
Python
|
gpl-2.0
| 21,850
|
[
"Gaussian"
] |
dd0e9ca477fcf640a4064ab9e8f47538ce2a70a6d14cea011671c6d29683559a
|
from django.db import models
import cms
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from filer.fields.image import FilerImageField
class MBDAboutTeamBar(CMSPlugin):
"""
Plugin to describe the information stored for a Team Bar in the Template
"""
sections = models.PositiveSmallIntegerField(default=4, null=False, blank=False,
verbose_name=_("Number of sections the bar will be divided"),
help_text=_(
"Describe the number of sections the bar will be divided showing arrows "
"up and down for each section"))
def get_range(self):
return range(self.sections)
def get_width(self):
return (80 / self.sections)
class MBDBoardMemberCard(CMSPlugin):
"""
Plugin to show a board member presentation card. This plugin is mainly used in the
about page
"""
name = models.CharField(null=False, blank=False, help_text=_("Main name to display on the card"),
verbose_name=_("Name"), max_length=60)
title = models.CharField(null=False, blank=False, help_text=_("Title of the board member"), verbose_name=_("Title"),
max_length=60)
bio = models.TextField(null=False, blank=False, help_text=_("Brief biography of the member"), verbose_name=_("Bio"))
picture = FilerImageField(null=True, blank=True, default=None, verbose_name=_("image"), on_delete=models.SET_NULL,
help_text=_("Member Picture"))
point_up = models.BooleanField(null=False, blank=False, default=False, verbose_name=_("Point Card Up"),
help_text=_("Display the indicator of the card pointing up. "
"Designed for cards that will show below the team line"))
offset = models.PositiveSmallIntegerField(null=True, blank=True, verbose_name=_("Bootstrap Column Offset"),
help_text=_("The offset in bootstrap column the card should appear at."
"See column offset in bootstrap"))
class MBDAboutBoardMemberSocialIcons(CMSPlugin):
"""
Plugin model to specify the social icons that will be displayed in a MBDBoardMemberCardPlugin plugin
"""
facebook = models.URLField(null=True, blank=True, help_text=_("Url to the facebook profile page"),
verbose_name=_("Facebook Url"))
twitter = models.URLField(null=True, blank=True, help_text=_("Url to the twitter profile page"),
verbose_name=_("Twitter Url"))
google_plus = models.URLField(null=True, blank=True, help_text=_("Url to the google plus profile page"),
verbose_name=_("Google+ Url"))
instagram = models.URLField(null=True, blank=True, help_text=_("Url to the instagram profile page"),
verbose_name=_("Instagram Url"))
youtube = models.URLField(null=True, blank=True, help_text=_("Url to the youtube channel page"),
verbose_name=_("Youtube Url"))
pinterest = models.URLField(null=True, blank=True, help_text=_("Url to the pinterest profile page"),
verbose_name=_("Pinterest Url"))
class MBDDancerBadge(CMSPlugin):
"""
Plugin model to store the configuration of the badge display
"""
picture = FilerImageField(null=True, blank=True, default=None, verbose_name=_("picture"), on_delete=models.SET_NULL,
help_text=_("Dancer Picture"))
name = models.CharField(null=False, blank=False, help_text=_("Main name to display on the badge"),
verbose_name=_("Name"), max_length=60)
alt_text = models.CharField(null=False, blank=False, help_text=_("Text to display after the name"),
verbose_name=_("Alternate Text"), max_length=60)
class MBDancerPicture(CMSPlugin):
"""
Plugin model to store the configuration for the dancer picture plugin
"""
picture = FilerImageField(null=True, blank=True, default=None, verbose_name=_("picture"), on_delete=models.SET_NULL,
help_text=_("Dancer Picture"))
name = models.CharField(null=False, blank=False, help_text=_("Main name to display on the badge"),
verbose_name=_("Name"), max_length=60)
small_bio = models.CharField(null=False, blank=False, help_text=_("Small text that will appear when hover"),
verbose_name=_("Small Bio"), max_length=60)
link_page = cms.models.fields.PageField(verbose_name=_("Link"), blank=False, null=True,
help_text=_("Url to visit when read more is selected"))
class MBDTwoPicCarousel(CMSPlugin):
"""
Plugin in model to store information for the MBDTwoPicCarouselPlugin
"""
slide1_background = FilerImageField(null=True, blank=True, default=None, verbose_name=_("Slide 1 Main Picture"),
on_delete=models.SET_NULL, related_name="slide1_background",
help_text=_("Picture for slide 1 background"))
slide1_foreground = FilerImageField(null=True, blank=True, default=None,
verbose_name=_("Slide 1 Foreground Picture"),
on_delete=models.SET_NULL, related_name="slide1_foreground",
help_text=_("Picture for slide 1 foreground"))
slide1_header = models.CharField(null=True, blank=True, help_text=_("Header text that will appear in slide 1"),
verbose_name=_("Slide 1 Header"), max_length=60)
slide1_caption = models.CharField(null=True, blank=True,
help_text=_("Caption text that will appear in slide 1 below header"),
verbose_name=_("Slide 1 Header"), max_length=120)
slide1_button_text = models.CharField(null=True, blank=True,
help_text=_(
"Text to show in a button below the caption of the slide if wanted"),
verbose_name=_("Slide 1 Button Text"), max_length=25)
slide1_button_link = cms.models.fields.PageField(null=True, blank=True, related_name="slide1_button_link",
help_text=_("Page to visit once the button is clicked"),
verbose_name=_("Slide 1 Button Link"))
slide2_background = FilerImageField(null=True, blank=True, default=None, verbose_name=_("Slide 2 Main Picture"),
on_delete=models.SET_NULL, related_name="slide2_background",
help_text=_("Picture for slide 2 background"))
slide2_foreground = FilerImageField(null=True, blank=True, default=None,
verbose_name=_("Slide 2 Foreground Picture"),
on_delete=models.SET_NULL, related_name="slide2_foreground",
help_text=_("Picture for slide 2 foreground"))
slide2_header = models.CharField(null=True, blank=True, help_text=_("Header text that will appear in slide 2"),
verbose_name=_("Slide 2 Header"), max_length=60)
slide2_caption = models.CharField(null=True, blank=True,
help_text=_("Caption text that will appear in slide 2 below header"),
verbose_name=_("Slide 2 Header"), max_length=120)
slide2_button_text = models.CharField(null=True, blank=True,
help_text=_(
"Text to show in a button below the caption of the slide if wanted"),
verbose_name=_("Slide 2 Button Text"), max_length=25)
slide2_button_link = cms.models.fields.PageField(null=True, blank=True, related_name="slide2_button_link",
help_text=_("Page to visit once the button is clicked"),
verbose_name=_("Slide 2 Button Link"))
slide3_background = FilerImageField(null=True, blank=True, default=None, verbose_name=_("Slide 3 Main Picture"),
on_delete=models.SET_NULL, related_name="slide3_background",
help_text=_("Picture for slide 3 background"))
slide3_foreground = FilerImageField(null=True, blank=True, default=None,
verbose_name=_("Slide 3 Foreground Picture"),
on_delete=models.SET_NULL, related_name="slide3_foreground",
help_text=_("Picture for slide 3 foreground"))
slide3_header = models.CharField(null=True, blank=True, help_text=_("Header text that will appear in slide 3"),
verbose_name=_("Slide 3 Header"), max_length=60)
slide3_caption = models.CharField(null=True, blank=True,
help_text=_("Caption text that will appear in slide 3 below header"),
verbose_name=_("Slide 3 Header"), max_length=120)
slide3_button_text = models.CharField(null=True, blank=True,
help_text=_(
"Text to show in a button below the caption of the slide if wanted"),
verbose_name=_("Slide 1 Button Text"), max_length=25)
slide3_button_link = cms.models.fields.PageField(null=True, blank=True, related_name="slide3_button_link",
help_text=_("Page to visit once the button is clicked"),
verbose_name=_("Slide 3 Button Link"))
slide4_background = FilerImageField(null=True, blank=True, default=None, verbose_name=_("Slide 4 Main Picture"),
on_delete=models.SET_NULL, related_name="slide4_background",
help_text=_("Picture for slide 4 background"))
slide4_foreground = FilerImageField(null=True, default=None, blank=True,
verbose_name=_("Slide 4 Foreground Picture"),
on_delete=models.SET_NULL, related_name="slide4_foreground",
help_text=_("Picture for slide 4 foreground"))
slide4_header = models.CharField(null=True, blank=True, help_text=_("Header text that will appear in slide 4"),
verbose_name=_("Slide 4 Header"), max_length=60)
slide4_caption = models.CharField(null=True, blank=True,
help_text=_("Caption text that will appear in slide 4 below header"),
verbose_name=_("Slide 4 Header"), max_length=120)
slide4_button_text = models.CharField(null=True, blank=True,
help_text=_(
"Text to show in a button below the caption of the slide if wanted"),
verbose_name=_("Slide 4 Button Text"), max_length=25)
slide4_button_link = cms.models.fields.PageField(null=True, blank=True, related_name="slide4_button_link",
help_text=_("Page to visit once the button is clicked"),
verbose_name=_("Slide 4 Button Link"))
|
ti3r/mbd-cms-template
|
mbd_cms_template/models.py
|
Python
|
mit
| 12,188
|
[
"VisIt"
] |
320bc342c3849dff663e05bab1a5b548f624ab3eb78ffd993982cd64b966b9eb
|
import subprocess, time, requests, json
name = "Linus Torvalds" # The name for the character, by default is ya pal Chuck
restartSeconds = 30 # Seconds between each joke
restartIfError = 120 # Seconds before a restart after an error
color = '\e[92m' # For green, to see a list of colors visit http://misc.flogisoft.com/bash/tip_colors_and_formatting
def main():
while True:
try:
fullUrl = "http://api.icndb.com/jokes/random?firstName=" + str(name.split()[0]) + "&lastName=" + str(name.split()[1])
dankSentence = requests.get(fullUrl)
dankestSentence = json.loads(dankSentence.text).get('value').get('joke')
fixedQuotes = dankestSentence.replace("&quot;", "'")
subprocess.call(['clear'])
subprocess.call(['echo', '-e', color])
subprocess.call(['cowsay', fixedQuotes])
time.sleep(int(restartSeconds))
except KeyboardInterrupt:
print('\n ^__^\n Cya! > (oo)\n (__)')
break
except:
time.sleep(int(restartIfError))
main()
if __name__ == '__main__':
main()
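# Usage note (assumptions: the `cowsay` binary is installed and the ICNDB API
# is reachable): run directly with `python autoCow.py`; Ctrl-C exits cleanly.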
|
Capuno/Cowsay-with-chuck-api
|
autoCow.py
|
Python
|
gpl-3.0
| 1,025
|
[
"VisIt"
] |
3150c4aba192a8d13a3ee1db64ffbe0f33d235d3594e32ea41b4884ecdb6b804
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'item.quantidade_excedente'
db.alter_column(u'salesReport_item', 'quantidade_excedente', self.gf('django.db.models.fields.IntegerField')(null=True))
def backwards(self, orm):
# Changing field 'item.quantidade_excedente'
db.alter_column(u'salesReport_item', 'quantidade_excedente', self.gf('django.db.models.fields.FloatField')(null=True))
models = {
u'salesReport.brands': {
'Meta': {'object_name': 'brands'},
'meta_dias_estoque': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'primary_key': 'True'})
},
u'salesReport.csvreport': {
'Meta': {'object_name': 'csvReport'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'csvFile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'salesReport.item': {
'Meta': {'object_name': 'item'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['salesReport.brands']", 'null': 'True', 'blank': 'True'}),
'cmm': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'cost': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'estoque_atual': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'estoque_disponivel': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'estoque_empenhado': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'margem': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'price': ('django.db.models.fields.FloatField', [], {}),
'product_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'quantidade_excedente': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'quantidade_faltante': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'sku': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'specialPrice': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'vmd': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
u'salesReport.order': {
'Meta': {'object_name': 'order'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'custoProdutos': ('django.db.models.fields.FloatField', [], {}),
'customer_email': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'customer_firstname': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'customer_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'customer_lastname': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'discount_amount': ('django.db.models.fields.FloatField', [], {}),
'grand_total': ('django.db.models.fields.FloatField', [], {}),
'increment_id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['salesReport.item']", 'through': u"orm['salesReport.orderItem']", 'symmetrical': 'False'}),
'margemBrutaCartaoFrete': ('django.db.models.fields.FloatField', [], {}),
'margemBrutaSoProdutos': ('django.db.models.fields.FloatField', [], {}),
'order_id': ('django.db.models.fields.FloatField', [], {}),
'payment_amount_ordered': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'payment_method': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'payment_shipping_amount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'receitaFrete': ('django.db.models.fields.FloatField', [], {}),
'shipping_address_postcode': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'shipping_address_region': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'shipping_address_street': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'shipping_amount': ('django.db.models.fields.FloatField', [], {}),
'shipping_amount_centralfit': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'somatoriaProdutos': ('django.db.models.fields.FloatField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'subtotal': ('django.db.models.fields.FloatField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valorBonificado': ('django.db.models.fields.FloatField', [], {}),
'valorBonificadoPedido': ('django.db.models.fields.FloatField', [], {}),
'valorBrutoFaturado': ('django.db.models.fields.FloatField', [], {}),
'valorDesconto': ('django.db.models.fields.FloatField', [], {}),
'valorFrete': ('django.db.models.fields.FloatField', [], {}),
'valorLiquidoProdutos': ('django.db.models.fields.FloatField', [], {}),
'valorTaxaCartao': ('django.db.models.fields.FloatField', [], {}),
'weight': ('django.db.models.fields.FloatField', [], {})
},
u'salesReport.orderitem': {
'Meta': {'object_name': 'orderItem'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_child': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['salesReport.item']"}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['salesReport.order']"}),
'price': ('django.db.models.fields.FloatField', [], {}),
'productType': ('django.db.models.fields.CharField', [], {'max_length': '155'}),
'quantidade': ('django.db.models.fields.FloatField', [], {}),
'removido_estoque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
u'salesReport.status_history': {
'Meta': {'object_name': 'status_history'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'entity_name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['salesReport.order']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '250'})
}
}
complete_apps = ['salesReport']
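# Usage sketch (assumed South workflow): apply this migration with
#   ./manage.py migrate salesReport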
|
akiokio/centralfitestoque
|
src/salesReport/migrations/0035_auto__chg_field_item_quantidade_excedente.py
|
Python
|
bsd-2-clause
| 8,389
|
[
"VMD"
] |
000893e5d38a2520e5ac5c360e15ece5d6822f83d6a51c2d302bed332331c929
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import ctypes
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.fci import direct_spin1
from pyscf.fci import direct_spin1_symm
from pyscf.fci import selected_ci
from pyscf.fci import selected_ci_symm
from pyscf.fci import selected_ci_spin0
libfci = lib.load_library('libfci')
def contract_2e(eri, civec_strs, norb, nelec, link_index=None, orbsym=None):
ci_coeff, nelec, ci_strs = selected_ci._unpack(civec_strs, nelec)
if link_index is None:
link_index = selected_ci._all_linkstr_index(ci_strs, norb, nelec)
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
na, nlinka = nb, nlinkb = cd_indexa.shape[:2]
eri = ao2mo.restore(1, eri, norb)
eri1 = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)
idx,idy = numpy.tril_indices(norb, -1)
idx = idx * norb + idy
eri1 = lib.take_2d(eri1.reshape(norb**2,-1), idx, idx) * 2
lib.transpose_sum(eri1, inplace=True)
eri1 *= .5
eri1, dd_indexa, dimirrep = selected_ci_symm.reorder4irrep(eri1, norb, dd_indexa, orbsym, -1)
fcivec = ci_coeff.reshape(na,nb)
ci1 = numpy.zeros_like(fcivec)
# (aa|aa)
if nelec[0] > 1:
ma, mlinka = mb, mlinkb = dd_indexa.shape[:2]
libfci.SCIcontract_2e_aaaa_symm(eri1.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(ma), ctypes.c_int(mlinka),
dd_indexa.ctypes.data_as(ctypes.c_void_p),
dimirrep.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(len(dimirrep)))
h_ps = numpy.einsum('pqqs->ps', eri) * (.5/nelec[0])
eri1 = eri.copy()
for k in range(norb):
eri1[:,:,k,k] += h_ps
eri1[k,k,:,:] += h_ps
eri1 = ao2mo.restore(4, eri1, norb)
lib.transpose_sum(eri1, inplace=True)
eri1 *= .5
eri1, cd_indexa, dimirrep = selected_ci_symm.reorder4irrep(eri1, norb, cd_indexa, orbsym)
# (bb|aa)
libfci.SCIcontract_2e_bbaa_symm(eri1.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
cd_indexa.ctypes.data_as(ctypes.c_void_p),
cd_indexa.ctypes.data_as(ctypes.c_void_p),
dimirrep.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(len(dimirrep)))
lib.transpose_sum(ci1, inplace=True)
return selected_ci._as_SCIvector(ci1.reshape(ci_coeff.shape), ci_strs)
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
select_cutoff=1e-3, ci_coeff_cutoff=1e-3, ecore=0, **kwargs):
return direct_spin1._kfactory(SelectedCI, h1e, eri, norb, nelec, ci0,
level_shift, tol, lindep, max_cycle,
max_space, nroots, davidson_only,
pspace_size, select_cutoff=select_cutoff,
ci_coeff_cutoff=ci_coeff_cutoff, ecore=ecore,
**kwargs)
make_rdm1s = selected_ci.make_rdm1s
make_rdm2s = selected_ci.make_rdm2s
make_rdm1 = selected_ci.make_rdm1
make_rdm2 = selected_ci.make_rdm2
trans_rdm1s = selected_ci.trans_rdm1s
trans_rdm1 = selected_ci.trans_rdm1
class SelectedCI(selected_ci_symm.SelectedCI):
def contract_2e(self, eri, civec_strs, norb, nelec, link_index=None,
orbsym=None, **kwargs):
if orbsym is None:
orbsym = self.orbsym
if getattr(civec_strs, '_strs', None) is not None:
self._strs = civec_strs._strs
else:
assert(civec_strs.size == len(self._strs[0])*len(self._strs[1]))
civec_strs = selected_ci._as_SCIvector(civec_strs, self._strs)
return contract_2e(eri, civec_strs, norb, nelec, link_index, orbsym)
def make_hdiag(self, h1e, eri, ci_strs, norb, nelec):
return selected_ci_spin0.make_hdiag(h1e, eri, ci_strs, norb, nelec)
enlarge_space = selected_ci_spin0.enlarge_space
SCI = SelectedCI
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import symm
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = 'sto-3g'
mol.symmetry = 1
mol.build()
m = scf.RHF(mol).run()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron - 2
h1e = reduce(numpy.dot, (m.mo_coeff.T, scf.hf.get_hcore(mol), m.mo_coeff))
eri = ao2mo.incore.full(m._eri, m.mo_coeff)
orbsym = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, m.mo_coeff)
myci = SelectedCI().set(orbsym=orbsym)
e1, c1 = myci.kernel(h1e, eri, norb, nelec)
myci = direct_spin1_symm.FCISolver().set(orbsym=orbsym)
e2, c2 = myci.kernel(h1e, eri, norb, nelec)
print(e1 - e2)
|
gkc1000/pyscf
|
pyscf/fci/selected_ci_spin0_symm.py
|
Python
|
apache-2.0
| 6,419
|
[
"PySCF"
] |
e44694dcee149a3f4beaee73ba1d9d4655d20a817a1c294d158835f5229e0f32
|
"""
Tests which scan for certain occurrences in the code; they may not find
all of these occurrences but should catch almost all. This file was adapted
from NumPy.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import scipy
import pytest
if sys.version_info >= (3, 4):
from pathlib import Path
import ast
import tokenize
class ParseCall(ast.NodeVisitor):
def __init__(self):
self.ls = []
def visit_Attribute(self, node):
ast.NodeVisitor.generic_visit(self, node)
self.ls.append(node.attr)
def visit_Name(self, node):
self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
def __init__(self, filename):
super().__init__()
self.__filename = filename
self.bad_filters = []
self.bad_stacklevels = []
def visit_Call(self, node):
p = ParseCall()
p.visit(node.func)
ast.NodeVisitor.generic_visit(self, node)
if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
if node.args[0].s == "ignore":
self.bad_filters.append(
"{}:{}".format(self.__filename, node.lineno))
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
if self.__filename == "_lib/tests/test_warnings.py":
# This file
return
# See if stacklevel exists:
if len(node.args) == 3:
return
args = {kw.arg for kw in node.keywords}
if "stacklevel" not in args:
self.bad_stacklevels.append(
"{}:{}".format(self.__filename, node.lineno))
@pytest.fixture(scope="session")
def warning_calls():
# combined "ignore" and stacklevel error
base = Path(scipy.__file__).parent
bad_filters = []
bad_stacklevels = []
for path in base.rglob("*.py"):
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g., LANG='C')
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read(), filename=str(path))
finder = FindFuncs(path.relative_to(base))
finder.visit(tree)
bad_filters.extend(finder.bad_filters)
bad_stacklevels.extend(finder.bad_stacklevels)
return bad_filters, bad_stacklevels
@pytest.mark.slow
@pytest.mark.skipif(sys.version_info < (3, 4), reason="needs Python >= 3.4")
def test_warning_calls_filters(warning_calls):
bad_filters, bad_stacklevels = warning_calls
# There is still one simplefilter occurrence in optimize.py that could be removed.
bad_filters = [item for item in bad_filters
if 'optimize.py' not in item]
# The filterwarnings calls in sparse are needed.
bad_filters = [item for item in bad_filters
if os.path.join('sparse', '__init__.py') not in item
and os.path.join('sparse', 'sputils.py') not in item]
if bad_filters:
raise AssertionError(
"warning ignore filter should not be used, instead, use\n"
"scipy._lib._numpy_compat.suppress_warnings (in tests only);\n"
"found in:\n {}".format(
"\n ".join(bad_filters)))
@pytest.mark.slow
@pytest.mark.skipif(sys.version_info < (3, 4), reason="needs Python >= 3.4")
@pytest.mark.xfail(reason="stacklevels currently missing")
def test_warning_calls_stacklevels(warning_calls):
bad_filters, bad_stacklevels = warning_calls
msg = ""
if bad_filters:
msg += ("warning ignore filter should not be used, instead, use\n"
"scipy._lib._numpy_compat.suppress_warnings (in tests only);\n"
"found in:\n {}".format("\n ".join(bad_filters)))
msg += "\n\n"
if bad_stacklevels:
msg += "warnings should have an appropriate stacklevel:\n {}".format(
"\n ".join(bad_stacklevels))
if msg:
raise AssertionError(msg)
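# A minimal sketch (not part of the test suite; the snippet below is an
# assumed example) showing what FindFuncs flags: an "ignore" filter and a
# warnings.warn() call without a stacklevel.
if __name__ == '__main__' and sys.version_info >= (3, 4):
    src = ("import warnings\n"
           "warnings.filterwarnings('ignore')\n"
           "warnings.warn('x')\n")
    finder = FindFuncs("example.py")
    finder.visit(ast.parse(src, filename="example.py"))
    print(finder.bad_filters)      # ['example.py:2']
    print(finder.bad_stacklevels)  # ['example.py:3']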
|
jamestwebber/scipy
|
scipy/_lib/tests/test_warnings.py
|
Python
|
bsd-3-clause
| 4,193
|
[
"VisIt"
] |
d48f9bdaf638b8b6da0ebd64351169f07e0daf63e83b30096b3acdeafe8fbdec
|
#!/usr/bin/env python
'''
Submit multi-thread upload/download jobs in client
'''
import threading
import time, os, sys
def addFile(m):
timeStart = time.time()
n = m + 0
#print 'Adding file100m-%04d' %n
cmd = 'dirac-dms-add-file /cepc/stormtest/100M-files-10/file100m-%04d random100M IHEP-STORM' %n
os.system(cmd)
timeEnd = time.time()
print 'Finished add file100m-%04d with Speed %.3f M/s' %(n, 100/(timeEnd-timeStart))
def getFile(m):
timeStart = time.time()
n = m + 0
print 'Downloading file100m-%04d' %n
cmd = "lcg-cp -b -D srmv2 --connect-timeout 3600 --sendreceive-timeout 3600 -n 4\
srm://storm.ihep.ac.cn:8444/srm/managerv2?SFN=/cepc/stormtest/100M-files-10/file100m-%04d file:///dev/null" %n
os.system(cmd)
timeEnd = time.time()
print 'Finished download file100m-%04d with Speed %.3f M/s' %(n, 100/(timeEnd-timeStart))
class MyThread(threading.Thread):
def __init__(self, func, args, name=''):
threading.Thread.__init__(self)
self.name = name
self.func = func
self.args = args
self.result = 0
def run(self):
self.result = apply(self.func, self.args)
def getResult(self):
return self.result
def main():
worker = globals()[sys.argv[1] + 'File'] # addFile or getFile
n = int(sys.argv[2]) # number of workers
threads = []
for i in range(n):
t = MyThread(worker, (i,))
t.setDaemon(True)
threads.append(t)
timeS = time.time()
for i in range(n):
threads[i].start()
for i in range(n):
threads[i].join()
timeE = time.time()
print 'Total time %.2f, average Speed: %.3f M/s' %(timeE-timeS, n*100/(timeE-timeS))
if __name__ == '__main__':
main()
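# Usage sketch (assumed invocation; the first argument selects the addFile or
# getFile worker, the second sets the number of concurrent threads):
#   python client_submit.py add 10
#   python client_submit.py get 10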
|
yan-tian/stormutils
|
stormutils/stressTesting/utils/client_submit.py
|
Python
|
gpl-2.0
| 1,742
|
[
"DIRAC"
] |
5233b576ec0a12e9382f45915ebffa134ac4901d3390018de79d8580468677ac
|
#!/usr/bin/python
'''
Create Video Statistics
'''
import os, sys
import csv
import re
import json
import gsutil
import bqutil
import datetime
import process_tracking_logs
from path import path
from collections import OrderedDict
from collections import defaultdict
from check_schema_tracking_log import schema2dict, check_schema
from load_course_sql import find_course_sql_dir, openfile
from unidecode import unidecode
from time import sleep
import urllib2
import gzip
#-----------------------------------------------------------------------------
# CONSTANTS
#-----------------------------------------------------------------------------
VIDEO_LENGTH = 'video_length'
VIDEO_ID = 'youtube_id'
YOUTUBE_PARTS = "contentDetails,statistics"
MIN_IN_SECS = 60
HOURS_IN_SECS = MIN_IN_SECS * 60
DAYS_IN_SECS = HOURS_IN_SECS * 24
WEEKS_IN_SECS = DAYS_IN_SECS * 7
MONTHS_IN_SECS = WEEKS_IN_SECS * 4
YEAR_IN_SECS = MONTHS_IN_SECS * 12
TABLE_VIDEO_STATS = 'video_stats'
TABLE_VIDEO_STATS_PER_DAY = 'video_stats_day'
TABLE_VIDEO_AXIS = 'video_axis'
TABLE_COURSE_AXIS = 'course_axis'
FILENAME_VIDEO_AXIS = TABLE_VIDEO_AXIS + ".json.gz"
SCHEMA_VIDEO_AXIS = 'schemas/schema_video_axis.json'
SCHEMA_VIDEO_AXIS_NAME = 'video_axis'
DATE_DEFAULT_START = '20120101'
DATE_DEFAULT_END = datetime.datetime.today().strftime("%Y%m%d")
DATE_DEFAULT_END_NEW = datetime.datetime.today().strftime("%Y-%m-%d")
#-----------------------------------------------------------------------------
# METHODS
#-----------------------------------------------------------------------------
def analyze_videos(course_id, api_key=None, basedir=None,
datedir=None, force_recompute=False,
use_dataset_latest=False):
make_video_stats(course_id, api_key, basedir, datedir, force_recompute, use_dataset_latest)
pass # Add new video stat methods here
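# Usage sketch (assumed; the course_id, key, and paths are illustrative only):
#   analyze_videos("MITx/6.002x/2013_Spring", api_key=API_KEY,
#                  basedir="DATA", datedir="2014-01-01")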
def make_video_stats(course_id, api_key, basedir, datedir, force_recompute, use_dataset_latest):
'''
Create Video stats for Videos Viewed and Videos Watched.
First create a video axis, based on course axis. Then use tracking logs to count up videos viewed and videos watched
'''
assert api_key is not None, "[analyze videos]: Public API Key is missing from configuration file. Visit https://developers.google.com/console/help/new/#generatingdevkeys for details on how to generate public key, and then add to edx2bigquery_config.py as API_KEY variable"
# Get Course Dir path
basedir = path(basedir or '')
course_dir = course_id.replace('/','__')
lfp = find_course_sql_dir(course_id, basedir, datedir, use_dataset_latest)
# get schema
mypath = os.path.dirname(os.path.realpath(__file__))
SCHEMA_FILE = '%s/%s' % ( mypath, SCHEMA_VIDEO_AXIS )
the_schema = json.loads(open(SCHEMA_FILE).read())[ SCHEMA_VIDEO_AXIS_NAME ]
the_dict_schema = schema2dict(the_schema)
# Create initial video axis
videoAxisExists = False
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
va_date = None
try:
tinfo = bqutil.get_bq_table_info(dataset, TABLE_VIDEO_AXIS )
assert tinfo is not None, "[analyze videos] %s.%s does not exist. First time creating table" % ( dataset, TABLE_VIDEO_AXIS )
videoAxisExists = True
va_date = tinfo['lastModifiedTime'] # datetime
except (AssertionError, Exception) as err:
print "%s --> Attempting to process %s table" % ( str(err), TABLE_VIDEO_AXIS )
sys.stdout.flush()
# get course axis time
ca_date = None
try:
tinfo = bqutil.get_bq_table_info(dataset, TABLE_COURSE_AXIS )
ca_date = tinfo['lastModifiedTime'] # datetime
except (AssertionError, Exception) as err:
pass
if videoAxisExists and (not force_recompute) and ca_date and va_date and (ca_date > va_date):
force_recompute = True
print "video_axis exists, but has date %s, older than course_axis date %s; forcing recompute" % (va_date, ca_date)
sys.stdout.flush()
if not videoAxisExists or force_recompute:
force_recompute = True
createVideoAxis(course_id=course_id, force_recompute=force_recompute, use_dataset_latest=use_dataset_latest)
# Get video lengths
va = bqutil.get_table_data(dataset, TABLE_VIDEO_AXIS)
assert va is not None, "[analyze videos] Possibly no data in video axis table. Check course axis table"
va_bqdata = va['data']
fileoutput = lfp / FILENAME_VIDEO_AXIS
getYoutubeDurations( dataset=dataset, bq_table_input=va_bqdata, api_key=api_key, outputfilename=fileoutput, schema=the_dict_schema, force_recompute=force_recompute )
# upload and import video axis
gsfn = gsutil.gs_path_from_course_id(course_id, use_dataset_latest=use_dataset_latest) / FILENAME_VIDEO_AXIS
gsutil.upload_file_to_gs(fileoutput, gsfn)
table = TABLE_VIDEO_AXIS
bqutil.load_data_to_table(dataset, table, gsfn, the_schema, wait=True)
else:
print "[analyze videos] %s.%s already exists (and force recompute not specified). Skipping step to generate %s using latest course axis" % ( dataset, TABLE_VIDEO_AXIS, TABLE_VIDEO_AXIS )
# Lastly, create video stats
createVideoStats_day( course_id, force_recompute=force_recompute, use_dataset_latest=use_dataset_latest )
createVideoStats( course_id, force_recompute=force_recompute, use_dataset_latest=use_dataset_latest )
#-----------------------------------------------------------------------------
def createVideoAxis(course_id, force_recompute=False, use_dataset_latest=False):
'''
    The video axis depends on the current course axis, and selects rows whose category field is defined as video.
    In addition, the edX video id is extracted with the full path stripped, in order to generalize tracking-log searches for
    video ids (some courses logged the full path beginning with i4x, while others logged only the edX video id), along with
    the youtube id and the chapter name / index for each video.
'''
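    # Illustrative example (hypothetical module_id, not from the source): a
    # course_axis module_id such as "i4x://OrgX/Course1/video/ab.cd" is first
    # normalized ("." -> "_") and then reduced by the REGEXP_EXTRACT below to
    # the trailing video id "ab_cd", so tracking-log searches work whether or
    # not a course logged the full i4x path.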
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
table = TABLE_VIDEO_AXIS
# Get Video results
the_sql = """
SELECT chapters.index as index_chapter,
videos.index as index_video,
videos.category as category,
videos.course_id as course_id,
videos.name as name,
videos.vid_id as video_id,
videos.yt_id as youtube_id,
chapters.name as chapter_name
FROM ( SELECT index, category, course_id, name, chapter_mid,
#REGEXP_REPLACE(module_id, '[.]', '_') as vid_id, # vid id containing full path
REGEXP_EXTRACT(REGEXP_REPLACE(module_id, '[.]', '_'), r'(?:.*\/)(.*)') as vid_id, # Only containing video id
REGEXP_EXTRACT(data.ytid, r'\:(.*)') as yt_id,
FROM [{dataset}.course_axis]
WHERE category = "video") as videos
LEFT JOIN
( SELECT name, module_id, index
FROM [{dataset}.course_axis]
) as chapters
ON videos.chapter_mid = chapters.module_id
ORDER BY videos.index asc
""".format(dataset=dataset)
print "[analyze_videos] Creating %s.%s table for %s" % (dataset, TABLE_VIDEO_AXIS, course_id)
sys.stdout.flush()
try:
tinfo = bqutil.get_bq_table_info(dataset, TABLE_COURSE_AXIS )
assert tinfo is not None, "[analyze videos] %s table depends on %s, which does not exist" % ( TABLE_VIDEO_AXIS, TABLE_COURSE_AXIS )
except (AssertionError, Exception) as err:
print " --> Err: missing %s.%s? Skipping creation of %s" % ( dataset, TABLE_COURSE_AXIS, TABLE_VIDEO_AXIS )
sys.stdout.flush()
return
bqdat = bqutil.get_bq_table(dataset, table, the_sql, force_query=force_recompute,
depends_on=["%s.course_axis" % (dataset)],
)
return bqdat
#-----------------------------------------------------------------------------
def createVideoStats_day( course_id, force_recompute=False, use_dataset_latest=False, skip_last_day=False, end_date=None):
'''
    Create video statistics per day: a video counts as viewed when a user reached a video position > 0, and as watched when
    a user reached a video position > 95% of the total video length.
'''
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
logs = bqutil.course_id2dataset(course_id, dtype='logs')
table = TABLE_VIDEO_STATS_PER_DAY
the_sql = """
SELECT date(time)as date, username,
#module_id as video_id,
#REGEXP_REPLACE(REGEXP_EXTRACT(JSON_EXTRACT(event, '$.id'), r'(?:i4x-)(.*)(?:"$)'), '-', '/') as video_id, # Old method takes full video id path
(case when REGEXP_MATCH( JSON_EXTRACT(event, '$.id') , r'([-])' ) then REGEXP_EXTRACT(REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(JSON_EXTRACT(event, '$.id'), '-', '/'), '"', ''), 'i4x/', ''), r'(?:.*\/)(.*)') else REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(JSON_EXTRACT(event, '$.id'), '-', '/'), '"', ''), 'i4x/', '') end) as video_id, # This takes video id only
max(case when JSON_EXTRACT_SCALAR(event, '$.speed') is not null then float(JSON_EXTRACT_SCALAR(event,'$.speed'))*float(JSON_EXTRACT_SCALAR(event, '$.currentTime')) else float(JSON_EXTRACT_SCALAR(event, '$.currentTime')) end) as position,
FROM {DATASETS}
WHERE (event_type = "play_video" or event_type = "pause_video" or event_type = "stop_video") and
event is not null
group by username, video_id, date
order by date
"""
try:
tinfo = bqutil.get_bq_table_info(dataset, TABLE_VIDEO_STATS_PER_DAY )
assert tinfo is not None, "[analyze_videos] Creating %s.%s table for %s" % (dataset, TABLE_VIDEO_STATS_PER_DAY, course_id)
print "[analyze_videos] Appending latest data to %s.%s table for %s" % (dataset, TABLE_VIDEO_STATS_PER_DAY, course_id)
sys.stdout.flush()
except (AssertionError, Exception) as err:
print str(err)
sys.stdout.flush()
print " --> Missing %s.%s? Attempting to create..." % ( dataset, TABLE_VIDEO_STATS_PER_DAY )
sys.stdout.flush()
pass
print "=== Processing Video Stats Per Day for %s (start %s)" % (course_id, datetime.datetime.now())
sys.stdout.flush()
def gdf(row):
return datetime.datetime.strptime(row['date'], '%Y-%m-%d')
process_tracking_logs.run_query_on_tracking_logs(the_sql, table, course_id, force_recompute=force_recompute,
use_dataset_latest=use_dataset_latest,
get_date_function=gdf,
skip_last_day=skip_last_day)
print "Done with Video Stats Per Day for %s (end %s)" % (course_id, datetime.datetime.now())
print "="*77
sys.stdout.flush()
#-----------------------------------------------------------------------------
def createVideoStats( course_id, force_recompute=False, use_dataset_latest=False ):
'''
    The final step for video stats is to run through the daily video stats table and aggregate videos viewed and videos
    watched over the entire course.
    Join the results with the video axis to get detailed per-video metadata for the dashboard data.
'''
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
logs = bqutil.course_id2dataset(course_id, dtype='logs')
table = TABLE_VIDEO_STATS
the_sql = """
SELECT index_chapter,
index_video,
name,
video_id,
chapter_name,
sum(case when position > 0 then 1 else 0 end) as videos_viewed,
sum(case when position > video_length*0.95 then 1 else 0 end) as videos_watched,
FROM (
SELECT username, index_chapter,
index_video,
name,
video_id,
chapter_name,
max(position) as position,
video_length,
FROM (SELECT * FROM [{dataset}.{videostatsperday}]) as video_log,
LEFT JOIN EACH
(SELECT video_length,
video_id as vid_id,
name,
index_video,
index_chapter,
chapter_name
FROM [{dataset}.{videoaxis}]
) as video_axis
ON video_log.video_id = video_axis.vid_id
WHERE video_id is not null and username is not null
group by username, video_id, name, index_chapter, index_video, chapter_name, video_length
order by video_id asc)
GROUP BY video_id, index_chapter, index_video, name, chapter_name
ORDER BY index_video asc;
""".format(dataset=dataset, videoaxis=TABLE_VIDEO_AXIS, videostatsperday=TABLE_VIDEO_STATS_PER_DAY)
print "[analyze_videos] Creating %s.%s table for %s" % (dataset, TABLE_VIDEO_STATS, course_id)
sys.stdout.flush()
try:
tinfo_va = bqutil.get_bq_table_info( dataset, TABLE_VIDEO_AXIS )
trows_va = int(tinfo_va['numRows'])
tinfo_va_day = bqutil.get_bq_table_info( dataset, TABLE_VIDEO_STATS_PER_DAY )
        trows_va_day = int(tinfo_va_day['numRows'])
assert tinfo_va is not None and trows_va != 0, "[analyze videos] %s table depends on %s, which does not exist" % ( TABLE_VIDEO_STATS, TABLE_VIDEO_AXIS )
assert tinfo_va_day is not None and trows_va_day != 0, "[analyze videos] %s table depends on %s, which does not exist" % ( TABLE_VIDEO_STATS, TABLE_VIDEO_STATS_PER_DAY )
except (AssertionError, Exception) as err:
print " --> Err: missing %s.%s and/or %s (including 0 rows in table)? Skipping creation of %s" % ( dataset, TABLE_VIDEO_AXIS, TABLE_VIDEO_STATS_PER_DAY, TABLE_VIDEO_STATS )
sys.stdout.flush()
return
bqdat = bqutil.get_bq_table(dataset, table, the_sql, force_query=force_recompute,
depends_on=["%s.%s" % (dataset, TABLE_VIDEO_AXIS)],
)
return bqdat
#-----------------------------------------------------------------------------
def createVideoStats_obsolete( course_id, force_recompute=False, use_dataset_latest=False, startDate=DATE_DEFAULT_START, endDate=DATE_DEFAULT_END ):
'''
    Create video statistics: a video counts as viewed when a user reached a video position > 0, and as watched when a user
    reached a video position > 95% of the total video length.
    This was the original method used, but it is not the most efficient, since it queries the entire log set. Instead,
    generate video stats per day, then incrementally append to that data table as the daily log data comes in.
'''
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
logs = bqutil.course_id2dataset(course_id, dtype='logs')
table = TABLE_VIDEO_STATS
the_sql = """
SELECT index_chapter,
index_video,
name,
video_id,
chapter_name,
sum(case when position > 0 then 1 else 0 end) as videos_viewed,
sum(case when position > video_length*0.95 then 1 else 0 end) as videos_watched,
FROM (SELECT username,
#module_id as video_id,
#REGEXP_REPLACE(REGEXP_EXTRACT(JSON_EXTRACT(event, '$.id'), r'(?:i4x-)(.*)(?:"$)'), '-', '/') as video_id, # Old method takes full video id path
(case when REGEXP_MATCH( JSON_EXTRACT(event, '$.id') , r'[-]' ) then REGEXP_EXTRACT(REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(JSON_EXTRACT(event, '$.id'), '-', '/'), '"', ''), 'i4x/', ''), r'(?:.*\/)(.*)') else REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(JSON_EXTRACT(event, '$.id'), '-', '/'), '"', ''), 'i4x/', '') end) as video_id, # This takes video id only
max(case when JSON_EXTRACT_SCALAR(event, '$.speed') is not null then float(JSON_EXTRACT_SCALAR(event,'$.speed'))*float(JSON_EXTRACT_SCALAR(event, '$.currentTime')) else float(JSON_EXTRACT_SCALAR(event, '$.currentTime')) end) as position,
FROM (TABLE_QUERY({logs},
"integer(regexp_extract(table_id, r'tracklog_([0-9]+)')) BETWEEN {start_date} and {end_date}"))
WHERE (event_type = "play_video" or event_type = "pause_video" or event_type = "stop_video") and
event is not null
group by username, video_id
order by username, video_id) as video_log,
LEFT JOIN EACH
(SELECT video_length,
video_id as vid_id,
name,
index_video,
index_chapter,
chapter_name
FROM [{dataset}.{videoaxis}]
) as {videoaxis}
ON video_log.video_id = {videoaxis}.vid_id
WHERE video_id is not null
group by video_id, name, index_chapter, index_video, chapter_name
order by index_video asc;
""".format(dataset=dataset,start_date=startDate,end_date=endDate,logs=logs, videoaxis=TABLE_VIDEO_AXIS)
print "[analyze_videos] Creating %s.%s table for %s" % (dataset, TABLE_VIDEO_STATS, course_id)
sys.stdout.flush()
try:
tinfo = bqutil.get_bq_table_info(dataset, TABLE_VIDEO_AXIS )
assert tinfo is not None, "[analyze videos] %s table depends on %s, which does not exist" % ( TABLE_VIDEO_STATS, TABLE_VIDEO_AXIS )
except (AssertionError, Exception) as err:
print " --> Err: missing %s.%s? Skipping creation of %s" % ( dataset, TABLE_VIDEO_AXIS, TABLE_VIDEO_STATS )
sys.stdout.flush()
return
bqdat = bqutil.get_bq_table(dataset, table, the_sql, force_query=force_recompute,
depends_on=["%s.%s" % (dataset, TABLE_VIDEO_AXIS)],
)
return bqdat
#-----------------------------------------------------------------------------
def get_youtube_api_stats(youtube_id, api_key, part, delay_secs=0):
'''
    YouTube video duration lookup, using the API_KEY specified in the configuration file.
    Visit https://developers.google.com/console/help/new/#generatingdevkeys for details on how to generate a public key.
'''
    if youtube_id == '': return None, None  # match the (contentDetails, statistics) tuple shape expected by callers
sleep(delay_secs)
try:
assert api_key is not None, "[analyze videos] Public API Key is missing from configuration file."
#url = "http://gdata.youtube.com/feeds/api/videos/" + youtube_id + "?v=2&alt=jsonc" # Version 2 API has been deprecated
url = "https://www.googleapis.com/youtube/v3/videos?part=" + part + "&id=" + youtube_id + "&key=" + api_key # Version 3.0 API
data = urllib2.urlopen(url).read().decode("utf-8")
except (AssertionError, Exception) as err:
error = str(err)
if "504" in error or "403" in error:
# rate-limit issue: try again with double timeout
if delay_secs > MIN_IN_SECS:
print "[Giving up] %s\n%s" % (youtube_id, url)
return None, None
new_delay = max(1.0, delay_secs * 2.0)
print "[Rate-limit] <%s> - Trying again with delay: %s" % (youtube_id, str(new_delay))
            return get_youtube_api_stats(youtube_id=youtube_id, api_key=api_key, part=part, delay_secs=new_delay)
else:
print "[Error] <%s> - Unable to get duration.\n%s" % (youtube_id, url)
raise
d = json.loads(data)
contentDetails = d['items'][0]['contentDetails']
statistics = d['items'][0]['statistics']
return contentDetails, statistics
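# Hedged usage sketch for get_youtube_api_stats (hypothetical id, not from the source):
#   content, stats = get_youtube_api_stats('SOME_YT_ID', api_key, YOUTUBE_PARTS)
#   content['duration'] -> ISO 8601 duration string, e.g. u'PT9M30S'
#   stats['viewCount'] -> total view count (returned by the API as a string)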
#-----------------------------------------------------------------------------
def parseISOduration(isodata):
'''
Parses time duration for video length
'''
# see http://en.wikipedia.org/wiki/ISO_8601#Durations
ISO_8601_period_rx = re.compile(
'P' # designates a period
'(?:(?P<years>\d+)Y)?' # years
'(?:(?P<months>\d+)M)?' # months
'(?:(?P<weeks>\d+)W)?' # weeks
'(?:(?P<days>\d+)D)?' # days
'(?:T' # time part must begin with a T
        '(?:(?P<hours>\d+)H)?' # hours
'(?:(?P<minutes>\d+)M)?' # minutes
'(?:(?P<seconds>\d+)S)?' # seconds
')?' # end of time part
)
parsedISOdata = ISO_8601_period_rx.match(isodata).groupdict()
return parsedISOdata
#-----------------------------------------------------------------------------
def getTotalTimeSecs(data):
'''
Convert parsed time duration dict into seconds
'''
sec = 0
for timeData in data:
if data[timeData] is not None:
if timeData == 'years':
sec = sec + int(data[timeData])*YEAR_IN_SECS
if timeData == 'months':
sec = sec + int(data[timeData])*MONTHS_IN_SECS
if timeData == 'weeks':
                sec = sec + int(data[timeData])*WEEKS_IN_SECS
            if timeData == 'days':
                sec = sec + int(data[timeData])*DAYS_IN_SECS
if timeData == 'hours':
sec = sec + int(data[timeData])*HOURS_IN_SECS
if timeData == 'minutes':
sec = sec + int(data[timeData])*MIN_IN_SECS
if timeData == 'seconds':
sec = sec + int(data[timeData])
return sec
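# Worked example for parseISOduration + getTotalTimeSecs (illustrative values,
# not from the source):
#   parseISOduration('PT1H2M30S') -> groups with hours='1', minutes='2',
#   seconds='30' (other fields None)
#   getTotalTimeSecs on that dict -> 1*3600 + 2*60 + 30 = 3750 seconds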
#-----------------------------------------------------------------------------
def findVideoLength(dataset, youtube_id, api_key=None):
'''
Handle video length lookup
'''
try:
youtube_id = unidecode(youtube_id)
except Exception as err:
print "youtube_id is not ascii? ytid=", youtube_id
return 0
try:
assert youtube_id is not None, "[analyze videos] youtube id does not exist"
content, stats = get_youtube_api_stats(youtube_id=youtube_id, api_key=api_key, part=YOUTUBE_PARTS)
durationDict = parseISOduration(content['duration'].encode("ascii","ignore"))
length = getTotalTimeSecs(durationDict)
print "[analyze videos] totalTime for youtube video %s is %s sec" % (youtube_id, length)
except (AssertionError, Exception) as err:
print "Failed to lookup video length for %s! Error=%s, data=%s" % (youtube_id, err, dataset)
length = 0
return length
#-----------------------------------------------------------------------------
def openfile(fn, mode='r'):
'''
Properly open file according to file extension type
'''
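    # NOTE: this local definition shadows the openfile imported from
    # load_course_sql at the top of this module.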
if (not os.path.exists(fn)) and (not fn.endswith('.gz')):
fn += ".gz"
if mode=='r' and not os.path.exists(fn):
return None # failure, no file found, return None
if fn.endswith('.gz'):
return gzip.GzipFile(fn, mode)
return open(fn, mode)
#-----------------------------------------------------------------------------
def getYoutubeDurations(dataset, bq_table_input, api_key, outputfilename, schema, force_recompute):
'''
    Add YouTube durations to the video axis data using YouTube ids, then write the result to the specified local path in
    preparation for Google Storage / BigQuery upload.
'''
fp = openfile(outputfilename, 'w')
linecnt = 0
for row_dict in bq_table_input:
linecnt += 1
verified_row = OrderedDict()
# Initial pass-through of keys in current row
for keys in row_dict:
# Only include keys defined in schema
if keys in schema.keys():
verified_row[keys] = row_dict[keys]
# Recompute Video Length durations
if force_recompute:
verified_row[VIDEO_LENGTH] = findVideoLength( dataset=dataset, youtube_id=verified_row[VIDEO_ID], api_key=api_key )
# Ensure schema type
check_schema(linecnt, verified_row, the_ds=schema, coerce=True)
try:
fp.write(json.dumps(verified_row)+'\n')
except Exception as err:
print "Failed to write line %s! Error=%s, data=%s" % (linecnt, str(err), dataset)
fp.close()
#-----------------------------------------------------------------------------
|
CGNx/edx2bigquery
|
edx2bigquery/make_video_analysis.py
|
Python
|
gpl-2.0
| 25,478
|
[
"VisIt"
] |
601ad1d1a4a8bb985f776edfa0d5a5fc4389a0b7647567ca1ccab83c2550610d
|
"""ndb model definitions
Many of these are similar to models in models.py, which are Django models. We
need these ndb versions for use with runtime: python27, which is required by
endpoints.
"""
import collections
import logging
import math
import os
import webapp2
from google.appengine.api import search
from google.appengine.ext import ndb, blobstore
import general_utils
# TODO: move to global config
SALES_TAX_RATE = float(os.environ.get('SALES_TAX_RATE', 0.0925))
def _SortItemsWithSections(items):
"""Sort a list of items so they look OK in the UI."""
items.sort(
key=lambda x: (x.order_form_section or None, x.name))
prev_section = None
for i in items:
new_section = i.order_form_section or None
if prev_section != new_section:
i.first_in_section = True
prev_section = new_section
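# Illustrative behavior sketch (hypothetical items, not from the source):
# items are ordered by (order_form_section, name), so sections come out
# alphabetically (items without a section first), and the first item of each
# section gets first_in_section=True so the UI can render a section header.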
class _ActiveItems(object):
"""Similar to backreference "*_set" properties in the old db interface."""
def __init__(self, ref, kind_cls):
"""
Args:
ref: instance of a model that is referenced by another kind of model
kind_cls: ndb kind to be selected, like in Key(kind=kind_cls)
"""
self._query = kind_cls.query(kind_cls.site == ref.key,
kind_cls.state != 'new',
kind_cls.state != 'deleted',
kind_cls.state != 'Deleted'
)
def Count(self):
return self._query.count()
def Items(self):
for item in sorted(self._query,
key=lambda o: o.modified, reverse=True):
yield item
def __iter__(self):
return self.Items()
class SearchableModel(ndb.Model):
def get_search_result_headline(self):
return "{} id={}".format(type(self), self.key.integer_id())
def get_search_result_detail_lines(self):
return ["{}: {}".format(prop, getattr(self, prop)) for prop in self._properties if hasattr(self, prop)]
@staticmethod
def get_search_order():
"""override with lower number to search this index first"""
return 1e10
def get_canonical_request_response(self, request):
"""override to build a default response to requests whose search resolve to this model"""
raise NotImplementedError("{} has no canonical request response defined".format(self.__class__.__name__))
def get_indexed_fields(self):
fields = []
for prop_name, prop in self._properties.items():
if not hasattr(self, prop_name):
continue
value = getattr(self, prop_name)
if value is None:
continue
prop_type = type(prop)
value_processor = lambda v: v
if prop_type in (ndb.TextProperty, ndb.StringProperty):
search_type = search.TextField
elif prop_type in (ndb.FloatProperty, ndb.IntegerProperty):
search_type = search.NumberField
elif prop_type in (ndb.DateProperty, ndb.DateTimeProperty):
search_type = search.DateField
elif prop_type == ndb.UserProperty:
search_type = search.TextField
value_processor = lambda v: v.email()
elif prop_type == ndb.KeyProperty:
search_type = search.TextField
value_processor = lambda v: unicode(v.id())
elif prop_type == ndb.BooleanProperty:
search_type = search.AtomField
value_processor = lambda v: unicode(v)
else:
logging.warning("type {} not supported {}".format(prop_type, SearchableModel.__name__))
continue
if prop._repeated:
for s in value:
fields.append(search_type(name=prop_name, value=value_processor(s)))
else:
try:
fields.append(search_type(name=prop_name, value=value_processor(value)))
except TypeError:
raise
return fields
def _post_put_hook(self, future):
put_result = future.get_result() # blocks on put but not a bad idea anyway
model_key_id = put_result.integer_id()
self.index(model_key_id)
def index(self, model_key_id):
index_name = self.__class__.__name__
index = search.Index(index_name)
self.delete_by_model_key_id(model_key_id)
fields = [
search.AtomField(name="model_name", value=index_name),
search.AtomField(name="model_key_id", value=unicode(model_key_id)),
search.TextField(name='headline', value=self.get_search_result_headline())
]
for detail in self.get_search_result_detail_lines():
fields.append(search.TextField(name='details', value=detail))
fields.extend(self.get_indexed_fields())
doc = search.Document(doc_id=unicode(self.key.integer_id()), fields=fields)
index.put(doc)
@classmethod
def delete_by_model_key_id(cls, model_key_id):
index_name = cls.__name__
index = search.Index(index_name)
index.delete(document_ids=map(lambda d: d.doc_id, index.search("model_key_id={}".format(model_key_id))))
@classmethod
def _post_delete_hook(cls, key, future):
cls.delete_by_model_key_id(key.id())
class Jurisdiction(SearchableModel):
"""A jurisdiction name for reporting purposes."""
name = ndb.StringProperty()
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class ProgramType(SearchableModel):
"""
year-independent representation of a program
names are like NRD, Teambuild and Safe
there should only be a handful of these and
they should be relatively static
"""
name = ndb.StringProperty()
@staticmethod
def get_or_create(name):
"""
returns a tuple of the (possibly new) instance and a boolean indicating whether
it was created
WARNING: This method puts the new model if it does not yet exist
:param name: name of the program type
:type name: str
:return: tuple of instance and boolean (true if created, false otherwise)
:rtype: tuple[ProgramType, bool]
"""
created = False
assert isinstance(name, str) or isinstance(name, unicode)
result = ProgramType.query().filter(ProgramType.name == name).get()
if result is None:
created = True
result = ProgramType(name=name)
result.key = ndb.Key(ProgramType, name)
result.put()
return result, created
class Program(SearchableModel):
"""Identifies a program type like "National Rebuilding Day" and its year.
Programs with status 'Active' will be visible to Captains.
The name property is shorthand for the year and program type like "2012 NRD".
"""
ACTIVE_STATUS = "Active"
INACTIVE_STATUS = "Inactive"
STATUSES = (ACTIVE_STATUS, INACTIVE_STATUS)
program_type = ndb.KeyProperty(ProgramType)
year = ndb.IntegerProperty(choices=range(1987, 2500))
status = ndb.StringProperty(choices=STATUSES, default=STATUSES[0])
name = ndb.StringProperty()
def get_sort_key(self):
return -self.year, self.program_type
def put(self, *a, **k):
program_type_name = self.program_type.get().name
self.name = "{} {}".format(self.year, program_type_name)
self.status = self.status or Program.ACTIVE_STATUS
return super(Program, self).put(*a, **k)
@staticmethod
def from_fully_qualified_name(fully_qualified_name):
query = Program.query()
query = query.filter(Program.name == fully_qualified_name)
return query.get()
@staticmethod
def get_or_create(program_type_key, year, status=None):
"""
returns a tuple of the (possibly new) instance and a boolean indicating whether
it was created
WARNING: This method puts the new model if it does not yet exist
:param program_type_key: program type
:type program_type_key: ndb.Key
:param year: year
:type year: int
:param status: status
:type status: str
:return: tuple of instance and boolean (true if created, false otherwise)
:rtype: tuple[Program, bool]
"""
assert isinstance(year, int) or isinstance(year, long)
assert status is None or status in Program.STATUSES
created = False
query = Program.query()
query = query.filter(Program.program_type == program_type_key)
query = query.filter(Program.year == year)
result = query.get()
if result is None:
created = True
result = Program(program_type=program_type_key, year=year, status=status)
result.put()
elif status is not None:
assert result.status == status
return result, created
class Staff(SearchableModel):
"""Minimal variant of the Staff model.
For use in authorization within endpoints.
"""
name = ndb.StringProperty()
email = ndb.StringProperty(required=True)
program_selected = ndb.StringProperty()
program_selected_key = ndb.KeyProperty(kind=Program)
last_welcome = ndb.DateProperty(auto_now=True)
notes = ndb.TextProperty()
since = ndb.DateProperty(auto_now_add=True)
class Captain(SearchableModel):
"""A work captain."""
name = ndb.StringProperty(required=True) # "Joe User"
# Using the UserProperty seems to be more hassle than it's worth.
# I was getting errors about users that didn't exist when loading sample
# data.
email = ndb.StringProperty() # "joe@user.com"
rooms_id = ndb.StringProperty() # "R00011"
phone_mobile = ndb.StringProperty()
phone_work = ndb.StringProperty()
phone_home = ndb.StringProperty()
phone_fax = ndb.StringProperty()
phone_other = ndb.StringProperty()
tshirt_size = ndb.StringProperty(choices=(
'Small',
'Medium',
'Large',
'X-Large',
'2XL',
'3XL'))
notes = ndb.TextProperty()
last_welcome = ndb.DateTimeProperty()
modified = ndb.DateTimeProperty(auto_now=True)
last_editor = ndb.UserProperty(auto_current_user=True)
search_prefixes = ndb.StringProperty(repeated=True)
def put(self, *a, **k):
prefixes = set()
if self.name:
prefixes.add(self.name)
for part in self.name.split():
prefixes.add(part)
for i in xrange(1, 7):
prefixes.add(part[:i])
if self.email:
prefixes.add(self.email)
for i in xrange(1, 7):
prefixes.add(self.email[:i])
self.search_prefixes = [p.lower() for p in prefixes]
return super(Captain, self).put(*a, **k)
def __unicode__(self):
return self.name
def Label(self):
return "%s <%s>" % (self.name, self.email)
class Supplier(SearchableModel):
"""A supplier of Items."""
name = ndb.StringProperty(required=True)
email = ndb.StringProperty()
address = ndb.StringProperty()
phone1 = ndb.StringProperty()
phone2 = ndb.StringProperty()
notes = ndb.TextProperty()
since = ndb.DateProperty(auto_now_add=True)
active = ndb.StringProperty(choices=('Active', 'Inactive'),
default='Active')
visibility = ndb.StringProperty(choices=('Everyone', 'Staff Only'),
default='Everyone')
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class OrderSheet(SearchableModel):
"""Set of items commonly ordered together.
Corresponds to one of the old paper forms, like the Cleaning Supplies form.
"""
name = ndb.StringProperty()
visibility = ndb.StringProperty(choices=('Everyone', 'Staff Only', 'Inactive'),
default='Everyone')
supports_extra_name_on_order = ndb.BooleanProperty(default=False)
supports_internal_invoice = ndb.BooleanProperty(default=False)
code = ndb.StringProperty()
instructions = ndb.TextProperty(default='')
logistics_instructions = ndb.TextProperty(default='')
default_supplier = ndb.KeyProperty(kind=Supplier)
# Choose one of the next three.
delivery_options = ndb.StringProperty(choices=['Yes', 'No'], default='No')
pickup_options = ndb.StringProperty(choices=['Yes', 'No'], default='No')
borrow_options = ndb.StringProperty(choices=['Yes', 'No'], default='No')
retrieval_options = ndb.StringProperty(choices=['Yes', 'No'], default='No')
def __unicode__(self):
return '%s' % (self.name)
def HasLogistics(self):
return (self.delivery_options == 'Yes' or
self.pickup_options == 'Yes' or
self.borrow_options == 'Yes' or
self.retrieval_options == 'Yes')
@property
def item_set(self):
return Item.query(Item.appears_on_order_form == self.key)
class Item(SearchableModel):
"""Represents a type of thing that may be in the inventory or possible to order."""
bar_code_number = ndb.IntegerProperty()
# bar_code_number.unique = True
name = ndb.StringProperty(required=True)
# name.unique = True
appears_on_order_form = ndb.KeyProperty(kind=OrderSheet)
order_form_section = ndb.StringProperty()
description = ndb.StringProperty()
# 'Each' 'Box' 'Pair' etc
measure = ndb.StringProperty(
choices=('Each', 'Roll', 'Bottle', 'Box', 'Pair', 'Board', 'Bundle',
'Bag', 'Ton', 'Yard', 'Sheet', 'Cartridge', 'Tube', 'Tub',
'Sq. Yds.', 'Gallon', 'Section', 'Home', 'Box', 'Drop-off',
'', 'Other'))
# Dollars.
unit_cost = ndb.FloatProperty()
must_be_returned = ndb.StringProperty(choices=['Yes', 'No'], default='No')
picture = ndb.BlobProperty()
thumbnail = ndb.BlobProperty()
supplier = ndb.KeyProperty(kind=Supplier)
supplier_part_number = ndb.StringProperty()
url = ndb.StringProperty()
last_editor = ndb.UserProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
modified = ndb.DateTimeProperty(auto_now=True)
supports_extra_name_on_order = ndb.BooleanProperty(default=False)
def __unicode__(self):
return self.description
def VisibleSortableLabel(self, label):
"""Strips numeric prefixes used for sorting.
Labels may have a digit prefix which is used for sorting, but
should not be shown to users.
"""
if not label:
return ''
parts = label.split()
if len(parts) > 0 and parts[0].isdigit():
return ' '.join(parts[1:])
return label
def VisibleName(self):
return self.VisibleSortableLabel(self.name)
def VisibleOrderFormSection(self):
return self.VisibleSortableLabel(self.order_form_section)
def SupportsName(self):
return (self.supports_extra_name_on_order
or self.appears_on_order_form.get().supports_extra_name_on_order)
class UploadedDocument(ndb.Model):
filename = ndb.StringProperty()
user = ndb.UserProperty(auto_current_user=True)
time = ndb.DateTimeProperty(auto_now=True)
blob_key = ndb.BlobKeyProperty()
@property
def formatted_time(self):
return self.time.strftime("%b %d %Y %H:%M UTC")
@property
def uri(self):
return webapp2.uri_for('DownloadSiteAttachment', blob_key=self.blob_key)
class SiteAttachments(ndb.Model):
one = ndb.KeyProperty(kind=UploadedDocument,
name='Planned Scope of Work',
verbose_name="This is RTP's rough scope of work recommendation for the Construction Captain. "
"This should be reviewed by the Captain prior to first site visit. Captain is to "
"take this scope and adjust it according to what they can realistically commit to."
"<br><br>This document will be on ROOMs prior to Captain Kick off.")
two = ndb.KeyProperty(kind=UploadedDocument,
name='Signed Scope of Work',
verbose_name="It's crucial that Captains have their site owners sign-off on the scope of work "
"prior to any work starting. Captains, after you have walked the property "
"and assessed priorities, please write or type out the scope, review it with "
"site owner, and have them sign up top on the scope of work form to show "
"approval. Please leave them a copy and upload a scanned version here. This is "
"RTP's way of confirming everyone is in agreement. Upload your scanned signed "
"scope of work here.<br><br>Due March 26th for 2018 National Rebuilding Day")
three = ndb.KeyProperty(kind=UploadedDocument,
name='Submitted Scope of Work',
verbose_name="Since signed scopes are usually in a PDF format, we also need a typed out "
"version uploaded. This is important for RTP's reporting purposes. Captains "
"please do your best to also upload a submitted typed scope of work (doc). If "
"only a PDF signed scoped is uploaded, RTP staff will type this info into a "
"submitted scope of work for the Site.<br><br>Also Due March 26th for 2018 "
"National Rebuilding Day.")
four = ndb.KeyProperty(kind=UploadedDocument,
name='Fully Executed Signed Scope of Work',
verbose_name="On National Rebuilding Day (or within a few weeks after), please omplete all "
"\"primary tasks\" on the scope of work, review the completion with the site "
"owner, and have them sign at the bottom of the scope of work form (feel free "
"to use the exact same document that was signed before work started). Please "
"upload \"Fully Executed Signed Scope of Work\" here. This is RTP's way of "
"recognizing that the Scope of work is complete and the site owner is in "
"agreement. This is the final document needed.<br><br>Due May 23rd for 2018 "
"National Rebuilding Today.")
five = ndb.KeyProperty(kind=UploadedDocument,
name='Signed Runner Waiver Form',
verbose_name='')
six = ndb.KeyProperty(kind=UploadedDocument,
name='Scanned Driver\'s Licence',
verbose_name='')
seven = ndb.KeyProperty(kind=UploadedDocument,
name='Car Insurance Form',
verbose_name='')
def set_attachment_by_property_name(self, property_name, document_key):
for prop in self.get_ordered_properties():
if prop._name == property_name:
setattr(self, prop._code_name, document_key)
self.put()
return
raise Exception("No property named {} could be found on {}".format(property_name, self.__class__.__name__))
def get_ordered_file_keys(self):
return [getattr(self, p._code_name) for p in self.get_ordered_properties()]
def get_ordered_properties(self):
return [SiteAttachments.one,
SiteAttachments.two,
SiteAttachments.three,
SiteAttachments.four,
SiteAttachments.five,
SiteAttachments.six,
SiteAttachments.seven]
def get_attachments(self, site_id):
attachments = []
files_and_properties = zip(self.get_ordered_file_keys(), self.get_ordered_properties())
for file_key, property in files_and_properties:
attached_file = file_key.get() if file_key else None
attachments.append(SiteAttachmentHandlerData(
site_id=site_id,
attachments_id=self.key.integer_id(),
attached_file=attached_file,
name=property._name,
verbose_name=property._verbose_name
))
return attachments
class SiteAttachmentHandlerData(object):
def __init__(self, site_id, attachments_id, attached_file, name, verbose_name):
self.site_id = site_id
self.attachments_id = attachments_id
self.attached_file = attached_file
self.name = name
self.verbose_name = verbose_name
self.upload_uri = None
self.remove_uri = None
self.filename = attached_file.filename if attached_file else None
self._build_uris()
def _build_uris(self):
self.upload_uri = blobstore.create_upload_url(webapp2.uri_for(
'UploadSiteAttachment',
site_id=self.site_id,
attachment_type=self.name))
if self.attached_file is not None:
self.download_uri = webapp2.uri_for('DownloadSiteAttachment', blob_key=self.attached_file.blob_key)
self.remove_uri = webapp2.uri_for(
'RemoveSiteAttachment',
site_id=self.site_id,
attachments_id=self.attachments_id,
name=self.name)
class NewSite(SearchableModel):
"""
A work site.
number "17001DAL" reads:
year=2017
program=NRD (encoded as 0)
site=01
jurisdiction=Daly City
"""
number = ndb.StringProperty(required=True) # unique
program = ndb.StringProperty() # reference
program_key = ndb.KeyProperty(kind=Program) # TODO: Set to required after migration
name = ndb.StringProperty() # "Belle Haven"
applicant = ndb.StringProperty()
applicant_home_phone = ndb.StringProperty()
applicant_work_phone = ndb.StringProperty()
applicant_mobile_phone = ndb.StringProperty()
applicant_email = ndb.StringProperty()
rating = ndb.StringProperty()
roof = ndb.StringProperty()
rrp_test = ndb.StringProperty()
rrp_level = ndb.StringProperty()
jurisdiction = ndb.StringProperty()
jurisdiction_choice = ndb.KeyProperty(kind=Jurisdiction)
scope_of_work = ndb.TextProperty()
sponsor = ndb.StringProperty()
street_number = ndb.StringProperty()
city_state_zip = ndb.StringProperty()
budget = ndb.IntegerProperty(default=0)
attachments = ndb.KeyProperty(kind=SiteAttachments)
announcement_subject = ndb.StringProperty(default='Nothing Needs Attention')
announcement_body = ndb.TextProperty(
default="Pat yourself on the back - no items need attention.\n"
"You have a clean bill of health.")
search_prefixes = ndb.StringProperty(repeated=True)
photo_link = ndb.StringProperty()
volunteer_signup_link = ndb.StringProperty()
volunteer_roster = ndb.StringProperty()
latest_computed_expenses = ndb.FloatProperty()
@staticmethod
def get_search_order():
return 0
def get_search_result_headline(self):
return "Site {}".format(self.number)
def get_search_result_detail_lines(self):
return [self.street_number or "N/A", self.city_state_zip]
def add_attachment(self, attachment_name, uploaded_file):
document = UploadedDocument(blob_key=uploaded_file.key(), filename=uploaded_file.filename)
document.put()
if not self.attachments:
attachments = SiteAttachments()
attachments.put()
self.attachments = attachments.key
attachments_model = self.attachments.get() # type: SiteAttachments
attachments_model.set_attachment_by_property_name(attachment_name, document.key)
self.put()
@property
def IsCDBG(self):
return 'CDBG' in self.jurisdiction
@property
def ContactPerson(self):
if self.applicant:
return self.applicant
return self.name
@property
def Orders(self):
return _ActiveItems(self, Order)
@property
def CheckRequests(self):
return _ActiveItems(self, CheckRequest)
@property
def VendorReceipts(self):
return _ActiveItems(self, VendorReceipt)
@property
def InKindDonations(self):
return _ActiveItems(self, InKindDonation)
@property
def StaffTimes(self):
return _ActiveItems(self, StaffTime)
@property
def StaffTimesByPosition(self):
class Pos(object):
def __init__(self):
self.name = None
self.hours = 0.0
self.hours_subtotal = 0.0
self.miles = 0.0
self.mileage_subtotal = 0.0
self.stafftimes = []
@property
def subtotal(self):
return self.hours_subtotal + self.mileage_subtotal
by_pos = collections.defaultdict(Pos)
for s in self.StaffTimes:
name = str(s.position.get())
pos = by_pos[name]
if pos.name is None:
pos.name = name
pos.stafftimes.append(s)
pos.hours += s.hours
pos.hours_subtotal += s.HoursTotal()
pos.miles += s.miles
pos.mileage_subtotal += s.MileageTotal()
return list(by_pos.itervalues())
@property
def ScopeOfWork(self):
if self.scope_of_work:
return self.scope_of_work
sow = ''
for o in self.Orders:
if o.order_sheet.get().name == 'Scope of Work':
sow = o.notes
self.scope_of_work = sow
self.put()
return sow
def SaveTheChildren(self):
for child in (self.Orders, self.CheckRequests,
self.VendorReceipts, self.InKindDonations,
self.StaffTimes):
for obj in child:
obj.put()
def put(self, *a, **k):
if self.jurisdiction_choice:
self.jurisdiction = self.jurisdiction_choice.get().name
# issue213: program should be configurable
if not self.program:
program = self.program_key.get()
self.program = program.fully_qualified_name
prefixes = set()
for f in self.name, self.applicant, self.street_number, self.jurisdiction:
if not f:
continue
prefixes.add(f)
for part in f.split():
prefixes.add(part)
for i in xrange(1, 7):
prefixes.add(part[:i])
if self.number:
prefixes.add(self.number)
for i in xrange(1, 7):
prefixes.add(self.number[:i])
prefixes.add(self.number[2:2 + i])
prefixes.add(self.number[5:5 + i])
self.search_prefixes = [p.lower() for p in prefixes]
k = super(NewSite, self).put(*a, **k)
return k
def Label(self):
return "%s %s" % (self.number, self.name)
def __unicode__(self):
"""Only works if self has been saved."""
return 'Site #%s | %s' % (self.number, self.name)
def StreetAddress(self):
if not self.street_number or not self.city_state_zip:
return "TODO - enter an address"
return '%s, %s' % (' '.join(self.street_number.split()),
' '.join(self.city_state_zip.split()))
def NeedsAttention(self):
return self.announcement_subject is not None
@property
def sitecaptain_set(self):
return SiteCaptain.query(SiteCaptain.site == self.key)
def OrderTotal(self):
"""Only works if self has been saved."""
cost = sum(order.GrandTotal() for order in self.Orders)
return cost
@property
def order_total(self):
if not hasattr(self, '_order_total'):
self._order_total = self.OrderTotal()
return self._order_total
def CheckRequestTotal(self):
"""Only works if self has been saved."""
return sum(cr.Total() or 0 for cr in self.CheckRequests)
def VendorReceiptTotal(self):
"""Only works if self has been saved."""
return sum(cr.amount or 0 for cr in self.VendorReceipts)
def InKindDonationTotal(self):
"""Only works if self has been saved."""
return sum(cr.Total() or 0 for cr in self.InKindDonations)
def StaffTimeTotal(self):
"""Only works if self has been saved."""
return sum(cr.Total() or 0 for cr in self.StaffTimes)
def RecomputeExpenses(self):
logging.info('Recomputing expenses for %s', self.number)
self.latest_computed_expenses = (
self.order_total +
self.CheckRequestTotal() +
self.StaffTimeTotal() +
self.VendorReceiptTotal())
self.put()
def Expenses(self):
if self.latest_computed_expenses is None:
self.RecomputeExpenses()
return self.latest_computed_expenses
def BudgetRemaining(self):
if self.budget:
return self.budget - self.Expenses()
else:
return 0.
@property
def budget_remaining(self):
if not hasattr(self, '_budget_remaining'):
self._budget_remaining = self.BudgetRemaining()
return self._budget_remaining
@property
def in_the_red(self):
return self.budget_remaining < 0
def BudgetStatement(self):
if self.BudgetRemaining() > 0:
return '$%0.2f unspent budget' % self.BudgetRemaining()
elif self.BudgetRemaining() < 0:
return '$%0.2f over budget' % (-1 * self.BudgetRemaining())
else:
return ''
class SiteCaptain(SearchableModel):
"""Associates a site and a Captain."""
site = ndb.KeyProperty(kind=NewSite, required=True)
captain = ndb.KeyProperty(kind=Captain, required=True)
type = ndb.StringProperty(choices=(
'Construction',
'Team',
'Volunteer',
))
class InvoiceNumber(SearchableModel):
"""Simple counter for invoice numbers.
Currently there's a singleton with a Key(InvoiceNumber, 'global')
"""
next_invoice_number = ndb.IntegerProperty()
class OrderInvoice(SearchableModel):
"""An internal invoice number that an Order can point at.
Parent is the InvoiceNumber that generates the invoice_number value.
"""
invoice_number = ndb.IntegerProperty()
class Order(SearchableModel):
"""A Captain can make an Order for a list of Items."""
site = ndb.KeyProperty(kind=NewSite, required=True)
order_sheet = ndb.KeyProperty(kind=OrderSheet, required=True)
program = ndb.StringProperty()
program_key = ndb.KeyProperty(kind=Program)
sub_total = ndb.FloatProperty()
notes = ndb.TextProperty()
state = ndb.StringProperty()
actual_total = ndb.FloatProperty()
reconciliation_notes = ndb.TextProperty(default='')
invoice_date = ndb.DateProperty()
internal_invoice = ndb.KeyProperty(kind=OrderInvoice)
vendor = ndb.KeyProperty(kind=Supplier)
logistics_start = ndb.StringProperty()
logistics_end = ndb.StringProperty()
logistics_instructions = ndb.TextProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
created_by = ndb.UserProperty(auto_current_user_add=True)
modified = ndb.DateTimeProperty(auto_now=True)
last_editor = ndb.UserProperty(auto_current_user=True)
@staticmethod
def get_search_order():
return 1
@property
def name(self):
return '%s %s' % (self.site.get().number, self.order_sheet.get().name)
@property
def OrderItems(self):
return OrderItem.query(OrderItem.order == self.key)
@property
def orderdelivery_set(self):
return OrderDelivery.query(OrderDelivery.order == self.key)
@property
def orderpickup_set(self):
return OrderPickup.query(OrderPickup.order == self.key)
@property
def orderborrow_set(self):
return OrderBorrow.query(OrderBorrow.order == self.key)
@property
def orderretrieval_set(self):
return OrderRetrieval.query(OrderRetrieval.order == self.key)
def put(self, *a, **k):
self.program = self.site.get().program
me = super(Order, self).put(*a, **k)
self.site.get().RecomputeExpenses()
return me
def SetInvoiceNumber(self):
"""Sets order_invoice field to an OrderInvoice with a unique invoice_number."""
if self.internal_invoice:
return
@ndb.transactional()
def _NewInvoiceNumber():
ink = ndb.Key(InvoiceNumber, 'global')
ino = ink.get()
oio = OrderInvoice(invoice_number=ino.next_invoice_number,
parent=ink)
oio.put()
ino.next_invoice_number += 1
ino.put()
return oio.key
self.internal_invoice = _NewInvoiceNumber()
self.put()
def __unicode__(self):
return ' '.join((self.site.get().number, self.site.get().name,
self.order_sheet.get().name,
'%d items' % self.OrderItems.count(),
'$%0.2f' % self.GrandTotal()))
def CanMakeChanges(self):
return self.state in ('new', 'Received')
def VisibleNotes(self):
if self.notes is None:
return ''
return self.notes
def EstimatedTotal(self):
if self.sub_total is None:
return 0.
t = self.sub_total * (1. + SALES_TAX_RATE)
return math.ceil(t * 100.) / 100.
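        # Worked example (illustrative numbers, not from the source): with the
        # default SALES_TAX_RATE of 0.0925, sub_total=10.01 gives
        # t=10.935925, and math.ceil(t * 100.) / 100. rounds up to the next
        # cent: 10.94.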
def GrandTotal(self):
if self.state == 'Deleted':
return 0.
if self.actual_total is not None:
return self.actual_total
else:
return self.EstimatedTotal()
def Total(self):
return self.GrandTotal()
def SalesTax(self):
if self.state == 'Deleted':
return 0.
if self.sub_total is None:
return 0.
return self.sub_total * SALES_TAX_RATE
def LogisticsStart(self):
for od in self.orderdelivery_set:
return "%s (Delivery)" % od.delivery.get().delivery_date
for od in self.orderpickup_set:
return "%s (Pickup)" % od.pickup.get().pickup_date
for od in self.orderborrow_set:
return "%s (Borrow)" % od.borrow.get().borrow_date
for od in self.orderretrieval_set:
return "%s (Drop-off)" % od.retrieval.get().dropoff_date
return None
def LogisticsEnd(self):
for od in self.orderretrieval_set:
return "%s (Retrieval)" % od.retrieval.get().retrieval_date
return None
def LogisticsInstructions(self):
for od in self.orderdelivery_set:
return "%s%s %s%s %s" % (
od.delivery.get().contact and 'Contact ' or '',
od.delivery.get().contact or '',
od.delivery.get().contact_phone and 'at ' or '',
od.delivery.get().contact_phone or '',
od.delivery.get().notes or '')
for od in self.orderpickup_set:
return "%s%s %s%s %s" % (
od.pickup.get().contact and 'Contact ' or '',
od.pickup.get().contact or '',
od.pickup.get().contact_phone and 'at ' or '',
od.pickup.get().contact_phone or '',
od.pickup.get().notes or '')
for od in self.orderborrow_set:
return "%s%s %s%s %s" % (
od.borrow.get().contact and 'Contact ' or '',
od.borrow.get().contact or '',
od.borrow.get().contact_phone and 'at ' or '',
od.borrow.get().contact_phone or '',
od.borrow.get().notes or '')
for od in self.orderretrieval_set:
return "%s%s %s%s %s" % (
od.retrieval.get().contact and 'Contact ' or '',
od.retrieval.get().contact or '',
od.retrieval.get().contact_phone and 'at ' or '',
od.retrieval.get().contact_phone or '',
od.retrieval.get().notes or '')
return ''
def UpdateLogistics(self):
self.logistics_start = self.LogisticsStart()
self.logistics_end = self.LogisticsEnd()
self.logistics_instructions = self.LogisticsInstructions()
self.put()
class OrderItem(SearchableModel):
"""The Items that are in a given Order."""
item = ndb.KeyProperty(kind=Item)
order = ndb.KeyProperty(kind=Order)
supplier = ndb.KeyProperty(kind=Supplier)
quantity = ndb.IntegerProperty(default=0)
quantity_float = ndb.FloatProperty(default=0.0)
name = ndb.StringProperty(default="")
# no default because it's not present for all objects, yet.
unit_cost = ndb.FloatProperty()
def FloatQuantity(self):
"""Returns quantity as a float."""
if self.quantity:
return float(self.quantity)
elif self.quantity_float:
return self.quantity_float
else:
return 0.0
def IsEmpty(self):
quantity = self.FloatQuantity()
return not quantity and not self.name
def SupportsName(self):
return (self.item.get().supports_extra_name_on_order
or self.order.get().order_sheet.get().supports_extra_name_on_order)
def VisibleQuantity(self):
quantity = self.FloatQuantity()
if quantity:
if quantity % 1 == 0:
return str(int(quantity))
else:
return str(quantity)
else:
return ''
def VisibleCost(self):
quantity = self.FloatQuantity()
unit_cost = self.item.get().unit_cost
if quantity and not unit_cost:
return '0'
if quantity and unit_cost:
return '%.2f' % (quantity * unit_cost)
else:
return ''
class Delivery(SearchableModel):
"""Delivery to a site (no retrieval)."""
site = ndb.KeyProperty(kind=NewSite, required=True)
delivery_date = ndb.StringProperty()
contact = ndb.StringProperty()
contact_phone = ndb.StringProperty()
notes = ndb.TextProperty()
class OrderDelivery(SearchableModel):
"""Maps Order to Delivery."""
order = ndb.KeyProperty(kind=Order, required=True)
delivery = ndb.KeyProperty(kind=Delivery, required=True)
class Pickup(SearchableModel):
"""Pick up from RTP warehouse."""
site = ndb.KeyProperty(kind=NewSite, required=True)
pickup_date = ndb.StringProperty()
return_date = ndb.StringProperty()
contact = ndb.StringProperty()
contact_phone = ndb.StringProperty()
notes = ndb.TextProperty()
class OrderPickup(SearchableModel):
"""Maps Order to Pickup."""
order = ndb.KeyProperty(kind=Order, required=True)
pickup = ndb.KeyProperty(kind=Pickup, required=True)
class Borrow(SearchableModel):
"""Pick up from RTP warehouse."""
site = ndb.KeyProperty(kind=NewSite, required=True)
borrow_date = ndb.StringProperty()
return_date = ndb.StringProperty()
contact = ndb.StringProperty()
contact_phone = ndb.StringProperty()
notes = ndb.TextProperty()
class OrderBorrow(SearchableModel):
"""Maps Order to Borrow."""
order = ndb.KeyProperty(kind=Order, required=True)
borrow = ndb.KeyProperty(kind=Borrow, required=True)
class Retrieval(SearchableModel):
"""Delivery and retrieval to and from a site."""
site = ndb.KeyProperty(kind=NewSite, required=True)
dropoff_date = ndb.StringProperty()
retrieval_date = ndb.StringProperty()
contact = ndb.StringProperty()
contact_phone = ndb.StringProperty()
notes = ndb.TextProperty()
class OrderRetrieval(SearchableModel):
"""Maps Order to Retrieval."""
order = ndb.KeyProperty(kind=Order, required=True)
retrieval = ndb.KeyProperty(kind=Retrieval, required=True)
class InventoryItem(SearchableModel):
"""The Items that are in the inventory."""
item = ndb.KeyProperty(kind=Item)
quantity = ndb.IntegerProperty(default=0)
quantity_float = ndb.FloatProperty(default=0.0)
location = ndb.StringProperty()
available_on = ndb.DateProperty()
last_editor = ndb.UserProperty()
modified = ndb.DateTimeProperty(auto_now=True)
def _GetRateFromArray(default, array, activity_date):
if not array:
return default
activity_date_str = activity_date.isoformat()
rate = default
for dr in sorted(s.split() for s in array):
if activity_date_str < dr[0]:
break
rate = float(dr[1])
return rate
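# Worked example (illustrative values, not from the source): with default=10.0
# and array=[u'2016-01-01 12.0', u'2017-01-01 20.0'], an activity_date of
# 2016-06-15 returns 12.0 (the latest rate whose effective date is on or
# before the activity date), while 2015-12-31 returns the default 10.0.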
class StaffPosition(SearchableModel):
"""Staff positions that have hourly billing."""
position_name = ndb.StringProperty()
    # Defaults possibly superseded by the date-based lists below, and destined to be deprecated once
# all objects have moved to the date-based lists.
hourly_rate = ndb.FloatProperty(default=0.0)
mileage_rate = ndb.FloatProperty(default=0.0)
# Space-separated pairs of date and rate strings, to support
# rates that change over time. The scheme here is to list the effective date of rate changes,
# along with the new rate.
# These are entered in the datastore editor as
# type=Array and a value formatted like
# {
# "values": [
# {
# "stringValue": "2016-01-01 10.0"
# },
# {
# "stringValue": "2017-01-01 20.0"
# }
# ]
# }
# Then the values appear here as unicode strings:
# [u'2016-01-01 10.0', u'2017-01-01 20.0']
hourly_rate_after_date = ndb.StringProperty(repeated=True)
mileage_rate_after_date = ndb.StringProperty(repeated=True)
last_editor = ndb.UserProperty()
modified = ndb.DateTimeProperty(auto_now=True)
@property
def name(self):
return self.position_name
def GetHourlyRate(self, activity_date):
return _GetRateFromArray(self.hourly_rate, self.hourly_rate_after_date, activity_date)
def GetMileageRate(self, activity_date):
return _GetRateFromArray(self.mileage_rate, self.mileage_rate_after_date, activity_date)
def __unicode__(self):
return '%s' % self.position_name
def __str__(self):
return '%s' % self.position_name
class CheckRequest(SearchableModel):
"""A Check Request is a request for reimbursement."""
site = ndb.KeyProperty(kind=NewSite)
captain = ndb.KeyProperty(kind=Captain)
program = ndb.StringProperty()
program_key = ndb.KeyProperty(kind=Program)
payment_date = ndb.DateProperty()
labor_amount = ndb.FloatProperty(default=0.0)
materials_amount = ndb.FloatProperty(default=0.0)
food_amount = ndb.FloatProperty(default=0.0)
description = ndb.TextProperty()
name = ndb.StringProperty()
address = ndb.TextProperty()
tax_id = ndb.StringProperty()
form_of_business = ndb.StringProperty(
choices=('Corporation', 'Partnership', 'Sole Proprietor',
'Don\'t Know'))
state = ndb.StringProperty()
last_editor = ndb.UserProperty(auto_current_user=True)
modified = ndb.DateTimeProperty(auto_now=True)
def put(self, *a, **k):
self.program = self.site.get().program
me = super(CheckRequest, self).put(*a, **k)
self.site.get().RecomputeExpenses()
return me
def Total(self):
return self.labor_amount + self.materials_amount + self.food_amount
class VendorReceipt(SearchableModel):
"""A Vendor Receipt is a report of a purchase outside of ROOMS."""
site = ndb.KeyProperty(kind=NewSite)
captain = ndb.KeyProperty(kind=Captain)
program = ndb.StringProperty()
program_key = ndb.KeyProperty(kind=Program)
purchase_date = ndb.DateProperty()
vendor = ndb.StringProperty()
supplier = ndb.KeyProperty(kind=Supplier)
amount = ndb.FloatProperty(default=0.0)
description = ndb.TextProperty()
state = ndb.StringProperty()
last_editor = ndb.UserProperty()
modified = ndb.DateTimeProperty(auto_now=True)
@property
def name(self):
if self.supplier:
return self.supplier.get().name
return self.vendor
def put(self, *a, **k):
self.program = self.site.get().program
me = super(VendorReceipt, self).put(*a, **k)
self.site.get().RecomputeExpenses()
return me
def Total(self):
return self.amount or 0
class InKindDonation(SearchableModel):
"""An In-kind donation to a site."""
site = ndb.KeyProperty(kind=NewSite)
captain = ndb.KeyProperty(kind=Captain)
program = ndb.StringProperty()
program_key = ndb.KeyProperty(kind=Program)
donation_date = ndb.DateProperty()
donor = ndb.StringProperty()
donor_phone = ndb.StringProperty()
donor_info = ndb.TextProperty()
labor_amount = ndb.FloatProperty(default=0.0)
materials_amount = ndb.FloatProperty(default=0.0)
description = ndb.TextProperty()
budget = ndb.StringProperty(choices=('Normal', 'Roofing'), default='Normal')
state = ndb.StringProperty()
last_editor = ndb.UserProperty()
modified = ndb.DateTimeProperty(auto_now=True)
@property
def name(self):
return self.donor
def put(self, *a, **k):
self.program = self.site.get().program
me = super(InKindDonation, self).put(*a, **k)
self.site.get().RecomputeExpenses()
return me
def Total(self):
if self.labor_amount is None:
self.labor_amount = 0.
if self.materials_amount is None:
self.materials_amount = 0.
return self.labor_amount + self.materials_amount
class StaffTime(SearchableModel):
"""Expense type that represents hourly staff time."""
site = ndb.KeyProperty(kind=NewSite, required=True)
captain = ndb.KeyProperty(kind=Captain)
position = ndb.KeyProperty(kind=StaffPosition)
program = ndb.StringProperty()
program_key = ndb.KeyProperty(kind=Program)
state = ndb.StringProperty()
hours = ndb.FloatProperty(default=0.0)
miles = ndb.FloatProperty(default=0.0)
activity_date = ndb.DateProperty()
description = ndb.TextProperty()
last_editor = ndb.UserProperty(auto_current_user=True)
modified = ndb.DateTimeProperty(auto_now=True)
def put(self, *a, **k):
self.program = self.site.get().program
me = super(StaffTime, self).put(*a, **k)
self.site.get().RecomputeExpenses()
return me
@property
def name(self):
return self.position
def HoursTotal(self):
if not self.position:
logging.warning('empty position %s', str(self))
if self.state in ('new', 'deleted'):
return 0.0
if self.hours is None:
self.hours = 0.0
return self.hours * self.position.get().GetHourlyRate(self.activity_date)
def MileageTotal(self):
if not self.position:
logging.warning('empty position %s', str(self))
if self.state in ('new', 'deleted'):
return 0.0
if self.miles is None:
self.miles = 0.0
return self.miles * self.position.get().GetMileageRate(self.activity_date)
def Total(self):
return self.HoursTotal() + self.MileageTotal()
# I think this can be removed. There is a template and view called "Expense"
# but I don't see anything that references this model. And there are no
# entities in the prod datastore.
class Expense(SearchableModel):
"""A generic expense."""
payee = ndb.KeyProperty(kind=Supplier)
action = ndb.StringProperty(choices=('on account', 'need reimbursement'))
site = ndb.KeyProperty(kind=NewSite)
captain = ndb.KeyProperty(kind=Captain)
program = ndb.StringProperty()
program_key = ndb.KeyProperty(kind=Program)
date = ndb.DateProperty()
amount = ndb.FloatProperty()
description = ndb.TextProperty()
state = ndb.StringProperty()
last_editor = ndb.UserProperty()
modified = ndb.DateTimeProperty(auto_now=True)
def get_all_searchable_models():
searchable_models = general_utils.get_all_subclasses(SearchableModel)
searchable_models.sort(key=lambda m: m.get_search_order())
return searchable_models
SEARCHABLE_MODELS = get_all_searchable_models()
def model_from_search_document(doc):
name_to_model_type_map = {m.__name__: m for m in SEARCHABLE_MODELS}
key_ids = doc['model_key_id']
assert len(key_ids) == 1
model_type_names = doc['model_name']
assert len(model_type_names) == 1
model_type = name_to_model_type_map.get(model_type_names[0].value)
assert model_type is not None
return model_type.get_by_id(int(key_ids[0].value))
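# --- Hedged usage sketch (added for illustration; not part of the original file).
# Demonstrates how model_from_search_document() above resolves a search hit
# back to a datastore entity. The document structure below is a stand-in that
# only mimics the two fields the function reads.
class _FakeField(object):
    def __init__(self, value):
        self.value = value


def _demo_model_from_search_document():
    doc = {'model_key_id': [_FakeField('42')],
           'model_name': [_FakeField('StaffTime')]}
    # Equivalent to StaffTime.get_by_id(42) once the assertions pass.
    return model_from_search_document(doc)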
|
babybunny/rebuildingtogethercaptain
|
gae/room/ndb_models.py
|
Python
|
apache-2.0
| 45,574
|
[
"VisIt"
] |
1283a78a20b76f3fb1821ddd5904cd5e014d1fd488fb49483e74528ec947892e
|
## This code is written by Davide Albanese, <albanese@fbk.eu>.
## (C) 2011 mlpy Developers.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ["Kernel", "KernelLinear", "KernelPolynomial", "KernelGaussian",
"KernelExponential", "KernelSigmoid"]
import sys
if sys.version >= '3':
from . import kernel
else:
import kernel
class Kernel:
"""Base class for kernels.
"""
pass
class KernelLinear(Kernel):
"""Linear kernel, t_i' x_j.
"""
def __init__(self):
pass
def kernel(self, t, x):
return kernel.kernel_linear(t, x)
class KernelPolynomial(Kernel):
"""Polynomial kernel, (gamma t_i' x_j + b)^d.
"""
def __init__(self, gamma=1.0, b=1.0, d=2.0):
self.gamma = gamma
self.b = b
self.d = d
def kernel(self, t, x):
return kernel.kernel_polynomial(t, x,
self.gamma, self.b, self.d)
class KernelGaussian(Kernel):
"""Gaussian kernel, exp(-||t_i - x_j||^2 / 2 * sigma^2).
"""
def __init__(self, sigma=1.0):
self.sigma = sigma
def kernel(self, t, x):
return kernel.kernel_gaussian(t, x,
self.sigma)
class KernelExponential(Kernel):
"""Exponential kernel, exp(-||t_i - x_j|| / 2 * sigma^2).
"""
def __init__(self, sigma=1.0):
self.sigma = sigma
def kernel(self, t, x):
return kernel.kernel_exponential(t, x,
self.sigma)
class KernelSigmoid(Kernel):
"""Sigmoid kernel, tanh(gamma t_i' x_j + b).
"""
def __init__(self, gamma=1.0, b=1.0):
self.gamma = gamma
self.b = b
def kernel(self, t, x):
return kernel.kernel_sigmoid(t, x,
self.gamma, self.b)
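# --- Hedged usage sketch (added for illustration; not part of the original
# module). Assumes mlpy's compiled `kernel` extension is importable, as in a
# normal mlpy build; the returned matrix follows mlpy's convention of one row
# per sample in `t` and one column per sample in `x`.
def _demo_gaussian_kernel():
    import numpy as np
    t = np.array([[0.0, 0.0], [1.0, 1.0]])  # training samples
    x = np.array([[0.0, 1.0]])              # test samples
    # K[i, j] = exp(-||t_i - x_j||^2 / (2 * sigma^2))
    return KernelGaussian(sigma=2.0).kernel(t, x)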
|
manhtuhtk/mlpy
|
mlpy/kernel_class.py
|
Python
|
unlicense
| 2,326
|
[
"Gaussian"
] |
e27c2e517c0bc85159f616018d61b23f2baf4691fef376e7827f2b03fbd43f24
|
## OUTPUT FUNCTIONS
# PART1: STORE DATA in netCDF4 file (output__nc_ini,output_nc,output_nc_fin)
# PART2: STORE INFO in txt file (output_txt_ini, ...
# PART3: STORE PARAMETERS IN .NPY FILE
## STORE DATA
def output_nc_ini():
""" Initialise the netCDF4 file."""
param['output_j'] = 0 # output index
    # store files, dimensions and variables in dictionaries
ncu = dict()
ncv = dict()
nceta = dict()
# creating the netcdf files
ncformat = 'NETCDF4'
ncu['file'] = Dataset(param['output_runpath']+'/u.nc','w',format=ncformat)
ncv['file'] = Dataset(param['output_runpath']+'/v.nc','w',format=ncformat)
nceta['file'] = Dataset(param['output_runpath']+'/eta.nc','w',format=ncformat)
# write general attributes
for ncfile in [ncu,ncv,nceta]:
ncfile['file'].history = 'Created ' + tictoc.ctime(tictoc.time())
ncfile['file'].description = 'Data from: Shallow-water model in double gyre configuration.'
ncfile['file'].details = 'Cartesian coordinates, beta-plane approximation, Arakawa C-grid'
# all param ints floats and strings as global attribute
for key in param.keys():
if (type(param[key]) is int) or (type(param[key]) is float) or (type(param[key]) is str):
ncfile['file'].setncattr(key,param[key])
# create dimensions
ncu['xdim'] = ncu['file'].createDimension('x',param['nx']-1)
ncu['ydim'] = ncu['file'].createDimension('y',param['ny'])
#ncu['tdim'] = ncu['file'].createDimension('t',param['output_tlen'])
ncu['tdim'] = ncu['file'].createDimension('t',None)
ncv['xdim'] = ncv['file'].createDimension('x',param['nx'])
ncv['ydim'] = ncv['file'].createDimension('y',param['ny']-1)
ncv['tdim'] = ncv['file'].createDimension('t',None)
nceta['xdim'] = nceta['file'].createDimension('x',param['nx'])
nceta['ydim'] = nceta['file'].createDimension('y',param['ny'])
nceta['tdim'] = nceta['file'].createDimension('t',None)
# create variables
p = 'f4' # 32-bit precision storing, or f8 for 64bit
for ncfile,var in zip([ncu,ncv,nceta],['u','v','eta']):
# store time as integers as measured in seconds and gets large
ncfile['t'] = ncfile['file'].createVariable('t','i8',('t',),zlib=True,fletcher32=True)
ncfile['x'] = ncfile['file'].createVariable('x','f8',('x',),zlib=True,fletcher32=True)
ncfile['y'] = ncfile['file'].createVariable('y','f8',('y',),zlib=True,fletcher32=True)
ncfile[var] = ncfile['file'].createVariable(var,p,('t','y','x'),zlib=True,fletcher32=True)
# write units
for ncfile in [ncu,ncv,nceta]:
ncfile['t'].units = 's'
ncfile['t'].long_name = 'time'
ncfile['x'].units = 'm'
ncfile['x'].long_name = 'x'
ncfile['y'].units = 'm'
ncfile['y'].long_name = 'y'
ncu['u'].units = 'm/s'
ncv['v'].units = 'm/s'
nceta['eta'].units = 'm'
# write dimensions
for ncfile,var in zip([ncu,ncv,nceta],['u','v','T']):
ncfile['x'][:] = param['x_'+var]
ncfile['y'][:] = param['y_'+var]
# make globally available
global ncfiles
ncfiles = [ncu,ncv,nceta]
output_txt('Output will be stored in '+param['outputpath']+param['runfolder']+' every %i hours.' % (param['output_dt']/3600.))
def output_nc(u,v,eta,t):
""" Writes u,v,eta fields on every nth time step """
# output index j
j = param['output_j'] # for convenience
for ncfile in ncfiles:
ncfile['t'][j] = t
#TODO issue, use unlimited time dimension or not?
ncfiles[0]['u'][j,:,:] = u2mat(u)
ncfiles[1]['v'][j,:,:] = v2mat(v)
ncfiles[2]['eta'][j,:,:] = h2mat(eta)
param['output_j'] += 1
def output_nc_fin():
""" Finalise the output netCDF4 file."""
for ncfile in ncfiles:
ncfile['file'].close()
output_txt('All output written in '+param['runfolder']+'.')
## STORE INFO in TXT FILE
def readable_secs(secs):
""" Returns a human readable string representing seconds in terms of days, hours, minutes, seconds. """
days = np.floor(secs/3600/24)
hours = np.floor((secs/3600) % 24)
minutes = np.floor((secs/60) % 60)
seconds = np.floor(secs%3600%60)
if days > 0:
return ("%id, %ih" % (days,hours))
elif hours > 0:
return ("%ih, %imin" % (hours,minutes))
elif minutes > 0:
return ("%imin, %is" % (minutes,seconds))
else:
return ("%.2fs" % secs)
def duration_est(tic):
""" Saves an estimate for the total time the model integration will take in the output txt file. """
time_togo = (tictoc.time()-tic) / (i+1) * param['Nt']
str1 = 'Model integration will take approximately '+readable_secs(time_togo)+', '
print(str1)
if param['output']:
str2 = 'and is hopefully done on '+tictoc.asctime(tictoc.localtime(tic + time_togo))
output_txt(str1+str2)
print(str2)
def output_txt_ini():
""" Initialise the output txt file for information about the run."""
if param['output']:
param['output_txtfile'] = open(param['output_runpath']+'/info.txt','w')
s = ('Shallow water model run %i initialised on ' % param['run_id'])+tictoc.asctime()+'\n'
param['output_txtfile'].write(s)
def output_scripts():
"""Save all model scripts into a zip file."""
if param['output']:
zf = zipfile.ZipFile(param['output_runpath']+'/scripts.zip','w')
all_scripts = glob.glob('swm_*.py')
[zf.write(script) for script in all_scripts]
zf.close()
output_txt('All model scripts stored in a zipped file.')
def output_txt(s,end='\n'):
""" Write into the output txt file."""
if param['output']:
param['output_txtfile'].write(s+end)
param['output_txtfile'].flush()
def output_txt_fin():
""" Finalise the output txt file."""
if param['output']:
param['output_txtfile'].close()
## STORE PARAMETERS
def output_param():
""" Stores the param dictionary in a .npy file """
if param['output']:
        # filter out 'output_txtfile' as this is an unsaveable text wrapper
dict_tmp = {key:param[key] for key in param.keys() if key != 'output_txtfile'}
np.save(param['output_runpath']+'/param.npy',dict_tmp)
# store also as a more readable .txt file for quick access on the parameters
param_txtfile = open(param['output_runpath']+'/param.txt','w')
for key in dict_tmp.keys():
if not(key in ['x_T','y_T','x_u','y_u','x_v','y_v','x_q','y_q']):
param_txtfile.write(key + 2*'\t' + str(dict_tmp[key]) + '\n')
param_txtfile.close()
        output_txt('Param dictionary stored as .npy and .txt.\n')
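# --- Hedged round-trip sketch (added; not part of the original script).
# The dictionary written by output_param() can be restored with numpy.load();
# allow_pickle=True is required on newer NumPy versions because the file
# stores a Python dict. The path argument below is illustrative.
def load_param(runpath):
    return np.load(runpath + '/param.npy', allow_pickle=True).item()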
|
milankl/swm
|
swm_output.py
|
Python
|
gpl-3.0
| 6,697
|
[
"NetCDF"
] |
acd3abb4c52f91410597aa2135fd90af937a5b742f91bd65a86b8fc7d7307099
|
from __future__ import print_function, division
from sympy.core import S, sympify, Dummy
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.logic import fuzzy_and
from sympy.core.numbers import Integer
from sympy.ntheory import sieve
from math import sqrt as _sqrt
from sympy.core.compatibility import reduce, range
from sympy.core.cache import cacheit
class CombinatorialFunction(Function):
"""Base class for combinatorial functions. """
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import combsimp
expr = combsimp(self)
if measure(expr) <= ratio*measure(self):
return expr
return self
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class factorial(CombinatorialFunction):
"""Implementation of factorial function over nonnegative integers.
By convention (consistent with the gamma function and the binomial
coefficients), factorial of a negative integer is complex infinity.
The factorial is very important in combinatorics where it gives
the number of ways in which `n` objects can be permuted. It also
arises in calculus, probability, number theory, etc.
    There is a strict relation between the factorial and the gamma
    function: in fact, n! = gamma(n+1) for nonnegative integers. A
    rewrite of this kind is very useful for combinatorial simplification.
    Computation of the factorial is done using two algorithms: for
    small arguments a naive product is evaluated, while for bigger
    input the Prime-Swing algorithm is used. It is the fastest known
    algorithm and computes n! via prime factorization of a special
    class of numbers, called here the 'Swing Numbers'.
Examples
========
>>> from sympy import Symbol, factorial, S
>>> n = Symbol('n', integer=True)
>>> factorial(0)
1
>>> factorial(7)
5040
>>> factorial(-2)
zoo
>>> factorial(n)
factorial(n)
>>> factorial(2*n)
factorial(2*n)
>>> factorial(S(1)/2)
factorial(1/2)
See Also
========
factorial2, RisingFactorial, FallingFactorial
"""
def fdiff(self, argindex=1):
from sympy import gamma, polygamma
if argindex == 1:
return gamma(self.args[0] + 1)*polygamma(0, self.args[0] + 1)
else:
raise ArgumentIndexError(self, argindex)
_small_swing = [
1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
35102025, 5014575, 145422675, 9694845, 300540195, 300540195
]
@classmethod
def _swing(cls, n):
if n < 33:
return cls._small_swing[n]
else:
N, primes = int(_sqrt(n)), []
for prime in sieve.primerange(3, N + 1):
p, q = 1, n
while True:
q //= prime
if q > 0:
if q & 1 == 1:
p *= prime
else:
break
if p > 1:
primes.append(p)
for prime in sieve.primerange(N + 1, n//3 + 1):
if (n // prime) & 1 == 1:
primes.append(prime)
L_product = R_product = 1
for prime in sieve.primerange(n//2 + 1, n + 1):
L_product *= prime
for prime in primes:
R_product *= prime
return L_product*R_product
@classmethod
def _recursive(cls, n):
if n < 2:
return 1
else:
return (cls._recursive(n//2)**2)*cls._swing(n)
@classmethod
def eval(cls, n):
n = sympify(n)
if n.is_Number:
if n is S.Zero:
return S.One
elif n is S.Infinity:
return S.Infinity
elif n.is_Integer:
if n.is_negative:
return S.ComplexInfinity
else:
n, result = n.p, 1
if n < 20:
for i in range(2, n + 1):
result *= i
else:
N, bits = n, 0
while N != 0:
if N & 1 == 1:
bits += 1
N = N >> 1
result = cls._recursive(n)*2**(n - bits)
return Integer(result)
def _eval_rewrite_as_gamma(self, n):
from sympy import gamma
return gamma(n + 1)
def _eval_rewrite_as_Product(self, n):
from sympy import Product
if n.is_nonnegative and n.is_integer:
i = Dummy('i', integer=True)
return Product(i, (i, 1, n))
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_positive(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_composite(self):
x = self.args[0]
if x.is_integer:
return (x - 3).is_nonnegative
def _eval_is_real(self):
x = self.args[0]
if x.is_nonnegative or x.is_noninteger:
return True
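# --- Hedged illustration (added; not part of sympy) ---
# The prime-factorization idea behind Prime-Swing can be seen in a simpler
# (but slower) form via Legendre's formula: the exponent of a prime p in n!
# is sum_{k >= 1} floor(n / p**k).
def _factorial_by_primes(n):
    result = 1
    for p in sieve.primerange(2, n + 1):
        e, q = 0, n
        while q:
            q //= p
            e += q
        result *= p**e
    return result  # equals factorial(n) for every nonnegative integer n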
class MultiFactorial(CombinatorialFunction):
pass
class subfactorial(CombinatorialFunction):
"""The subfactorial counts the derangements of n items and is
defined for non-negative integers as::
,
| 1 for n = 0
!n = { 0 for n = 1
| (n - 1)*(!(n - 1) + !(n - 2)) for n > 1
`
It can also be written as int(round(n!/exp(1))) but the recursive
definition with caching is implemented for this function.
This function is generalized to noninteger arguments [2]_ as
.. math:: !x = \Gamma(x + 1, -1)/e
References
==========
.. [1] http://en.wikipedia.org/wiki/Subfactorial
.. [2] http://mathworld.wolfram.com/Subfactorial.html
Examples
========
>>> from sympy import subfactorial
>>> from sympy.abc import n
>>> subfactorial(n + 1)
subfactorial(n + 1)
>>> subfactorial(5)
44
See Also
========
sympy.functions.combinatorial.factorials.factorial,
sympy.utilities.iterables.generate_derangements,
sympy.functions.special.gamma_functions.uppergamma
"""
@classmethod
@cacheit
def _eval(self, n):
if not n:
return S.One
elif n == 1:
return S.Zero
return (n - 1)*(self._eval(n - 1) + self._eval(n - 2))
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg.is_Integer and arg.is_nonnegative:
return cls._eval(arg)
elif arg is S.Infinity:
return arg
def _eval_is_even(self):
if self.args[0].is_odd and self.args[0].is_nonnegative:
return True
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_rewrite_as_uppergamma(self, arg):
from sympy import uppergamma
return uppergamma(arg + 1, -1)/S.Exp1
def _eval_is_nonnegative(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_odd(self):
if self.args[0].is_even and self.args[0].is_nonnegative:
return True
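# --- Hedged illustration (added; not part of sympy) ---
# The closed form mentioned in the docstring, !n == round(n!/e), holds for
# every positive integer n and can be checked against the recursion:
def _check_subfactorial(n_max=10):
    import math
    for n in range(1, n_max + 1):
        assert subfactorial(n) == int(round(math.factorial(n) / math.e))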
class factorial2(CombinatorialFunction):
"""The double factorial n!!, not to be confused with (n!)!
The double factorial is defined for nonnegative integers and for odd
negative integers as::
,
| n*(n - 2)*(n - 4)* ... * 1 for n positive odd
n!! = { n*(n - 2)*(n - 4)* ... * 2 for n positive even
| 1 for n = 0
| (n+2)!! / (n+2) for n negative odd
`
References
==========
.. [1] https://en.wikipedia.org/wiki/Double_factorial
Examples
========
>>> from sympy import factorial2, var
>>> var('n')
n
>>> factorial2(n + 1)
factorial2(n + 1)
>>> factorial2(5)
15
>>> factorial2(-1)
1
>>> factorial2(-5)
1/3
See Also
========
factorial, RisingFactorial, FallingFactorial
"""
@classmethod
def eval(cls, arg):
# TODO: extend this to complex numbers?
if arg.is_Number:
if arg.is_infinite:
return
# This implementation is faster than the recursive one
# It also avoids "maximum recursion depth exceeded" runtime error
if arg.is_nonnegative:
if arg.is_even:
k = arg / 2
return 2 ** k * factorial(k)
return factorial(arg) / factorial2(arg - 1)
if arg.is_even:
raise ValueError("argument must be nonnegative or odd")
return arg * (S.NegativeOne) ** ((1 - arg) / 2) / factorial2(-arg)
def _eval_is_even(self):
# Double factorial is even for every positive even input
n = self.args[0]
if n.is_integer:
if n.is_odd:
return False
if n.is_even:
if n.is_positive:
return True
if n.is_zero:
return False
def _eval_is_integer(self):
# Double factorial is an integer for every nonnegative input, and for
# -1 and -3
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return (n + 3).is_nonnegative
def _eval_is_odd(self):
# Double factorial is odd for every odd input not smaller than -3, and
# for 0
n = self.args[0]
if n.is_odd:
return (n + 3).is_nonnegative
if n.is_even:
if n.is_positive:
return False
if n.is_zero:
return True
def _eval_is_positive(self):
# Double factorial is positive for every nonnegative input, and for
# every odd negative input which is of the form -1-4k for an
# nonnegative integer k
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return ((n + 1) / 2).is_even
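# --- Hedged illustration (added; not part of sympy) ---
# A standard identity tying the double factorial back to the ordinary
# factorial: n! == n!! * (n - 1)!! for every positive integer n.
def _check_factorial2_identity(n_max=12):
    for n in range(1, n_max + 1):
        assert factorial(n) == factorial2(n) * factorial2(n - 1)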
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
"""Rising factorial (also called Pochhammer symbol) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by:
rf(x, k) = x * (x+1) * ... * (x + k-1)
where 'x' can be arbitrary expression and 'k' is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/RisingFactorial.html page.
Examples
========
>>> from sympy import rf
>>> from sympy.abc import x
>>> rf(x, 0)
1
>>> rf(1, 5)
120
>>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
True
See Also
========
factorial, factorial2, FallingFactorial
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN:
return S.NaN
elif x is S.One:
return factorial(k)
elif k.is_Integer:
if k is S.NaN:
return S.NaN
elif k is S.Zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
return reduce(lambda r, i: r*(x + i), range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
return 1/reduce(lambda r, i: r*(x - i), range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k):
from sympy import gamma
return gamma(x + k) / gamma(x)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
def _sage_(self):
import sage.all as sage
return sage.rising_factorial(self.args[0]._sage_(), self.args[1]._sage_())
class FallingFactorial(CombinatorialFunction):
"""Falling factorial (related to rising factorial) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by
ff(x, k) = x * (x-1) * ... * (x - k+1)
where 'x' can be arbitrary expression and 'k' is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/FallingFactorial.html page.
>>> from sympy import ff
>>> from sympy.abc import x
>>> ff(x, 0)
1
>>> ff(5, 5)
120
>>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4)
True
See Also
========
factorial, factorial2, RisingFactorial
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN:
return S.NaN
elif k.is_Integer:
if k is S.NaN:
return S.NaN
elif k is S.Zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
return reduce(lambda r, i: r*(x - i), range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
return 1/reduce(lambda r, i: r*(x + i), range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k):
from sympy import gamma
return (-1)**k * gamma(-x + k) / gamma(-x)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
def _sage_(self):
import sage.all as sage
return sage.falling_factorial(self.args[0]._sage_(), self.args[1]._sage_())
rf = RisingFactorial
ff = FallingFactorial
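# --- Hedged illustration (added; not part of sympy) ---
# Rising and falling factorials are mirror images of each other:
# rf(x, k) == ff(x + k - 1, k) and ff(x, k) == rf(x - k + 1, k).
def _check_rf_ff_mirror():
    from sympy.abc import x
    assert rf(x, 3).expand() == ff(x + 2, 3).expand()
    assert ff(x, 3).expand() == rf(x - 2, 3).expand()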
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
"""Implementation of the binomial coefficient. It can be defined
in two ways depending on its desired interpretation:
C(n,k) = n!/(k!(n-k)!) or C(n, k) = ff(n, k)/k!
First, in a strict combinatorial sense it defines the
number of ways we can choose 'k' elements from a set of
'n' elements. In this case both arguments are nonnegative
integers and binomial is computed using an efficient
algorithm based on prime factorization.
The other definition is generalization for arbitrary 'n',
however 'k' must also be nonnegative. This case is very
useful when evaluating summations.
    For the sake of convenience, for negative 'k' this function
    will return zero no matter what the value of the other argument is.
To expand the binomial when n is a symbol, use either
expand_func() or expand(func=True). The former will keep the
polynomial in factored form while the latter will expand the
polynomial itself. See examples for details.
Examples
========
>>> from sympy import Symbol, Rational, binomial, expand_func
>>> n = Symbol('n', integer=True, positive=True)
>>> binomial(15, 8)
6435
>>> binomial(n, -1)
0
Rows of Pascal's triangle can be generated with the binomial function:
>>> for N in range(8):
... print([ binomial(N, i) for i in range(N + 1)])
...
[1]
[1, 1]
[1, 2, 1]
[1, 3, 3, 1]
[1, 4, 6, 4, 1]
[1, 5, 10, 10, 5, 1]
[1, 6, 15, 20, 15, 6, 1]
[1, 7, 21, 35, 35, 21, 7, 1]
As can a given diagonal, e.g. the 4th diagonal:
>>> N = -4
>>> [ binomial(N, i) for i in range(1 - N)]
[1, -4, 10, -20, 35]
>>> binomial(Rational(5, 4), 3)
-5/128
>>> binomial(Rational(-5, 4), 3)
-195/128
>>> binomial(n, 3)
binomial(n, 3)
>>> binomial(n, 3).expand(func=True)
n**3/6 - n**2/2 + n/3
>>> expand_func(binomial(n, 3))
n*(n - 2)*(n - 1)/6
"""
def fdiff(self, argindex=1):
from sympy import polygamma
if argindex == 1:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
n, k = self.args
return binomial(n, k)*(polygamma(0, n + 1) - \
polygamma(0, n - k + 1))
elif argindex == 2:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
n, k = self.args
return binomial(n, k)*(polygamma(0, n - k + 1) - \
polygamma(0, k + 1))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def _eval(self, n, k):
# n.is_Number and k.is_Integer and k != 1 and n != k
if k.is_Integer:
if n.is_Integer and n >= 0:
n, k = int(n), int(k)
if k > n:
return S.Zero
elif k > n // 2:
k = n - k
M, result = int(_sqrt(n)), 1
for prime in sieve.primerange(2, n + 1):
if prime > n - k:
result *= prime
elif prime > n // 2:
continue
elif prime > M:
if n % prime < k % prime:
result *= prime
else:
N, K = n, k
exp = a = 0
while N > 0:
a = int((N % prime) < (K % prime + a))
N, K = N // prime, K // prime
exp = a + exp
if exp > 0:
result *= prime**exp
return Integer(result)
else:
d = result = n - k + 1
for i in range(2, k + 1):
d += 1
result *= d
result /= i
return result
@classmethod
def eval(cls, n, k):
n, k = map(sympify, (n, k))
d = n - k
if d.is_zero:
return S.One
elif d.is_zero is False:
if (k - 1).is_zero:
return n
elif k.is_negative:
return S.Zero
elif k.is_zero:
return S.One
elif n.is_integer and n.is_nonnegative and d.is_negative:
return S.Zero
if k.is_Integer and k > 0 and n.is_Number:
return cls._eval(n, k)
def _eval_expand_func(self, **hints):
"""
        Function to expand binomial(n, k) when k is a positive integer
Also,
n is self.args[0] and k is self.args[1] while using binomial(n, k)
"""
n = self.args[0]
if n.is_Number:
return binomial(*self.args)
k = self.args[1]
if k.is_Add and n in k.args:
k = n - k
if k.is_Integer:
if k == S.Zero:
return S.One
elif k < 0:
return S.Zero
else:
n = self.args[0]
result = n - k + 1
for i in range(2, k + 1):
result *= n - k + i
result /= i
return result
else:
return binomial(*self.args)
def _eval_rewrite_as_factorial(self, n, k):
return factorial(n)/(factorial(k)*factorial(n - k))
def _eval_rewrite_as_gamma(self, n, k):
from sympy import gamma
return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
def _eval_is_integer(self):
return self.args[0].is_integer and self.args[1].is_integer
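# --- Hedged illustration (added; not part of sympy) ---
# The prime loop in binomial._eval above applies Kummer's theorem: the
# exponent of a prime p in C(n, k) equals the number of carries when adding
# k and n - k in base p. A standalone version of that digit recurrence:
def _binomial_prime_exponent(n, k, p):
    carries = a = 0
    N, K = n, k
    while N > 0:
        a = int((N % p) < (K % p + a))  # a == 1 exactly when this digit carries
        carries += a
        N, K = N // p, K // p
    return carries  # exponent of p in binomial(n, k)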
|
vipulroxx/sympy
|
sympy/functions/combinatorial/factorials.py
|
Python
|
bsd-3-clause
| 21,778
|
[
"VisIt"
] |
d1cfbdd05f2cd65dd7a39a8e2bda902c2e8257bdd831d375b1d8f243c727f250
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import espressomd.lb
import espressomd.lbboundaries
import espressomd.shapes
import unittest as ut
import numpy as np
@ut.skipIf(not espressomd.has_features(["VIRTUAL_SITES"]),
"Features not available, skipping test.")
class LBBoundaryThermoVirtualTest(ut.TestCase):
"""Test slip velocity of boundaries.
In this simple test add wall with a slip verlocity is
added and checkeckt if the fluid obtains the same velocity.
"""
system = espressomd.System(box_l=[10.0, 10.0, 10.0])
system.time_step = 1.0
system.cell_system.skin = 0.1
def tearDown(self):
self.system.part.clear()
for a in self.system.actors:
self.system.actors.remove(a)
def check_virtual(self, fluid_class):
s = self.system
lb_fluid = fluid_class(
agrid=1.0, dens=1.0, visc=1.0, fric=1.0, tau=1.0)
s.actors.add(lb_fluid)
virtual = s.part.add(pos=[0, 0, 0], virtual=True, v=[1, 0, 0])
physical = s.part.add(pos=[0, 0, 0], virtual=False, v=[1, 0, 0])
s.thermostat.set_lb(kT=0, act_on_virtual=False)
s.integrator.run(1)
np.testing.assert_almost_equal(np.copy(virtual.f), [0, 0, 0])
np.testing.assert_almost_equal(np.copy(physical.f), [-1, 0, 0])
s.thermostat.set_lb(kT=0, act_on_virtual=True)
virtual.v = [1, 0, 0]
physical.v = [1, 0, 0]
s.actors.remove(lb_fluid)
lb_fluid = fluid_class(
agrid=1.0, dens=1.0, visc=1.0, fric=1.0, tau=1.0)
s.actors.add(lb_fluid)
virtual.pos = physical.pos
virtual.v = 1, 0, 0
physical.v = 1, 0, 0
s.integrator.run(1)
# The forces are not exactly -1. because the fluid is not at
# rest anymore because of the previous check.
np.testing.assert_almost_equal(np.copy(physical.f), np.copy(virtual.f))
np.testing.assert_almost_equal(np.copy(physical.f), [-1, 0, 0])
np.testing.assert_almost_equal(np.copy(virtual.f), [-1, 0, 0])
@ut.skipIf(not espressomd.has_features(["LB"]),
"Features not available, skipping test.")
def test_lb_cpu(self):
self.check_virtual(espressomd.lb.LBFluid)
@ut.skipIf(not espressomd.has_features(["LB_GPU"]),
"Features not available, skipping test.")
def test_lb_gpu(self):
self.check_virtual(espressomd.lb.LBFluidGPU)
if __name__ == "__main__":
ut.main()
|
hmenke/espresso
|
testsuite/python/lb_thermo_virtual.py
|
Python
|
gpl-3.0
| 3,198
|
[
"ESPResSo"
] |
9a9922943bbdccdae0490c0ff8ae0032e7471e23932d5662a680e75d4d6dd259
|
#
# Copyright 2015 James Kermode (Warwick U.)
# 2015 Till Junge (EPFL)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import multiprocessing
import multiprocessing.managers
try:
import argparse
except ImportError:
from matscipy.logger import screen
    screen.pr('argparse module not available, some functionality disabled')
import abc
import datetime
import sys
class BaseResultManager(object):
"""
Baseclass for job distribution servers. User needs to implement the method process
"""
__metaclass__ = abc.ABCMeta
def __init__(self, port, key):
"""
Keyword Arguments:
port -- listening port
key -- auth_key
verbose -- (default False) if set, outputs debugging messages
"""
self.port = port
self.key = key
self.job_queue = None
self.result_queue = None
self.todo_counter = None
self.work_done_flag = None
self.manager = None
self.done = False
self.create_manager()
def create_manager(self):
"""
creates a multiprocessing.SyncManager
"""
self.job_queue = multiprocessing.JoinableQueue()
self.result_queue = multiprocessing.JoinableQueue()
        # the -1 is for 'uninitialized'
self.todo_counter = multiprocessing.Manager().Value('i', -1)
self.work_done_flag = multiprocessing.Manager().Event()
self.work_done_flag.clear()
# This is based on the examples in the official docs of multiprocessing.
# get_{job|result}_q return synchronized proxies for the actual Queue
# objects.
class JobQueueManager(multiprocessing.managers.SyncManager):
pass
JobQueueManager.register('get_job_queue',
callable=lambda: self.job_queue)
JobQueueManager.register('get_result_queue',
callable=lambda: self.result_queue)
JobQueueManager.register('get_todo_counter',
callable=lambda: self.todo_counter,
proxytype= multiprocessing.managers.ValueProxy)
JobQueueManager.register('get_work_done_event',
callable=lambda: self.work_done_flag,
proxytype= multiprocessing.managers.EventProxy)
self.manager = JobQueueManager(address=('', self.port), authkey=self.key)
self.manager.start()
def set_todo_counter(self, counter):
self.todo_counter.set(counter)
self.done = (counter == 0)
def get_todo_counter(self):
return self.todo_counter.get()
def decrement_todo_counter(self):
new_counter = self.todo_counter.get() - 1
self.done = (new_counter == 0)
        self.todo_counter.set(new_counter)
@classmethod
def get_arg_parser(cls, parser=None):
"""
        create or extend an argparse parser to read command line arguments required by
the server.
Keyword Arguments:
parser -- optional: if provided, parser is extended to include port and
authentication key
"""
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=9995,
help='server listening port')
parser.add_argument('--auth-token', type=str, default='auth_token',
help=('shared information used to authenticate the '
'client to the server'))
return parser
def run(self):
"""
this is the actual serving method. it fills the jobqueue and processes
incoming results
"""
print("Start serving jobs and processing results")
while not self.done:
self.schedule_available_jobs()
self.receive_results()
print()
print("Signalling end of work to worker processes")
self.work_done_flag.set()
print("Waiting for stragglers to hand in results")
self.result_queue.join()
print("Wrapping this up")
self.manager.shutdown()
@abc.abstractmethod
def schedule_available_jobs(self):
"""
to be implemented by inheriting classes. should push available jobs
into the job queue
"""
raise NotImplementedError()
def receive_results(self):
"""
proposed standard result receiver, can be overloaded by inheriting
classes
"""
try:
result = self.result_queue.get()
if result:
value, job_id = result
self.process(value, job_id)
finally:
self.result_queue.task_done()
@abc.abstractmethod
def process(self, value, job_id):
"""
        to be implemented by inheriting classes. Should handle a single
        result ``value`` computed for job ``job_id``
"""
raise NotImplementedError()
class BaseWorker(multiprocessing.Process):
"""
Baseclass for distributed calculation worker threads
"""
__metaclass__ = abc.ABCMeta
def __init__(self, server_address, port, key, verbose=False, walltime=None):
"""
Keyword Arguments:
server_address -- ip or fully qualified hostname
port -- listening port
key -- auth_key
verbose -- (default False) if set, outputs debugging messages
walltime -- (default None) if set, worker commits suicide after
walltime hours
"""
super(BaseWorker, self).__init__()
self.server_address = server_address
self.port = port
self.key = key
self.job_queue = None
self.result_queue = None
self.todo_counter = None
self.work_done_flag = None
self.manager = None
self.create_manager()
self.verbose = verbose
self.commit_suicide = walltime is not None
self.time_of_death = None
if self.commit_suicide:
self.time_of_death = (datetime.datetime.now() +
datetime.timedelta(hours = walltime))
def create_manager(self):
"""
creates a multiprocessing.SyncManager
"""
self.job_queue = multiprocessing.JoinableQueue()
self.result_queue = multiprocessing.JoinableQueue()
        # the -1 is for 'uninitialized'
self.todo_counter = multiprocessing.Manager().Value('i', -1)
self.work_done_flag = multiprocessing.Manager().Event()
self.work_done_flag.clear()
# This is based on the examples in the official docs of multiprocessing.
# get_{job|result}_q return synchronized proxies for the actual Queue
# objects.
class ServerQueueManager(multiprocessing.managers.SyncManager):
pass
ServerQueueManager.register('get_job_queue')
ServerQueueManager.register('get_result_queue')
ServerQueueManager.register('get_todo_counter')
ServerQueueManager.register('get_work_done_event')
self.manager = ServerQueueManager(
address=(self.server_address, self.port),
authkey=self.key)
self.manager.connect()
self.job_queue = self.manager.get_job_queue()
self.result_queue = self.manager.get_result_queue()
self.todo_counter = self.manager.get_todo_counter()
self.work_done_flag = self.manager.get_work_done_event()
return self.manager
@classmethod
def get_arg_parser(cls, parser=None):
"""
        create or extend an argparse parser to read command line arguments required by
        the client.
Keyword Arguments:
parser -- optional: if provided, parser is extended to include port and
authentication key
"""
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('--server_address', metavar='INET_ADDR', type=str,
default='',
help=('job server ip address or fully qualified '
'hostname'))
parser.add_argument('--port', type=int, default=9995,
help='server listening port')
parser.add_argument('--auth-token', type=str, default='auth_token',
help=('shared information used to authenticate the '
'client to the server'))
return parser
def run(self):
"""
standard method that any multiprocessing.Process must implement
"""
if self.verbose:
print("Starting to run")
if self.commit_suicide:
def gotta_commit_suicide():
do_I = datetime.datetime.now() > self.time_of_death
if do_I:
print("Reached walltime, stopping accepting new jobs (zombie)")
return do_I
else:
def gotta_commit_suicide():
return False
while not self.work_done_flag.is_set() and not gotta_commit_suicide():
try:
if self.verbose:
print("trying to get a job")
job_description, job_id = self.job_queue.get()
if self.verbose:
print("got job {}".format(job_id))
try:
self.process(job_description, job_id)
except Exception as err:
print("ERROR:::: {}".format(err))
raise
finally:
try:
self.job_queue.task_done()
except EOFError:
pass
@abc.abstractmethod
def process(self, job_description, job_id):
raise NotImplementedError()
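# --- Hedged usage sketch (added; not part of the original module) ---
# A minimal manager/worker pair built on the base classes above; the job
# (squaring integers) and every name here are illustrative only.
class _SquareManager(BaseResultManager):

    def __init__(self, port, key, values):
        super(_SquareManager, self).__init__(port, key)
        self.values = list(values)
        self.set_todo_counter(len(self.values))

    def schedule_available_jobs(self):
        while self.values:
            value = self.values.pop()
            self.job_queue.put((value, value))  # (job_description, job_id)

    def process(self, value, job_id):
        print("job {} squared to {}".format(job_id, value))
        self.decrement_todo_counter()


class _SquareWorker(BaseWorker):

    def process(self, job_description, job_id):
        self.result_queue.put((job_description**2, job_id))


# Typical wiring: run _SquareManager(9995, b'auth_token', range(10)).run() in
# one process and _SquareWorker('localhost', 9995, b'auth_token').start() in
# each worker process.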
|
libAtoms/matscipy
|
matscipy/distributed_computation.py
|
Python
|
lgpl-2.1
| 10,625
|
[
"Matscipy"
] |
6949b1f22186c56f196bd7e635c74439568e94b863d439d801f4c7ab619fecb6
|
import vtk
import os
def ensight2vtk(file_path, out_dir, file_name,
vtu_out_1="wall_outfile_node.vtu",
vtu_out_2="inlet_outfile_node.vtu", vtu_out_3="interior_outfile_node.vtu",
wall=True, inlet=True, interior=True, interior_name="default_interior-1"):
print(wall, inlet, interior)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
reader = vtk.vtkEnSightGoldBinaryReader()
reader.SetFilePath(file_path)
reader.SetCaseFileName(file_name)
reader.Update()
# solution_writer = vtk.vtkXMLMultiBlockDataWriter()
# solution_writer.SetFileName(os.path.join(out_dir, vtu_out_3))
# solution_writer.SetInputData(reader.GetOutput())
# solution_writer.Write()
append = vtk.vtkAppendFilter()
append.MergePointsOn()
append2 = vtk.vtkAppendFilter()
append2.MergePointsOn()
append3 = vtk.vtkAppendFilter()
append3.MergePointsOn()
time_sets = reader.GetTimeSets()
time_array = time_sets.GetItem(0)
current_time = reader.GetTimeValue()
print(current_time)
if (wall):
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName(os.path.join(out_dir, vtu_out_1))
writer.SetNumberOfTimeSteps(int(time_array.GetNumberOfTuples()))
writer.SetInputConnection(append.GetOutputPort())
writer.Start()
if (inlet):
writer2 = vtk.vtkXMLUnstructuredGridWriter()
writer2.SetFileName(os.path.join(out_dir,vtu_out_2))
writer2.SetNumberOfTimeSteps(int(time_array.GetNumberOfTuples()))
writer2.SetInputConnection(append2.GetOutputPort())
writer2.Start()
if(interior):
writer3 = vtk.vtkXMLUnstructuredGridWriter()
writer3.SetFileName(os.path.join(out_dir,vtu_out_3))
writer3.SetNumberOfTimeSteps(int(time_array.GetNumberOfTuples()))
writer3.SetInputConnection(append3.GetOutputPort())
writer3.Start()
print("Number of Blocks: {0}".format(time_array.GetNumberOfTuples()))
for i in range(time_array.GetNumberOfTuples()):
next_time = time_array.GetTuple(i)[0]
print(next_time)
if( current_time == next_time):
print("first time")
pass
else:
# update the reader
reader.SetTimeValue(next_time)
current_time = next_time
reader.Update()
print("success")
#N = reader.GetNumberOfCellArrays()
N = reader.GetOutput().GetNumberOfBlocks()
for i in range(0, N):
name = reader.GetOutput().GetMetaData(i).Get(vtk.vtkCompositeDataSet.NAME())
if (wall):
if (name.split(':')[-1] == "wall"):
append.AddInputData(reader.GetOutput().GetBlock(i))
print("saving just the {0} in block {1}".format(name, i))
if(inlet):
if (name.split(':')[-1].split('_')[0] in ["inlet", "ica"]):
append2.AddInputData(reader.GetOutput().GetBlock(i))
print("saving just the {0} in block {1}".format(name, i))
if(interior):
if (name == interior_name):
append3.AddInputData(reader.GetOutput().GetBlock(i))
print("saving just the {0} in block {1}".format(name, i))
if(wall):
writer.WriteNextTime(current_time)
if(inlet):
writer2.WriteNextTime(current_time)
if(interior):
writer3.WriteNextTime(current_time)
if (current_time == reader.GetMaximumTimeValue()):
pass
else:
for i in range(0, N):
name = reader.GetOutput().GetMetaData(i).Get(vtk.vtkCompositeDataSet.NAME())
if (wall):
if (name.split(':')[-1] == "wall"):
append.RemoveInputData(reader.GetOutput().GetBlock(i))
#print("removing the {0} in block {1}".format(name, i))
if(inlet):
if (name.split(':')[-1].split('_')[0] in ["inlet", "ica"]):
append2.RemoveInputData(reader.GetOutput().GetBlock(i))
#print("removing the {0} in block {1}".format(name, i))
if(interior):
if (name == interior_name):
append3.RemoveInputData(reader.GetOutput().GetBlock(i))
#print("removing the {0} in block {1}".format(name, i))
if(wall):
writer.Stop()
if(inlet):
writer2.Stop()
if(interior):
writer3.Stop()
if ( __name__ == '__main__' ):
file_path = "/raid/home/ksansom/caseFiles/tcd/case1/fluent/ensight/"
out_dir = "/raid/home/ksansom/caseFiles/tcd/case1/fluent/vtk_out"
file_pattern = "case1-ensight.encas"
#ensight2vtk(file_path, out_dir, file_pattern, interior=False)
ensight2vtk(file_path, out_dir, file_pattern, vtu_out_3="interior_vol_outfile_node.vtu", wall=False, inlet=False, interior=True, interior_name="case1_vmtk_decimate2_fill_trim_ext2")
"""
reader = vtk.vtkEnSightGoldBinaryReader()
reader.SetFilePath("/raid/home/ksansom/caseFiles/mri/VWI_proj/case1/fluent_dsa2/ensight")
reader.SetCaseFileName("case1_dsa-5-6.0000.dat.encas")
reader.Update()
#N = reader.GetNumberOfCellArrays()
N = reader.GetNumberOfVariables()
append = vtk.vtkAppendFilter()
append.MergePointsOn()
for i in range(0, N):
append.AddInputData(reader.GetOutput().GetBlock(i))
append.Update()
umesh = vtk.vtkUnstructuredGrid()
umesh = append.GetOutput()
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName("test.vtu")
writer.SetInputData(umesh)
writer.Update()
"""
|
kayarre/Tools
|
vtk/ensight2vtk_single_encas_tcd.py
|
Python
|
bsd-2-clause
| 5,696
|
[
"VTK"
] |
718ead34d116984b032ab2a684786e254c67366c201cd595f0dc2f88ae03d814
|
# -*- coding: utf-8 -*-
#
# test-aeif_cond_alpha_RK5.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
## Test script for new implementation of AdEx numerics.
# Stefan BUCHER (web@stefan-bucher.ch), July 2013.
import nest
import numpy as np
import timeit
"""
This script compares the two aeif_cond_alpha flavors with respect to speed and precision.
Version 1 is the GSL based 'aeif_cond_alpha' model.
Version 2 is called 'aeif_cond_alpha_RK5' which uses an explicitly coded version
of the RK-45 method as described in Numerical Recipes, Chap. 17.2, Press et al (2007).
Reference is Version 1 at a temporal resolution of 0.001 ms.
The test compares both versions at a resolution of 0.1 ms with the reference. Two errors are computed:
1. the difference in spike times w.r.t. the reference
2. the L2 (root mean squared) error of the voltage response to a step current input.
"""
def run_model(model='aeif_cond_alpha', dt=0.1,reps=1):
nest.ResetKernel()
nest.sr("30 setverbosity")
nest.SetKernelStatus({"overwrite_files": True})
nest.SetStatus([0],[{"resolution": dt}])
nest.SetDefaults('aeif_cond_alpha_RK5',{'HMIN':0.001})
nest.SetDefaults('aeif_cond_alpha_RK5',{'MAXERR':1e-10})
neuron = nest.Create(model,2)
nest.SetStatus(neuron,[{"V_peak": 0.0, "a": 4.0, "b":80.5}])
dc=nest.Create("dc_generator")
nest.SetStatus(dc,[{"amplitude":700.0,
"start":700.0,
"stop":2700.0}])
nest.Connect(dc,[neuron[0]])
sd = nest.Create('spike_detector')
nest.Connect([neuron[0]],sd)
meter0 = nest.Create('multimeter', params={'record_from': ['V_m', 'g_ex','g_in','w'], 'interval' :0.1})
nest.Connect(meter0,[neuron[0]])
nest.SetStatus(meter0,[{"to_file": False, "withtime": True}])
t = timeit.Timer("nest.Simulate(3000)","import nest")
runtime = t.timeit(number=reps)/reps
sptimes = nest.GetStatus(sd,"events")[0]['times']
voltage_trace = nest.GetStatus(meter0,"events")[0]['V_m']
return (runtime,sptimes,voltage_trace)
# Running Simulations
reference = run_model(model='aeif_cond_alpha',dt=0.001,reps=50)
gsl = run_model(model='aeif_cond_alpha',dt=0.1,reps=50)
test = run_model(model='aeif_cond_alpha_RK5',dt=0.1,reps=50)
# Runtime Comparison
print 'Runtime GSL: ' + str(gsl[0])
print 'Test: ' + str(test[0])
print 'Ratio: ' + str(test[0]/gsl[0])
# Spike Time Difference
print 'Spike Times GSL - Reference: ' + str(np.array(gsl[1])-np.array(reference[1]))
print 'Spike Times Test - Reference: ' + str(np.array(test[1])-np.array(reference[1]))
# L2-Norm of Voltage Traces
print 'L2-Error (per s) GSL - Reference: ' + str(np.linalg.norm(gsl[2]-reference[2],ord=2)/3 )
print 'L2-Error (per s) Test - Reference: ' + str(np.linalg.norm(test[2]-reference[2],ord=2)/3 )
print 'L2-Error ratio: ' + str((np.linalg.norm(test[2]-reference[2],ord=2)/3) /(np.linalg.norm(gsl[2]-reference[2],ord=2)/3) )
|
synergetics/nest
|
pynest/examples/test-aeif_cond_alpha_RK5.py
|
Python
|
gpl-2.0
| 3,520
|
[
"NEURON"
] |
770a1c024ee963039591afa8c2e59133a744e5156883070e86f45f80f053e8cc
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v10.enums.types import (
targeting_dimension as gage_targeting_dimension,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v10.common",
marshal="google.ads.googleads.v10",
manifest={
"TargetingSetting",
"TargetRestriction",
"TargetRestrictionOperation",
},
)
class TargetingSetting(proto.Message):
r"""Settings for the targeting-related features, at the campaign
and ad group levels. For more details about the targeting
setting, visit
https://support.google.com/google-ads/answer/7365594
Attributes:
target_restrictions (Sequence[google.ads.googleads.v10.common.types.TargetRestriction]):
The per-targeting-dimension setting to
restrict the reach of your campaign or ad group.
target_restriction_operations (Sequence[google.ads.googleads.v10.common.types.TargetRestrictionOperation]):
The list of operations changing the target
restrictions.
Adding a target restriction with a targeting
dimension that already exists causes the
existing target restriction to be replaced with
the new value.
"""
target_restrictions = proto.RepeatedField(
proto.MESSAGE, number=1, message="TargetRestriction",
)
target_restriction_operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="TargetRestrictionOperation",
)
class TargetRestriction(proto.Message):
r"""The list of per-targeting-dimension targeting settings.
Attributes:
targeting_dimension (google.ads.googleads.v10.enums.types.TargetingDimensionEnum.TargetingDimension):
The targeting dimension that these settings
apply to.
bid_only (bool):
Indicates whether to restrict your ads to show only for the
criteria you have selected for this targeting_dimension, or
to target all values for this targeting_dimension and show
ads based on your targeting in other TargetingDimensions. A
value of ``true`` means that these criteria will only apply
bid modifiers, and not affect targeting. A value of
``false`` means that these criteria will restrict targeting
as well as applying bid modifiers.
This field is a member of `oneof`_ ``_bid_only``.
"""
targeting_dimension = proto.Field(
proto.ENUM,
number=1,
enum=gage_targeting_dimension.TargetingDimensionEnum.TargetingDimension,
)
bid_only = proto.Field(proto.BOOL, number=3, optional=True,)
class TargetRestrictionOperation(proto.Message):
r"""Operation to be performed on a target restriction list in a
mutate.
Attributes:
operator (google.ads.googleads.v10.common.types.TargetRestrictionOperation.Operator):
Type of list operation to perform.
value (google.ads.googleads.v10.common.types.TargetRestriction):
The target restriction being added to or
removed from the list.
"""
class Operator(proto.Enum):
r"""The operator."""
UNSPECIFIED = 0
UNKNOWN = 1
ADD = 2
REMOVE = 3
operator = proto.Field(proto.ENUM, number=1, enum=Operator,)
value = proto.Field(proto.MESSAGE, number=2, message="TargetRestriction",)
__all__ = tuple(sorted(__protobuf__.manifest))
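# --- Hedged usage sketch (added; not part of the generated file) ---
# Builds a TargetingSetting that restricts the AUDIENCE dimension to
# "observation" (bid_only=True). The AUDIENCE member name is an assumption
# about the TargetingDimension enum in this API version.
def _example_targeting_setting():
    dimension = (
        gage_targeting_dimension.TargetingDimensionEnum.TargetingDimension.AUDIENCE
    )
    restriction = TargetRestriction(targeting_dimension=dimension, bid_only=True)
    return TargetingSetting(target_restrictions=[restriction])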
|
googleads/google-ads-python
|
google/ads/googleads/v10/common/types/targeting_setting.py
|
Python
|
apache-2.0
| 4,071
|
[
"VisIt"
] |
aa9c3252088d5324d51f639304d410b3266a8ef000f216875053736810b004cc
|
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2018-06-21 17:01:09
# @Last modified by: José Sánchez-Gallego (gallegoj@uw.edu)
# @Last Modified time: 2018-10-17 00:22:19
from __future__ import absolute_import, division, print_function
import abc
import os
import time
import six
import marvin
import marvin.tools.plate
from marvin.core.exceptions import MarvinError
from marvin.utils.general import parseIdentifier
from astropy.io import fits
from functools import wraps
import sdss_access.path
import sdss_access.sync
__ALL__ = ['VACContainer', 'VACMixIn']
def check_for_vac(f):
''' Decorator to check for and download VAC '''
@wraps(f)
def decorated_function(inst, *args, **kwargs):
if 'path' in kwargs and kwargs['path']:
for kw in kwargs['path'].split('/'):
if len(kw) == 0:
continue
var, value = kw.split('=')
kwargs[var] = value
kwargs.pop('path')
return f(inst, *args, **kwargs)
return decorated_function
class VACContainer(object):
def __repr__(self):
return '<VACContainer ({0})>'.format(', '.join(map(repr, list(self))))
def __dir__(self):
props = []
for value in self.__class__.__dict__.keys():
if not value.startswith('_'):
props.append(value)
return props
def __getitem__(self, value):
return getattr(self, value)
def __iter__(self):
for value in self.__dir__():
yield value
class VACMixIn(object, six.with_metaclass(abc.ABCMeta)):
"""MixIn that allows VAC integration in Marvin.
This parent class provides common tools for downloading data using
sdss_access or directly from the sandbox. `~VACMixIn.get_vacs` returns a
container with properties pointing to all the VACs that subclass from
`.VACMixIn`. In general, VACs can be added to a class in the following way:
.. code-block:: python
from marvin.contrib.vacs.base import VACMixIn
class Maps(MarvinToolsClass):
def __init__(self, *args, **kwargs):
...
self.vacs = VACMixIn.get_vacs(self)
and then the VACs can be accessed as properties in ``my_map.vacs``.
"""
    # Set this to True on your VAC to exclude it from Marvin
_hidden = False
_hidden_for = None
# The name and description of the VAC.
name = None
description = None
url = None
display_name = None
# custom data container for VAC data in summary file(s)
# used by tools.vacs.VACs
data_container = None
def __init__(self):
if not sdss_access.sync.Access:
raise MarvinError('sdss_access is not installed')
else:
self._release = marvin.config.release
# is_public = 'DR' in self._release
# rsync_release = self._release.lower() if is_public else None
self.rsync_access = sdss_access.sync.Access(release=self._release)
# file path for VAC summary file
self.summary_file = None
self.set_summary_file(marvin.config.release)
def __repr__(self):
return '<VAC (name={0}, description={1})>'.format(self.name, self.description)
@abc.abstractmethod
def get_target(self, parent_object):
"""Returns VAC data that matches the `parent_object` target.
This method must be overridden in each subclass of `VACMixIn`. Details
will depend on the exact implementation and the type of VAC, but in
general each version of this method must:
* Check whether the VAC file exists locally.
* If it does not, download it using `~VACMixIn.download_vac`.
* Open the file using the appropriate library.
* Retrieve the VAC data matching ``parent_object``. Usually one will
use attributes in ``parent_object`` such as ``.mangaid`` or
``.plateifu`` to perform the match.
* Return the VAC data in whatever format is appropriate.
"""
pass
@staticmethod
def get_vacs(parent_object):
"""Returns a container with all the VACs subclassing from `VACMixIn`.
Because this method loops over ``VACMixIn.__subclasses__()``, all the
class that inherit from `VACMixIn` and that must be included in the
container need to have been imported before calling
`~VACMixIn.get_vacs`.
Parameters
----------
parent_object : object
The object to which the VACs are being attached. It will be passed
to `~VACMixIn.get_target` when the subclass of `VACMixIn` is
called.
Returns
-------
vac_container : object
An instance of a class that contains just a list of properties, one
            for each of the VACs that subclass from `VACMixIn`.
"""
vac_container = VACContainer()
for subvac in VACMixIn.__subclasses__():
# Excludes VACs from showing up in Plate
if issubclass(parent_object.__class__, marvin.tools.plate.Plate):
continue
# Only shows VACs if in the include list.
if (hasattr(subvac, 'include') and subvac.include is not None and
not issubclass(parent_object.__class__, subvac.include)):
continue
# check if VAC is hidden
if subvac._hidden and (not subvac._hidden_for or
(subvac._hidden_for == parent_object._release)):
continue
# We need to set sv=subvac in the lambda function to prevent
# a cell-var-from-loop issue.
if parent_object._release in subvac.version:
setattr(VACContainer, subvac.name,
property(lambda self, sv=subvac: sv().get_target(parent_object)))
return vac_container
def download_vac(self, name=None, path_params={}, verbose=True):
"""Download the VAC using rsync and returns the local path."""
if name is None:
name = self.name
assert name in self.rsync_access.templates, 'VAC path has not been set in the tree.'
if verbose:
marvin.log.info('downloading file for VAC {0!r}'.format(self.name))
self.rsync_access.remote()
self.rsync_access.add(name, **path_params)
self.rsync_access.set_stream()
self.rsync_access.commit()
paths = self.rsync_access.get_paths()
# adding a millisecond pause for download to finish and file existence to register
time.sleep(0.001)
return paths[0] # doing this for single files, may need to change
def get_path(self, name=None, path_params={}):
"""Returns the local VAC path or False if it does not exist."""
if name is None:
name = self.name
# return the full local path to the file
path = self.rsync_access.full(name, **path_params)
return path
# # check for and expand any wildcards present in the path_params
# if self.rsync_access.any(name, **path_params):
# files = self.rsync_access.expand(name, **path_params)
# return files[0]
# else:
# return False
def file_exists(self, path=None, name=None, path_params={}):
"""Check whether a file exists locally"""
# use the filepath if present
if path:
return os.path.exists(path)
# otherwise use name and path_params
if name is None:
name = self.name
if os.path.exists(self.get_path(name=name, path_params=path_params)):
return True
return False
def check_vac(self, summary_file):
''' Checks the summary file for existence '''
pass
@abc.abstractmethod
def set_summary_file(self, release):
""" Sets the VAC summary file
This method must be overridden in each subclass of `VACMixIn`. Details
will depend on the exact implementation and the type of VAC, but in
general each version of this method must:
* Access the version of your VAC matching the current ``release``
* Define a dictionary of keyword parameters that defines the `tree` path
* Use `~VACMixIn.get_path` to construct the VAC path
* Set that path to the `~VACMixIn.summary_file` attribute
Setting a VAC summary file allows the `~marvin.tools.vacs.VACs` tool to load
the full VAC data. If the VAC does not contain a summary file, this method
should `pass` or return `None`.
"""
pass
def update_path_params(self, params):
''' Update the path_params dictionary with additional parameters '''
assert isinstance(params, dict), 'input parameters must be a dictionary'
self.path_params.update(params)
def get_ancillary_file(self, name, path_params={}):
''' Get a path to an ancillary VAC file '''
path = self.get_path(name, path_params=path_params)
if not path:
path = self.download_vac(name, path_params=path_params)
return path
class VACTarget(object):
''' Customization Class to allow for returning complex target data
This parent class provides a framework for returning more complex data associated
with a given target observation, for example ancillary spectral or image data. In these
cases, returning a target row from the main VAC summary file, or a simple dictionary of values
may not be sufficient. This class can be subclassed and customized to return any
extra functionality or data.
When used, this class provides convenient access to the underlying VAC data as well
as a boolean to indicate if the given target is included in the VAC.
Parameters:
targetid (str):
The target id, usually plateifu or mangaid. Required.
vacfile (str):
The path to the VAC summary file. Required.
Attributes:
targetid (str):
The plateifu or mangaid target designation
data (row):
The extracted row VAC data for the provided targetid
_data (HDU):
the first data HDU of the summary VAC FITS file
_indata (bool):
A boolean indicating if the target is included in the VAC
To use, subclass this class and add a new `__init__` method. Make sure to call the original
class's `__init__` method with `super`.
.. code-block:: python
from marvin.contrib.vacs.base import VACTarget
class ExampleTarget(VACTarget):
def __init__(self, targetid, vacfile):
super(ExampleTarget, self).__init__(targetid, vacfile)
Further customization can now be done, e.g. adding new parameters in the initialization of the
object, adding new methods or attributes, or overriding existing methods, e.g. to customize the
returned `data` attribute.
To access a single HDU from the VAC, use the `_get_data()` method. If you need to access the entire file,
use the `_open_file()` method.
'''
def __init__(self, targetid, vacfile, **kwargs):
self.targetid = targetid
self._ttype = parseIdentifier(targetid)
assert self._ttype in ['plateifu', 'mangaid'], 'Input targetid must be a valid plateifu or mangaid'
self._vacfile = vacfile
self._data = self._get_data(self._vacfile)
self._indata = targetid in self._data[self._ttype]
def __repr__(self):
return 'Target({0})'.format(self.targetid)
@property
def data(self):
''' The data row from a VAC for a specific targetid '''
if not self._indata:
return "No data exists for {0}".format(self.targetid)
idx = self._data[self._ttype] == self.targetid
return self._data[idx]
@staticmethod
def _open_file(vacfile):
''' Opens the full FITS VAC file '''
return fits.open(vacfile)
def _get_data(self, vacfile=None, ext=1):
''' Get only the data from the VAC file from a given extension '''
if not vacfile:
vacfile = self._vacfile
return fits.getdata(vacfile, ext)
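# --- Illustrative usage sketch (hypothetical target id and file path):
#
# target = ExampleTarget('8485-1901', '/path/to/examplevac_summary.fits')
# if target._indata:
#     row = target.data  # summary-file row for this plateifu
# hdus = target._open_file(target._vacfile)  # whole file, if needed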
|
sdss/marvin
|
python/marvin/contrib/vacs/base.py
|
Python
|
bsd-3-clause
| 12,350
|
[
"Brian"
] |
fe91facfbaab8b2a042fdd2c2ea832ae8d71251f9cf9c921cf36e03edb92da69
|
"""
@name: Modules/House/Entertainment/pandora/pandora.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c)2014-2020 by D. Brian Kimmel
@note: Created on Feb 27, 2014
@license: MIT License
@summary: Controls pandora playback thru pianobar.
When PyHouse starts initially, Pandora is inactive.
When "pandora" button is pressed on a web page, pianobar is fired up as a process.
Further Mqtt messages control the pianobar process as needed, volume, next station etc.
When the stop button is pressed on a web page, pianobar is terminated and
this module goes back to its initial state ready for another session.
Now (2018) works with MQTT messages to control Pandora via PianoBar and PatioBar.
"""
__updated__ = '2020-02-17'
__version_info__ = (19, 10, 5)
__version__ = '.'.join(map(str, __version_info__))
# Import system type stuff
from twisted.internet import protocol
from _datetime import datetime # , time
from pathlib import Path
# Import PyMh files and modules.
from Modules.Core.Config.config_tools import Api as configApi
from Modules.Core.Utilities import extract_tools
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.Core.Utilities.extract_tools import extract_quoted
from Modules.House.Entertainment.Pandora import \
PandoraPluginInformation, \
PandoraServiceInformation, \
PandoraDeviceConnectionInformation, \
PandoraServiceControlInformation, \
PandoraDeviceControl, \
PandoraServiceStatus, \
MOD_NAME
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.Pandora ')
PIANOBAR_LOCATION = '/usr/bin/pianobar'
class MqttActions:
""" Process messages to and from this module.
Output Control messages use Mqtt to control the amplifier-type device attached to the Raspberry Pi computer.
Input Control messages come from a node-red computer and carry the listener's (user's) commands for their listening experience.
"""
m_api = None
m_transport = None
def __init__(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
def send_mqtt_status_msg(self, p_message):
l_topic = 'house/entertainment/pandora/status'
self.m_pyhouse_obj.Core.MqttApi.MqttPublish(l_topic, p_message)
def _send_control(self, p_family, p_message):
l_topic = 'house/entertainment/{}/control'.format(p_family)
LOG.debug('Sending control message to A/V Device\n\t{}\n\t{}'.format(l_topic, p_message))
self.m_pyhouse_obj.Core.MqttApi.MqttPublish(l_topic, p_message)
def _decode_status(self, _p_topic, _p_message):
l_logmsg = '\tPandora Status'
return l_logmsg
def _decode_control(self, p_topic, p_message):
""" Decode the Pandora Control message we just received.
Someone (web page via node-red) wants to control pandora in some manner.
ServiceName must match one of the Pandora services on this node.
==>
Topic: pyhouse/<house name>/house/entertainment/pandora/control
Msg:{ 'Time': '2019-05-07T22:19:19.536Z',
'Sender': 'pi-04-pp',
'Status': 'On'}
We may need to issue a message to control connected audio devices.
Zone: 0,1 ...
Power: On, Off
Input: Tv, Game
Volume: 0..100
As a side effect, we need to control Pandora ( PianoBar ) via the control socket
Like:
Dislike:
Skip:
"""
l_logmsg = '\tPandora Control'
l_zone = extract_tools.get_mqtt_field(p_message, 'Zone')
l_input = extract_tools.get_mqtt_field(p_message, 'Input')
l_power = extract_tools.get_mqtt_field(p_message, 'Power')
l_volume = extract_tools.get_mqtt_field(p_message, 'Volume')
l_like = extract_tools.get_mqtt_field(p_message, 'Like')
l_skip = extract_tools.get_mqtt_field(p_message, 'Skip')
if l_zone == None:
l_zone = 0
LOG.debug('{} {}'.format(p_topic, p_message))
# These directly control pianobar(pandora)
if l_power == 'On':
l_logmsg += ' Turn On '
PandoraControl(self.m_pyhouse_obj)._start_pandora(p_message)
A_V_Control(self.m_pyhouse_obj).change_av_device(l_zone, l_power, l_input, l_volume)
return l_logmsg
elif l_power == 'Off':
l_logmsg += ' Turn Off '
PandoraControl(self.m_pyhouse_obj)._halt_pandora(p_message)
A_V_Control(self.m_pyhouse_obj).change_av_device(l_zone, l_power, l_input, l_volume)
return l_logmsg
elif l_volume != None:
l_logmsg += ' Volume to: {}'.format(l_volume)
A_V_Control(self.m_pyhouse_obj).change_av_device(l_zone, l_power, l_input, l_volume)
return l_logmsg
elif l_like == 'LikeYes':
l_logmsg += ' Like '
l_like = 'Yes'
elif l_like == 'LikeNo':
l_logmsg += ' Dislike '
l_like = 'No'
elif l_skip == 'SkipYes':
l_logmsg += ' Skip '
l_skip = 'Yes'
else:
l_logmsg += ' Unknown Pandora Control Message {} {}'.format(p_topic, p_message)
return l_logmsg
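# --- Illustrative control payload (hypothetical values) carrying the fields
# decoded above; it would arrive on pyhouse/<house name>/house/entertainment/pandora/control:
#
# {'Time': '2019-05-07T22:19:19.536Z', 'Sender': 'pi-04-pp',
#  'Power': 'On', 'Zone': 0, 'Volume': 45, 'Input': 'Game'}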
def decode(self, p_msg):
""" Decode the Mqtt message
We currently handle only control messages for Pandora.
We are not interested in other module's status.
==> pyhouse/<house name>/entertainment/pandora/<Action>
where: <action> = control, status
@param p_topic: is the topic after ',,,/pandora/'
@return: the log message with information stuck in there.
"""
l_topic = p_msg.UnprocessedTopic
p_msg.UnprocessedTopic = p_msg.UnprocessedTopic[1:]
p_msg.LogMessage += ' Pandora '
LOG.debug('{} {}'.format(l_topic[0], p_msg.Payload))
if l_topic[0].lower() == 'control':
p_msg.LogMessage += '\tControl: {}\n'.format(self._decode_control(l_topic[0], p_msg.Payload))
elif l_topic[0].lower() == 'status':
p_msg.LogMessage += '\tStatus: {}\n'.format(self._decode_status(l_topic[0], p_msg.Payload))
else:
p_msg.LogMessage += '\tUnknown Pandora sub-topic {}'.format(PrettyFormatAny.form(p_msg.Payload, 'Entertainment msg', 160))
LOG.warning('Unknown Pandora Topic: {}'.format(l_topic[0]))
class ExtractPianobar:
"""
This handles the information coming back from pianobar concerning the playing song.
"""
m_pyhouse_obj = None
m_now_playing = None
def __init__(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
self.m_buffer = bytes()
self.m_now_playing = PandoraServiceStatus()
def _extract_like(self, p_line):
""" The like info comes back as a '<' in the now-playing info.
"""
l_ix = p_line.find(b'<')
if l_ix > 0:
l_like = p_line[l_ix + 1:l_ix + 2].decode('utf-8')
l_remain = p_line[:l_ix] + p_line[l_ix + 3:].strip()
else:
l_like = ''
l_remain = p_line.strip()
return l_like, l_remain
def _extract_station(self, p_line):
""" Extract the station information from the now-playing message.
"""
l_ix = p_line.find(b'@')
l_sta = p_line[l_ix + 1:].decode('utf-8').strip()
l_remain = p_line[:l_ix].strip()
return l_sta, l_remain
def _extract_nowplaying(self, p_obj, p_line):
"""
@param p_obj: is the status
@param p_line: is the line from pianobar
"""
p_line = p_line[2:]
try:
p_obj.From = self.m_pyhouse_obj.Computer.Name
p_obj.DateTimePlayed = '{:%H:%M:%S}'.format(datetime.now())
p_obj.Song, p_line = extract_quoted(p_line, b'\"')
p_obj.Artist, p_line = extract_quoted(p_line)
p_obj.Album, p_line = extract_quoted(p_line)
p_obj.Likability, p_line = self._extract_like(p_line)
p_obj.Station, p_line = self._extract_station(p_line)
p_obj.Status = 'Playing'
except:
pass
return p_obj
def _extract_playtime(self, p_obj, p_line):
"""
b'# -03:00/03:00\r'
b'# -02:29/03:21'
"""
p_line = p_line[1:]
l_line = p_line.strip()
l_ix = l_line.find(b'/')
try:
l_left = l_line[l_ix - 5:l_ix].decode('utf-8')
l_total = l_line[l_ix + 1:].decode('utf-8')
except:
# fall back to placeholder times if the line could not be parsed
l_left = '01:23'
l_total = '06:54'
p_obj.TimeLeft = l_left
p_obj.TimeTotal = l_total
return p_obj
def _extract_errors(self, p_playline):
"""
"""
pass
def extract_line(self, p_line):
"""
b'\x1b[2K|> Station "QuickMix" (1608513919875785623)\n\x1b[2K(i) Receiving new playlist...'
After breaking into lines and stripping off the esc sequence we have ...
b'|> Station "QuickMix" (1608513919875785623)\n\x1b[2K(i) Receiving new playlist...'
b'|> "Mississippi Blues" by "Tim Sparks" on "Sidewalk Blues" <3 @ Acoustic Blues Radio\n'
b'# -02:29/03:09\r'
b' "Carroll County Blues" by "Bryan Sutton" on "Not Too Far From The Tree" @ Bluegrass Radio'
b' "Love Is On The Way" by "Dave Koz" on "Greatest Hits" <3 @ Smooth Jazz Radio'
@param p_line: is an input line from pianobar.
"""
if len(p_line) < 5:
return None
# <ESC>[2K Ansi esc sequence needs stripped off first.
if p_line[0] == 0x1B:
p_line = p_line[4:]
if p_line.startswith(b'Welcome') or \
p_line.startswith(b'Press ? for') or \
p_line.startswith(b'Ok.') or \
p_line.startswith(b'(i)'):
LOG.info(p_line)
return None
if p_line[:1] == b'q':  # compare a bytes slice; p_line[0] would be an int
LOG.info('Quitting Pandora')
return 'Quit'
# We gather the play data here
# We do not send the message yet but will wait for the first time to arrive.
if p_line.startswith(b'|>'): # This is a new playing selection line.
self.m_now_playing = PandoraServiceStatus()
LOG.info("Playing: {}".format(p_line))
self.m_now_playing = self._extract_nowplaying(self.m_now_playing, p_line)
self.m_now_playing.Error = None
MqttActions(self.m_pyhouse_obj).send_mqtt_status_msg(self.m_now_playing)
return self.m_now_playing
# get the time and then send the message of now-playing
if p_line.startswith(b'#'):
self._extract_playtime(self.m_now_playing, p_line)
if self.m_now_playing.TimeTotal == self.m_now_playing.TimeLeft or \
self.m_now_playing.TimeLeft.endswith('00'):
LOG.info(p_line)
MqttActions(self.m_pyhouse_obj).send_mqtt_status_msg(self.m_now_playing)
return self.m_now_playing
if p_line.startswith(b'Network'): # A network error has occurred, restart
LOG.info(p_line)
PandoraControl(self.m_pyhouse_obj)._halt_pandora('Network Error')
PandoraControl(self.m_pyhouse_obj)._start_pandora('Restarting')
return 'Restarted'
LOG.debug("Data = {}".format(p_line))
return None
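# --- Illustrative sketch (not used above): the same now-playing line can be
# parsed with a single regex instead of extract_quoted(); the sample line
# mirrors the formats documented in extract_line().
#
# import re
# NOW_PLAYING = re.compile(
#     rb'\|> "(?P<song>[^"]*)" by "(?P<artist>[^"]*)" on "(?P<album>[^"]*)"'
#     rb'(?: <(?P<like>.))? @ (?P<station>.*)')
# l_match = NOW_PLAYING.match(
#     b'|> "Mississippi Blues" by "Tim Sparks" on "Sidewalk Blues" <3 @ Acoustic Blues Radio')
# if l_match:
#     l_song = l_match.group('song').decode('utf-8')
#     l_station = l_match.group('station').decode('utf-8').strip()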
class PianobarProtocol(protocol.ProcessProtocol):
"""
outReceived - Some data was received from stdout.
errReceived - Some data was received from stderr.
processExited - This will be called when the subprocess exits.
processEnded - Called when the child process exits and all file descriptors associated with it have been closed.
"""
m_pyhouse_obj = None
m_buffer = bytes()
m_extract = None
m_hold = None # Playing info
def __init__(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
self.m_buffer = bytes()
self.m_hold = PandoraServiceStatus() # Clear playing info
self.m_extract = ExtractPianobar(self.m_pyhouse_obj)
def _get_line(self, p_buffer):
""" Get a single line from the buffer.
Remove the first line from the buffer.
"""
p_buffer = p_buffer.lstrip()
l_ix = p_buffer.find(b'\r')
l_line = p_buffer[:l_ix]
p_buffer = p_buffer[l_ix:]
return p_buffer, l_line
def _process_buffer(self):
""" Process the entire buffer - perhaps several, in extract_line
"""
self.m_buffer = self.m_buffer.lstrip()
while self.m_buffer:
self.m_buffer, l_line = self._get_line(self.m_buffer)
l_ret = self.m_extract.extract_line(l_line)
if l_ret == 'Quit':
return
elif l_ret == None:
continue
else:
pass
continue
def connectionMade(self):
""" Write to stdin.
We do not have to do any initialization here.
When we connect, the data flow from pianobar begins,
"""
LOG.info("Connection to PianoBar Made.")
def outReceived(self, p_data):
"""Data received from stdout.
Note: Strings seem to begin with an ansi sequence <esc>[xxx
# The line is a timestamp - every second
(i) This is an information message - Login, new playlist, etc.
"""
self.m_buffer += p_data
self._process_buffer()
def errReceived(self, p_data):
""" Data received from StdErr.
"""
LOG.warning("StdErr received - {}".format(p_data))
def processEnded(self, p_reason):  # Twisted's callback name starts lowercase
"""
"""
LOG.info("PianoBar closed. {}".format(p_reason))
class A_V_Control:
""" Control the A/V device that pandora plays thru.
"""
def __init__(self, p_pyhouse_obj):
"""
"""
self.m_pyhouse_obj = p_pyhouse_obj
def change_av_device(self, p_zone, p_power, p_input, p_volume):
""" Build the control message for the A/V device.
Fill in only what is necessary
"""
l_pandora_plugin = self.m_pyhouse_obj.House.Entertainment[MOD_NAME] # PandoraPluginData()
for l_service in l_pandora_plugin.Services.values():
l_service_control_obj = PandoraDeviceControl() # Use the base control structure
l_service_control_obj.Family = l_family = l_service.ConnectionFamily
l_service_control_obj.Model = l_service.ConnectionModel
l_service_control_obj.From = MOD_NAME
l_service_control_obj.InputName = p_input
l_service_control_obj.Power = p_power
l_service_control_obj.Volume = p_volume
l_service_control_obj.Zone = p_zone
# LOG.debug(PrettyFormatAny.form(l_service_control_obj, 'Obj', 190))
# l_json = encode_json(l_service_control_obj)
# LOG.debug(PrettyFormatAny.form(l_json, 'Json', 190))
MqttActions(self.m_pyhouse_obj)._send_control(l_family, l_service_control_obj)
class PandoraControl(A_V_Control):
""" This section starts and stops pandora.
It also sends control messages to the connected A/V device
"""
m_session_count = 0
m_transport = None
def __init__(self, p_pyhouse_obj):
"""
"""
self.m_pyhouse_obj = p_pyhouse_obj
self.m_session_count = 0
def _start_pianobar(self):
""" Start the pianobar process.
Ensure that only 1 instance is running.
"""
LOG.info('Start Pianobar.')
l_pandora_plugin_obj = self.m_pyhouse_obj.House.Entertainment[MOD_NAME]
if l_pandora_plugin_obj._OpenSessions > 0:
LOG.warning('multiple pianobar start attempts')
return
l_pandora_plugin_obj._OpenSessions += 1
self.m_processProtocol = PianobarProtocol(self.m_pyhouse_obj)
#
l_executable = PIANOBAR_LOCATION
l_args = ('pianobar',)
l_env = None # this will pass <os.environ>
self.m_transport = self.m_pyhouse_obj._Twisted.Reactor.spawnProcess(self.m_processProtocol, l_executable, l_args, l_env)
def _stop_pianobar(self):
""" Stop the pianobar process
Clean up and prepare for starting again.
"""
LOG.info('Halt Pianobar')
self.m_transport.write(b'q')
self.m_transport.loseConnection()
def is_pianobar_installed(self, _p_pyhouse_obj):
""" Check this node to see if pianobar is installed.
If it is, assume we are the player and connect to the A/V equipment to play
"""
l_file = Path(PIANOBAR_LOCATION)
if l_file.is_file():
return True
return False
def _clear_status_fields(self):
"""
Send message to Node-Red to update the status.
All the fields used in node-red must be defined.
"""
l_msg = PandoraServiceStatus()
l_msg.Likability = ''
l_msg.TimeLeft = ''
l_msg.TimeTotal = ''
l_date_time = datetime.now()
l_msg.DateTimePlayed = '{:%H:%M:%S}'.format(l_date_time)
return l_msg
def issue_pandora_stopped_status(self):
"""
Send message to Node-Red to update the status.
"""
l_msg = self._clear_status_fields()
l_msg.Status = 'Stopped'
MqttActions(self.m_pyhouse_obj).send_mqtt_status_msg(l_msg)
def _pandora_starting(self):
"""
Send message to Node-Red to update the status.
"""
l_msg = self._clear_status_fields()
l_msg.Status = 'Starting'
MqttActions(self.m_pyhouse_obj).send_mqtt_status_msg(l_msg)
def _start_pandora(self, p_message):
""" Start playing pandora.
When we receive a proper Mqtt message to start (power on) the pandora player we:
start the pianobar service to play pandora,
send a control message to the entertainment device pandora is hooked to, powering on that device
"""
LOG.info('Play Pandora - {}'.format(p_message))
if not self.is_pianobar_installed(self.m_pyhouse_obj):
LOG.warning('Pianobar is not installed yet pandora is configured.')
return
l_pandora_plugin_obj = self.m_pyhouse_obj.House.Entertainment[MOD_NAME]
if l_pandora_plugin_obj._OpenSessions > 0:
LOG.warning('multiple pianobar start attempts')
return
self._pandora_starting()
l_pandora_plugin_obj._OpenSessions += 1
self.m_processProtocol = PianobarProtocol(self.m_pyhouse_obj)
l_executable = PIANOBAR_LOCATION
l_args = ('pianobar',)
l_env = None # this will pass <os.environ>
self.m_transport = self.m_pyhouse_obj._Twisted.Reactor.spawnProcess(self.m_processProtocol, l_executable, l_args, l_env)
#
for l_service in l_pandora_plugin_obj.Services.values():
l_device_control_obj = PandoraDeviceControl()
l_device_control_obj.Family = l_family = l_service.ConnectionFamily
l_device_control_obj.Model = l_model = l_service.ConnectionModel
l_device_control_obj.From = MOD_NAME
l_device_control_obj.Power = "On"
l_device_control_obj.InputName = l_service.InputName
l_device_control_obj.Zone = '1'
LOG.info('Sending control-command to {}-{}'.format(l_family, l_model))
l_topic = 'house/entertainment/{}/control'.format(l_family)
self.m_pyhouse_obj.Core.MqttApi.MqttPublish(l_topic, l_device_control_obj)
def build_av_control_msg(self, p_service):
"""
"""
l_service_control_obj = PandoraServiceControlInformation()
l_service_control_obj.Family = l_family = p_service.ConnectionFamily
l_service_control_obj.Device = l_name = p_service.ConnectionModel
l_service_control_obj.From = MOD_NAME
l_service_control_obj.Model = p_service.ConnectionModel
l_service_control_obj.Power = "Off"
l_service_control_obj.InputName = p_service.InputName
l_service_control_obj.Volume = p_service.Volume
l_service_control_obj.Zone = '0'
LOG.info('Sending control-command to {}-{}'.format(l_family, l_name))
l_topic = 'house/entertainment/{}/control'.format(l_family)
self.m_pyhouse_obj.Core.MqttApi.MqttPublish(l_topic, l_service_control_obj)
def _halt_pandora(self, p_message):
""" We have received a control message and therefore we stop the pandora player.
This control message may come from a MQTT message or from a timer.
"""
LOG.info('Halt Pandora - {}'.format(p_message))
l_pandora_plugin_obj = self.m_pyhouse_obj.House.Entertainment[MOD_NAME]
l_pandora_plugin_obj._OpenSessions -= 1
try:
self.m_transport.write(b'q')
self.m_transport.closeStdin()
except Exception as e_err:
LOG.warning('Could not close pianobar - {}'.format(e_err))
pass
LOG.info('Service Stopped')
for l_service in l_pandora_plugin_obj.Services.values():
l_service_control_obj = PandoraDeviceControl()
l_service_control_obj.Family = l_family = l_service.Connection.Family
l_service_control_obj.Device = l_name = l_service.Connection.Model
l_service_control_obj.From = MOD_NAME
l_service_control_obj.Model = l_service.Connection.Model
l_service_control_obj.Power = "Off"
l_service_control_obj.InputName = l_service.Connection.Input
l_service_control_obj.Volume = l_service.Volume
l_service_control_obj.Zone = '1'
LOG.info('Sending control-command to {}-{}'.format(l_family, l_name))
l_topic = 'house/entertainment/{}/control'.format(l_family)
self.m_pyhouse_obj.Core.MqttApi.MqttPublish(l_topic, l_service_control_obj)
self.issue_pandora_stopped_status()
def control_audio_device(self, p_audio_device, p_control):
"""
"""
class LocalConfig:
"""
"""
m_config = None
m_pyhouse_obj = None
def __init__(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
self.m_config = configApi(p_pyhouse_obj)
def dump_struct(self):
"""
"""
l_entertain = self.m_pyhouse_obj.House.Entertainment
l_pandora = l_entertain[MOD_NAME]
LOG.debug(PrettyFormatAny.form(l_entertain, 'Entertainment'))
LOG.debug(PrettyFormatAny.form(l_pandora, 'Pandora'))
LOG.debug(PrettyFormatAny.form(l_pandora.Services, 'Pandora'))
#
for _l_key, l_service in l_pandora.Services.items():
LOG.debug(PrettyFormatAny.form(l_service, 'Service'))
if hasattr(l_service, 'Connection'):
LOG.debug(PrettyFormatAny.form(l_service.Connection, 'Connection'))
if hasattr(l_service, 'Host'):
LOG.debug(PrettyFormatAny.form(l_service.Host, 'Host'))
if hasattr(l_service, 'Access'):
LOG.debug(PrettyFormatAny.form(l_service.Access, 'Access'))
def _extract_connection_group(self, p_config):
"""
"""
l_obj = PandoraDeviceConnectionInformation()
try:
for l_key, l_value in p_config.items():
# LOG.debug('Connection Key:{}; Value:{}'.format(l_key, l_value))
setattr(l_obj, l_key, l_value)
return l_obj
except:
# the Connection entry was a bare scalar; use it as the connection name
l_obj.Name = p_config
return l_obj
def _extract_one_service(self, p_config):
"""
"""
# self.dump_struct()
l_required = ['Name', 'Host', 'Connection', 'Access']
l_obj = PandoraServiceInformation()
for l_key, l_value in p_config.items():
if l_key == 'Host':
l_obj.Host = self.m_config.extract_host_group(l_value)
elif l_key == 'Connection':
l_ret = self._extract_connection_group(l_value)
l_obj.Connection = l_ret
elif l_key == 'Access':
l_obj.Access = self.m_config.extract_access_group(l_value)
else:
setattr(l_obj, l_key, l_value)
# Check for data missing from the config file.
for l_key in [l_attr for l_attr in dir(l_obj) if not l_attr.startswith('_') and not callable(getattr(l_obj, l_attr))]:
if getattr(l_obj, l_key) == None and l_key in l_required:
LOG.warning('Pandora Yaml is missing an entry for "{}"'.format(l_key))
return l_obj # For testing.
def _extract_all_services(self, p_config):
"""
"""
l_dict = {}
for l_ix, l_value in enumerate(p_config):
l_service = self._extract_one_service(l_value)
l_dict[l_ix] = l_service
return l_dict
def _extract_all_pandora(self, p_config):
"""
"""
# self.dump_struct()
l_required = ['Name']
l_obj = PandoraPluginInformation()
for l_key, l_value in p_config.items():
if l_key == 'Service':
l_services = self._extract_all_services(l_value)
l_obj.Services = l_services
l_obj.ServiceCount = len(l_services)
else:
setattr(l_obj, l_key, l_value)
# Check for data missing from the config file.
for l_key in [l_attr for l_attr in dir(l_obj) if not l_attr.startswith('_') and not callable(getattr(l_obj, l_attr))]:
if getattr(l_obj, l_key) == None and l_key in l_required:
LOG.warning('Pandora Yaml is missing an entry for "{}"'.format(l_key))
return l_obj # For testing.
def load_yaml_config(self):
""" Read the pandora.yaml file.
"""
LOG.info('Loading Config - Version:{}'.format(__version__))
l_yaml = self.m_config.read_config_file(MOD_NAME)
if l_yaml == None:
LOG.error('{}.yaml is missing.'.format(MOD_NAME))
return None
try:
l_yaml = l_yaml['Pandora']
except:
LOG.warning('The config file does not start with "Pandora:"')
return None
l_pandora = self._extract_all_pandora(l_yaml)
self.m_pyhouse_obj.House.Entertainment[MOD_NAME] = l_pandora
# self.dump_struct()
return l_pandora # for testing purposes
class Api(MqttActions):
m_pyhouse_obj = None
m_local_config = None
def __init__(self, p_pyhouse_obj):
""" Do the housekeeping for the Pandora plugin.
"""
self.m_pyhouse_obj = p_pyhouse_obj
self._add_storage()
self.m_api = self
self.m_local_config = LocalConfig(p_pyhouse_obj)
LOG.info("Api Initialized - Version:{}".format(__version__))
self.m_pandora_control_api = PandoraControl(p_pyhouse_obj)
def _add_storage(self):
self.m_pyhouse_obj.House.Entertainment[MOD_NAME] = {}
def LoadConfig(self):
""" Read the Config for pandora.
"""
LOG.info("Loading Config - Version:{}".format(__version__))
if self.m_pandora_control_api.is_pianobar_installed(self.m_pyhouse_obj):
LOG.info('Pianobar present')
self.m_pyhouse_obj.House.Entertainment[MOD_NAME] = self.m_local_config.load_yaml_config()
else:
LOG.warning('Pianobar Missing')
def Start(self):
""" Start the Pandora plugin since we have it configured.
This does not start playing pandora. That takes a control message to play.
The control message comes from some external source (Alexa, WebPage, SmartPhone) etc.
"""
self.m_pandora_control_api.issue_pandora_stopped_status()
LOG.info("Started - Version:{}".format(__version__))
def SaveConfig(self):
"""
"""
def Stop(self):
""" Stop the Pandora player when we receive a signal to play some other thing.
"""
# delegate to the control Api, which owns the pianobar transport
self.m_pandora_control_api._halt_pandora('Stop')
LOG.info("Stopped.")
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/House/Entertainment/Pandora/pandora.py
|
Python
|
mit
| 28,349
|
[
"Brian"
] |
cfbe714bd9875ee15a1eccc7c6ab50e3ad359cdeebb3c0b2bad087e6d15df3d2
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008-2010, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2, or (at your
# option) any later version, as published by the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
from pysamba.library import *
from pysamba.rpc.credentials import CRED_SPECIFIED
import logging
log = logging.getLogger('p.composite_context')
( COMPOSITE_STATE_INIT, COMPOSITE_STATE_IN_PROGRESS,
COMPOSITE_STATE_DONE, COMPOSITE_STATE_ERROR ) = range(4)
class composite_context(Structure): pass
composite_context_callback = CFUNCTYPE(None, POINTER(composite_context));
class async(Structure):
_fields_ = [
('fn', composite_context_callback),
('private_data', c_void_p),
]
composite_context._fields_ = [
('state', enum),
('private_data', c_void_p),
('status', NTSTATUS),
('event_ctx', c_void_p), # struct event_context *
('async', async),
('used_wait', BOOL),
]
# _PUBLIC_ struct composite_context *composite_create(TALLOC_CTX *mem_ctx,
# struct event_context *ev);
library.composite_create.restype = POINTER(composite_context)
library.composite_create.argtypes = [c_void_p, c_void_p]
def composite_create(memctx, eventContext):
result = library.composite_create(memctx, eventContext)
if not result:
raise RuntimeError("Unable to allocate a composite_context")
return result
# _PUBLIC_ BOOL composite_nomem(const void *p, struct composite_context *ctx);
library.composite_nomem.restype = BOOL
library.composite_nomem.argtypes = [c_void_p, POINTER(composite_context)]
library.composite_wait.restype = NTSTATUS
library.composite_wait.argtypes = [POINTER(composite_context)]
library.composite_is_ok.restype = BOOL
library.composite_is_ok.argtypes = [POINTER(composite_context)]
library.composite_error.restype = None
library.composite_error.argtypes = [POINTER(composite_context), NTSTATUS]
library.composite_done.restype = None
library.composite_done.argtypes = [POINTER(composite_context)]
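# --- Illustrative synchronous usage sketch (hypothetical caller): a composite
# request is typically driven to completion with composite_wait and then checked:
#
# ctx = some_async_call()  # returns POINTER(composite_context)
# status = library.composite_wait(ctx)  # blocks until DONE or ERROR
# if not library.composite_is_ok(ctx):
#     raise RuntimeError("composite request failed: %r" % status)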
|
NetNow/wmi-samba
|
pysamba/composite_context.py
|
Python
|
gpl-2.0
| 2,396
|
[
"VisIt"
] |
7747fd0e24ea5d7a3daccb921708faeccb2ef3384052e28b0edcf78903d5e4f5
|
#!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Greg Caporaso, Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso", "Daniel McDonald", "Jens Reeder",
"Jose Antonio Navas Molina"]
__credits__ = [
"Greg Caporaso",
"Daniel McDonald",
"Jens Reeder",
"William Walters"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from os.path import split, splitext, join, abspath
from multiprocessing import cpu_count
from qiime.util import (parse_command_line_parameters, get_options_lookup,
make_option, create_dir)
from qiime.identify_chimeric_seqs import (blast_fragments_identify_chimeras,
chimeraSlayer_identify_chimeras, usearch61_chimera_check)
options_lookup = get_options_lookup()
# identify_chimeric_seqs.py
script_info = {}
script_info[
'brief_description'] = """Identify chimeric sequences in input FASTA file"""
script_info['script_description'] = """A FASTA file of sequences can be screened to remove chimeras (sequences generated due to the PCR amplification of multiple templates or parent sequences). QIIME currently includes a taxonomy-assignment-based approach (blast_fragments), the ChimeraSlayer algorithm, and usearch61 for identifying chimeric sequences.
1. Blast_fragments approach:
The reference sequences (-r) and id-to-taxonomy map (-t) provided are the same format as those provided to assign_taxonomy.py. The reference sequences are in fasta format, and the id-to-taxonomy map contains tab-separated lines where the first field is a sequence identifier, and the second field is the taxonomy separated by semi-colons (e.g., Archaea;Euryarchaeota;Methanobacteriales;Methanobacterium). The reference collection should be derived from a chimera-checked database (such as the full greengenes database), and filtered to contain only sequences at, for example, a maximum of 97% sequence identity.
2. ChimeraSlayer:
ChimeraSlayer uses BLAST to identify potential chimera parents and computes the optimal branching alignment of the query against two parents.
We suggest to use the pynast aligned representative sequences as input.
3. usearch61:
usearch61 performs both de novo (abundance based) and reference based chimera detection. Unlike the other two chimera-checking methods, unclustered sequences should be used as input rather than a representative sequence set, as these sequences need to be clustered to get abundance data. The results can be taken as the union or intersection of all input sequences not flagged as chimeras. For details, see: http://drive5.com/usearch/usearch_docs.html
"""
script_info['script_usage'] = []
script_info['script_usage'].append(("""blast_fragments example""", """For each sequence provided as input, the blast_fragments method splits the input sequence into n roughly-equal-sized, non-overlapping fragments, and assigns taxonomy to each fragment against a reference database. The BlastTaxonAssigner (implemented in assign_taxonomy.py) is used for this. The taxonomies of the fragments are compared with one another (at a default depth of 4), and if contradictory assignments are returned the sequence is identified as chimeric. For example, if an input sequence was split into 3 fragments, and the following taxon assignments were returned:
========== ==========================================================
fragment1: Archaea;Euryarchaeota;Methanobacteriales;Methanobacterium
fragment2: Archaea;Euryarchaeota;Halobacteriales;uncultured
fragment3: Archaea;Euryarchaeota;Methanobacteriales;Methanobacterium
========== ==========================================================
The sequence would be considered chimeric at a depth of 3 (Methanobacteriales vs. Halobacteriales), but non-chimeric at a depth of 2 (all Euryarchaeota).
blast_fragments begins with the assumption that a sequence is non-chimeric, and looks for evidence to the contrary. This is important when, for example, no taxonomy assignment can be made because no blast result is returned. If a sequence is split into three fragments, and only one returns a blast hit, that sequence would be considered non-chimeric. This is because there is no evidence (i.e., contradictory blast assignments) for the sequence being chimeric. This script can be run by the following command, where the resulting data is written to the directory "identify_chimeras/" and using default parameters (e.g. chimera detection method ("-m blast_fragments"), number of fragments ("-n 3"), taxonomy depth ("-d 4") and maximum E-value ("-e 1e-30")):""", """%prog -i repr_set_seqs.fasta -t taxonomy_assignment.txt -r ref_seq_set.fna -m blast_fragments -o chimeric_seqs_blast.txt"""))
script_info[
'script_usage'].append(("""ChimeraSlayer Example:""", """Identify chimeric sequences using the ChimeraSlayer algorithm against a user provided reference data base. The input sequences need to be provided in aligned (Py)Nast format. The reference data base needs to be provided as aligned FASTA (-a). Note that the reference database needs to be the same that was used to build the alignment of the input sequences!""",
"""%prog -m ChimeraSlayer -i repr_set_seqs_aligned.fasta -a ref_seq_set_aligned.fasta -o chimeric_seqs_cs.txt"""))
script_info[
'script_usage'].append(("""usearch61 Example:""", """Identify chimeric sequences using the usearch61 algorithm against a user provided reference data base. The input sequences should be the demultiplexed (not clustered rep set!) sequences, such as those output from split_libraries.py. The input sequences need to be provided as unaligned fasta in the same orientation as the query sequences.""",
"""%prog -m usearch61 -i seqs.fna -r ref_sequences.fasta -o usearch61_chimera_checking/"""))
script_info[
'output_description'] = """The result of identify_chimeric_seqs.py is a text file that identifies which sequences are chimeric."""
script_info['required_options'] = [options_lookup['fasta_as_primary_input']]
chimera_detection_method_choices = ['blast_fragments', 'ChimeraSlayer',
'usearch61']
script_info['optional_options'] = [
make_option('-t', '--id_to_taxonomy_fp', type='existing_filepath',
help='Path to tab-delimited file mapping sequences to assigned '
'taxonomy. Each assigned taxonomy is provided as a comma-separated '
'list. [default: %default; REQUIRED when method is blast_fragments]'),
make_option('-r', '--reference_seqs_fp', type='existing_filepath',
help='Path to reference sequences (used to build a blast db when '
'method blast_fragments or reference database for usearch61). '
'[default: %default; REQUIRED when method blast_fragments' +
' if no blast_db is provided, suppress requirement for usearch61 '
'with --suppress_usearch61_ref;]'),
make_option('-a', '--aligned_reference_seqs_fp', type='existing_filepath',
help='Path to (Py)Nast aligned reference sequences. '
'REQUIRED when method ChimeraSlayer [default: %default]'),
make_option('-b', '--blast_db', type='blast_db',
help='Database to blast against. Must provide either --blast_db or '
'--reference_seqs_fp when method is blast_fragments [default: %default]'),
make_option('-m', '--chimera_detection_method',
type='choice', help='Chimera detection method. Choices: ' +
" or ".join(chimera_detection_method_choices) +
'. [default:%default]',
choices=chimera_detection_method_choices, default='ChimeraSlayer'),
make_option('-n', '--num_fragments',
type='int', help='Number of fragments to split sequences into' +
' (i.e., number of expected breakpoints + 1) [default: %default]',
default=3),
make_option('-d', '--taxonomy_depth',
type='int', help='Number of taxonomic divisions to consider' +
' when comparing taxonomy assignments [default: %default]',
default=4),
make_option('-e', '--max_e_value',
type='float', help='Max e-value to assign taxonomy' +
' [default: %default]', default=1e-30),
make_option('-R', '--min_div_ratio',
type='float', help='min divergence ratio ' +
'(passed to ChimeraSlayer). If set to None uses ' +
'ChimeraSlayer default value. ' +
' [default: %default]', default=None),
make_option('-k', '--keep_intermediates',
action='store_true', help='Keep intermediate files, ' +
'useful for debugging ' +
' [default: %default]', default=False),
make_option('--suppress_usearch61_intermediates', action='store_true',
help='Use to suppress retention of usearch intermediate files/logs.'
'[default: %default]', default=False),
make_option('--suppress_usearch61_ref', action='store_true',
help='Use to suppress reference based chimera detection with usearch61 '
'[default: %default]', default=False),
make_option('--suppress_usearch61_denovo', action='store_true',
help='Use to suppress de novo based chimera detection with usearch61 '
'[default: %default]', default=False),
make_option('--split_by_sampleid', action='store_true',
help='Enable to split sequences by initial SampleID, requires that fasta '
'be in demultiplexed format, e.g., >Sample.1_0, >Sample.2_1, >Sample.1_2, '
'with the initial string before first underscore matching SampleIDs. If '
'not in this format, could cause unexpected errors. [default: %default]',
default=False),
make_option('--non_chimeras_retention', default='union',
help=("usearch61 only - selects "
"subsets of sequences detected as non-chimeras to retain after "
"de novo and reference based chimera detection. Options are "
"intersection or union. union will retain sequences that are "
"flagged as non-chimeric from either filter, while intersection "
"will retain only those sequences that are flagged as non-"
"chimeras from both detection methods. [default: %default]"),
type='string'),
make_option('--usearch61_minh', default=0.28, help=("Minimum score (h). "
"Increasing this value tends to reduce the number of false "
"positives and decrease sensitivity."
"[default: %default]"), type='float'),
make_option('--usearch61_xn', default=8.0, help=("Weight of 'no' vote. "
"Increasing this value tends to the number of false positives "
"(and also sensitivity). Must be > 1."
"[default: %default]"), type='float'),
make_option('--usearch61_dn', default=1.4, help=("Pseudo-count prior for "
"'no' votes. (n). Increasing this value tends to the number of "
"false positives (and also sensitivity). Must be > 0."
"[default: %default]"), type='float'),
make_option('--usearch61_mindiffs', default=3, help=("Minimum number of "
"diffs in a segment. Increasing this value tends to reduce the "
"number of false positives while reducing sensitivity to very "
"low-divergence chimeras. Must be > 0."
"[default: %default]"), type='int'),
make_option('--usearch61_mindiv', default=0.8, help=("Minimum divergence, "
"i.e. 100% - identity between the query and closest reference "
"database sequence. Expressed as a percentage, so the default "
"is 0.8, which allows chimeras that are up to 99.2% similar to "
"a reference sequence. This value is chosen to improve "
"sensitivity to very low-divergence chimeras. Must be > 0."
"[default: %default]"), type='float'),
make_option('--usearch61_abundance_skew', default=2.0, help=("Abundance "
"skew setting for de novo chimera detection with usearch61. Must "
"be > 0."
" [default: %default]"), type='float'),
make_option('--percent_id_usearch61', default=0.97,
help=("Percent identity threshold for clustering "
"with usearch61, expressed as a fraction between 0 and "
"1. [default: %default]"), type='float'),
make_option('--minlen', default=64, help=("Minimum length of sequence "
"allowed for usearch61 [default: %default]"), type='int'),
make_option('--word_length', default=8,
help="word length value for usearch61. "
"[default: %default]", type='int'),
make_option('--max_accepts', default=1,
help="max_accepts value to usearch61. "
"[default: %default]", type='int'),
make_option('--max_rejects', default=8,
help="max_rejects value for usearch61. "
"[default: %default]", type='int'),
make_option('-o', '--output_fp', type='new_filepath',
help='Path to store output, output filepath in the case of '
'blast_fragments and ChimeraSlayer, or directory in case of usearch61 '
' [default: derived from input_seqs_fp]'),
make_option('--threads', default='one_per_cpu', help=(
"Specify number of threads per core to be used for "
"usearch61 commands that utilize multithreading. By default, "
"will calculate the number of cores to utilize so a single "
"thread will be used per CPU. Specify a fractional number, e.g."
" 1.0 for 1 thread per core, or 0.5 for a single thread on "
"a two core CPU. Only applies to usearch61. "
"[default: %default]"))
]
script_info['version'] = __version__
def main():
"""Run chimera checker with given options>"""
option_parser, opts, args = parse_command_line_parameters(**script_info)
# additional option checks
if opts.chimera_detection_method == 'blast_fragments':
if not (opts.blast_db or opts.reference_seqs_fp):
option_parser.error('Must provide either --blast_db or' +
' --reference_seqs_fp and --id_to_taxonomy_fp when' +
' method is blast_fragments.')
if not opts.id_to_taxonomy_fp:
option_parser.error('Must provide --id_to_taxonomy_fp when method' +
' is blast_fragments.')
if opts.num_fragments < 2:
option_parser.error('Invalid number of fragments (-n %d) Must be >= 2.'
% opts.num_fragments)
elif opts.chimera_detection_method == 'ChimeraSlayer':
if not opts.aligned_reference_seqs_fp:
option_parser.error("Must provide --aligned_reference_seqs_fp "
"when using method ChimeraSlayer")
elif opts.chimera_detection_method == 'usearch61':
if opts.suppress_usearch61_ref and opts.suppress_usearch61_denovo:
option_parser.error("Supressing both de novo and reference "
"chimera detection not allowed.")
if not opts.reference_seqs_fp and not opts.suppress_usearch61_ref:
option_parser.error("--reference_seqs_fp required for reference "
"based chimera detection, suppress reference based chimera "
"detection with --suppress_usearch61_ref")
if opts.reference_seqs_fp:
try:
temp_f = open(opts.reference_seqs_fp, "U")
temp_f.close()
except IOError:
raise IOError("Unable to open --reference_seqs_fp, please "
"check filepath and permissions.")
if opts.non_chimeras_retention not in ['intersection', 'union']:
option_parser.error("--non_chimeras_retention must be either "
"'union' or 'intersection'")
if opts.usearch61_xn <= 1:
option_parser.error("--usearch61_xn must be > 1")
if opts.usearch61_dn <= 0:
option_parser.error("--usearch61_dn must be > 0")
if opts.usearch61_mindiffs <= 0:
option_parser.error("--usearch61_mindiffs must be > 0")
if opts.usearch61_mindiv <= 0:
option_parser.error("--usearch61_mindiv must be > 0")
if opts.usearch61_abundance_skew <= 0:
option_parser.error("--usearch61_abundance_skew must be > 0")
verbose = opts.verbose # not used yet ...
input_seqs_fp = opts.input_fasta_fp
id_to_taxonomy_fp = opts.id_to_taxonomy_fp
reference_seqs_fp = opts.reference_seqs_fp
chimera_detection_method = opts.chimera_detection_method
num_fragments = opts.num_fragments
output_fp = opts.output_fp
taxonomy_depth = opts.taxonomy_depth
max_e_value = opts.max_e_value
blast_db = opts.blast_db
keep_intermediates = opts.keep_intermediates
threads = opts.threads
# calculate threads as 1 per CPU, or use float of input value
if threads == 'one_per_cpu':
threads = float(1 / cpu_count())
else:
# Make sure input is a float
try:
threads = float(threads)
except ValueError:
option_parser.error("--threads must be a float value if "
"default 'one_per_cpu' value overridden.")
if not output_fp:
if chimera_detection_method == "usearch61":
output_dir = "usearch61_chimeras/"
create_dir(output_dir, fail_on_exist=False)
else:
input_basename = splitext(split(input_seqs_fp)[1])[0]
output_fp = '%s_chimeric.txt' % input_basename
elif chimera_detection_method == "usearch61":
output_dir = output_fp
create_dir(output_dir, fail_on_exist=False)
if chimera_detection_method == 'blast_fragments':
blast_fragments_identify_chimeras(input_seqs_fp,
id_to_taxonomy_fp,
reference_seqs_fp, blast_db=blast_db,
num_fragments=opts.num_fragments,
max_e_value=max_e_value,
output_fp=output_fp,
taxonomy_depth=taxonomy_depth)
elif chimera_detection_method == 'ChimeraSlayer':
chimeraSlayer_identify_chimeras(input_seqs_fp,
output_fp=output_fp,
db_FASTA_fp=opts.reference_seqs_fp,
db_NAST_fp=opts.aligned_reference_seqs_fp,
min_div_ratio=opts.min_div_ratio,
keep_intermediates=keep_intermediates)
elif chimera_detection_method == 'usearch61':
usearch61_chimera_check(input_seqs_fp,
output_dir=output_dir,
reference_seqs_fp=reference_seqs_fp,
suppress_usearch61_intermediates=opts.suppress_usearch61_intermediates,
suppress_usearch61_ref=opts.suppress_usearch61_ref,
suppress_usearch61_denovo=opts.suppress_usearch61_denovo,
split_by_sampleid=opts.split_by_sampleid,
non_chimeras_retention=opts.non_chimeras_retention,
usearch61_minh=opts.usearch61_minh,
usearch61_xn=opts.usearch61_xn,
usearch61_dn=opts.usearch61_dn,
usearch61_mindiffs=opts.usearch61_mindiffs,
usearch61_mindiv=opts.usearch61_mindiv,
usearch61_abundance_skew=opts.usearch61_abundance_skew,
percent_id_usearch61=opts.percent_id_usearch61,
minlen=opts.minlen,
word_length=opts.word_length,
max_accepts=opts.max_accepts,
max_rejects=opts.max_rejects,
verbose=opts.verbose,
threads=threads)
if __name__ == "__main__":
main()
|
josenavas/qiime
|
scripts/identify_chimeric_seqs.py
|
Python
|
gpl-2.0
| 22,016
|
[
"BLAST"
] |
0ce63a521a1ad2e8f3fa98af3efa8d35a3bb2a8cd0924c74dca22276eeeb95fc
|
# -*- coding: utf-8 -*-
import datetime
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
try:
from django.utils import lorem_ipsum
except ImportError:
# Support Django < 1.8
from django.contrib.webdesign import lorem_ipsum
import os
import random
import re
import string
import sys
from decimal import Decimal
if sys.version_info[0] < 3:
str_ = unicode
else:
str_ = str
# backporting os.path.relpath, only available in python >= 2.6
try:
relpath = os.path.relpath
except AttributeError:
def relpath(path, start=os.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.curdir
return os.path.join(*rel_list)
class Generator(object):
coerce_type = staticmethod(lambda x: x)
empty_value = None
empty_p = 0
def __init__(self, empty_p=None, coerce=None):
if empty_p is not None:
self.empty_p = empty_p
if coerce:
self.coerce_type = coerce
def coerce(self, value):
return self.coerce_type(value)
def generate(self):
raise NotImplementedError
def get_value(self):
if random.random() < self.empty_p:
return self.empty_value
value = self.generate()
return self.coerce(value)
def __call__(self):
return self.get_value()
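# --- Illustrative sketch of the Generator contract (hypothetical subclass):
# calling an instance first rolls empty_p (returning empty_value that often),
# then coerces whatever generate() produced.
#
# class DieGenerator(Generator):
#     coerce_type = int
#     def generate(self):
#         return random.randint(1, 6)
#
# roll = DieGenerator(empty_p=0.1)  # ~10% of calls return None
# value = roll()                    # otherwise an int in 1..6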
class StaticGenerator(Generator):
def __init__(self, value, *args, **kwargs):
self.value = value
super(StaticGenerator, self).__init__(*args, **kwargs)
def generate(self):
return self.value
class CallableGenerator(Generator):
def __init__(self, value, args=None, kwargs=None, *xargs, **xkwargs):
self.value = value
self.args = args or ()
self.kwargs = kwargs or {}
super(CallableGenerator, self).__init__(*xargs, **xkwargs)
def generate(self):
return self.value(*self.args, **self.kwargs)
class NoneGenerator(Generator):
def generate(self):
return self.empty_value
class StringGenerator(Generator):
coerce_type = str_
singleline_chars = string.ascii_letters + u' '
multiline_chars = singleline_chars + u'\n'
def __init__(self, chars=None, multiline=False, min_length=1, max_length=1000, *args, **kwargs):
assert min_length >= 0
assert max_length >= 0
self.min_length = min_length
self.max_length = max_length
if chars is None:
if multiline:
self.chars = self.multiline_chars
else:
self.chars = self.singleline_chars
else:
self.chars = chars
super(StringGenerator, self).__init__(*args, **kwargs)
def generate(self):
length = random.randint(self.min_length, self.max_length)
value = u''
for x in range(length):
value += random.choice(self.chars)
return value
class SlugGenerator(StringGenerator):
def __init__(self, chars=None, *args, **kwargs):
if chars is None:
chars = string.ascii_lowercase + string.digits + '-'
super(SlugGenerator, self).__init__(chars, multiline=False, *args, **kwargs)
class LoremGenerator(Generator):
coerce_type = str_
common = True
count = 3
method = 'b'
def __init__(self, count=None, method=None, common=None, max_length=None, *args, **kwargs):
if count is not None:
self.count = count
if method is not None:
self.method = method
if common is not None:
self.common = common
self.max_length = max_length
super(LoremGenerator, self).__init__(*args, **kwargs)
def generate(self):
if self.method == 'w':
lorem = lorem_ipsum.words(self.count, common=self.common)
elif self.method == 's':
lorem = u' '.join([
lorem_ipsum.sentence()
for i in range(self.count)])
else:
paras = lorem_ipsum.paragraphs(self.count, common=self.common)
if self.method == 'p':
paras = ['<p>%s</p>' % p for p in paras]
lorem = u'\n\n'.join(paras)
if self.max_length:
length = random.randint(round(int(self.max_length) / 10),
self.max_length)
lorem = lorem[:max(1, length)]
return lorem.strip()
class LoremSentenceGenerator(LoremGenerator):
method = 's'
class LoremHTMLGenerator(LoremGenerator):
method = 'p'
class LoremWordGenerator(LoremGenerator):
count = 7
method = 'w'
class IntegerGenerator(Generator):
coerce_type = int
min_value = - 10 ** 5
max_value = 10 ** 5
def __init__(self, min_value=None, max_value=None, *args, **kwargs):
if min_value is not None:
self.min_value = min_value
if max_value is not None:
self.max_value = max_value
super(IntegerGenerator, self).__init__(*args, **kwargs)
def generate(self):
value = random.randint(self.min_value, self.max_value)
return value
class SmallIntegerGenerator(IntegerGenerator):
min_value = -2 ** 7
max_value = 2 ** 7 - 1
class PositiveIntegerGenerator(IntegerGenerator):
min_value = 0
class PositiveSmallIntegerGenerator(SmallIntegerGenerator):
min_value = 0
class FloatGenerator(IntegerGenerator):
coerce_type = float
decimal_digits = 1
def __init__(self, decimal_digits=None, *args, **kwargs):
if decimal_digits is not None:
self.decimal_digits = decimal_digits
super(FloatGenerator, self).__init__(*args, **kwargs)
def generate(self):
value = super(FloatGenerator, self).generate()
value = float(value)
if self.decimal_digits:
digits = random.randint(1, 10 ** self.decimal_digits) - 1
digits = float(digits)
value = value + digits / (10 ** self.decimal_digits)
return value
class ChoicesGenerator(Generator):
def __init__(self, choices=(), values=(), *args, **kwargs):
assert len(choices) or len(values)
self.choices = list(choices)
if not values:
self.values = [k for k, v in self.choices]
else:
self.values = list(values)
super(ChoicesGenerator, self).__init__(*args, **kwargs)
def generate(self):
return random.choice(self.values)
class BooleanGenerator(ChoicesGenerator):
def __init__(self, none=False, *args, **kwargs):
values = (True, False)
if none:
values = values + (None,)
super(BooleanGenerator, self).__init__(values=values, *args, **kwargs)
class NullBooleanGenerator(BooleanGenerator):
def __init__(self, none=True, *args, **kwargs):
super(NullBooleanGenerator, self).__init__(none=none, *args, **kwargs)
class DateTimeGenerator(Generator):
def __init__(self, min_date=None, max_date=None, *args, **kwargs):
from django.utils import timezone
if min_date is not None:
self.min_date = min_date
else:
self.min_date = timezone.now() - datetime.timedelta(365 * 5)
if max_date is not None:
self.max_date = max_date
else:
self.max_date = timezone.now() + datetime.timedelta(365 * 1)
assert self.min_date < self.max_date
super(DateTimeGenerator, self).__init__(*args, **kwargs)
def generate(self):
diff = self.max_date - self.min_date
seconds = random.randint(0, diff.days * 3600 * 24 + diff.seconds)
return self.min_date + datetime.timedelta(seconds=seconds)
class DateGenerator(Generator):
min_date = datetime.date.today() - datetime.timedelta(365 * 5)
max_date = datetime.date.today() + datetime.timedelta(365 * 1)
def __init__(self, min_date=None, max_date=None, *args, **kwargs):
if min_date is not None:
self.min_date = min_date
if max_date is not None:
self.max_date = max_date
assert self.min_date < self.max_date
super(DateGenerator, self).__init__(*args, **kwargs)
def generate(self):
diff = self.max_date - self.min_date
days = random.randint(0, diff.days)
date = self.min_date + datetime.timedelta(days=days)
# normalize to a plain datetime.date before returning
return datetime.date(date.year, date.month, date.day)
class DecimalGenerator(Generator):
coerce_type = Decimal
max_digits = 24
decimal_places = 10
def __init__(self, max_digits=None, decimal_places=None, *args, **kwargs):
if max_digits is not None:
self.max_digits = max_digits
if decimal_places is not None:
self.decimal_places = decimal_places
super(DecimalGenerator, self).__init__(*args, **kwargs)
def generate(self):
maxint = 10 ** self.max_digits - 1
value = (
Decimal(random.randint(-maxint, maxint)) /
10 ** self.decimal_places)
return value
class FirstNameGenerator(Generator):
""" Generates a first name, either male or female """
male = [
'Abraham', 'Adam', 'Anthony', 'Brian', 'Bill', 'Ben', 'Calvin',
'David', 'Daniel', 'George', 'Henry', 'Isaac', 'Ian', 'Jonathan',
'Jeremy', 'Jacob', 'John', 'Jerry', 'Joseph', 'James', 'Larry',
'Michael', 'Mark', 'Paul', 'Peter', 'Phillip', 'Stephen', 'Tony',
'Titus', 'Trevor', 'Timothy', 'Victor', 'Vincent', 'Winston', 'Walt']
female = [
'Abbie', 'Anna', 'Alice', 'Beth', 'Carrie', 'Christina', 'Danielle',
'Emma', 'Emily', 'Esther', 'Felicia', 'Grace', 'Gloria', 'Helen',
'Irene', 'Joanne', 'Joyce', 'Jessica', 'Kathy', 'Katie', 'Kelly',
'Linda', 'Lydia', 'Mandy', 'Mary', 'Olivia', 'Priscilla',
'Rebecca', 'Rachel', 'Susan', 'Sarah', 'Stacey', 'Vivian']
def __init__(self, gender=None):
self.gender = gender
self.all = self.male + self.female
def generate(self):
if self.gender == 'm':
return random.choice(self.male)
elif self.gender == 'f':
return random.choice(self.female)
else:
return random.choice(self.all)
class LastNameGenerator(Generator):
""" Generates a last name """
surname = [
'Smith', 'Walker', 'Conroy', 'Stevens', 'Jones', 'Armstrong',
'Johnson', 'White', 'Stone', 'Strong', 'Olson', 'Lee', 'Forrest',
'Baker', 'Portman', 'Davis', 'Clark', 'Brown', 'Roberts', 'Ellis',
'Jackson', 'Marshall', 'Wang', 'Chen', 'Chou', 'Tang', 'Huang', 'Liu',
'Shih', 'Su', 'Song', 'Yang', 'Chan', 'Tsai', 'Wong', 'Hsu', 'Cheng',
'Chang', 'Wu', 'Lin', 'Yu', 'Yao', 'Kang', 'Park', 'Kim', 'Choi',
'Ahn', 'Mujuni']
def generate(self):
return random.choice(self.surname)
class EmailGenerator(StringGenerator):
chars = string.ascii_lowercase
def __init__(self, chars=None, max_length=30, tlds=None, static_domain=None, *args, **kwargs):
assert max_length >= 6
if chars is not None:
self.chars = chars
self.tlds = tlds
self.static_domain = static_domain
super(EmailGenerator, self).__init__(self.chars, max_length=max_length, *args, **kwargs)
def generate(self):
maxl = self.max_length - 2
if self.static_domain is None:
if self.tlds:
tld = random.choice(self.tlds)
elif maxl > 4:
tld = StringGenerator(self.chars, min_length=3, max_length=3).generate()
maxl -= len(tld)
assert maxl >= 2
else:
maxl -= len(self.static_domain)
name = StringGenerator(self.chars, min_length=1, max_length=maxl-1).generate()
maxl -= len(name)
if self.static_domain is None:
domain = StringGenerator(self.chars, min_length=1, max_length=maxl).generate()
return '%s@%s.%s' % (name, domain, tld)
else:
return '%s@%s' % (name, self.static_domain)
class URLGenerator(StringGenerator):
chars = string.ascii_lowercase
protocol = 'http'
tlds = ()
def __init__(self, chars=None, max_length=30, protocol=None, tlds=None,
*args, **kwargs):
if chars is not None:
self.chars = chars
if protocol is not None:
self.protocol = protocol
if tlds is not None:
self.tlds = tlds
assert max_length > (
len(self.protocol) + len('://') +
1 + len('.') +
max([2] + [len(tld) for tld in self.tlds if tld]))
super(URLGenerator, self).__init__(
chars=self.chars, max_length=max_length, *args, **kwargs)
def generate(self):
maxl = self.max_length - len(self.protocol) - 4 # len(://) + len(.)
if self.tlds:
tld = random.choice(self.tlds)
maxl -= len(tld)
else:
tld_max_length = 3 if maxl >= 5 else 2
tld = StringGenerator(self.chars,
min_length=2, max_length=tld_max_length).generate()
maxl -= len(tld)
domain = StringGenerator(chars=self.chars, max_length=maxl).generate()
return u'%s://%s.%s' % (self.protocol, domain, tld)
class IPAddressGenerator(Generator):
coerce_type = str_
def generate(self):
return '.'.join([str_(part) for part in [
IntegerGenerator(min_value=1, max_value=254).generate(),
IntegerGenerator(min_value=0, max_value=254).generate(),
IntegerGenerator(min_value=0, max_value=254).generate(),
IntegerGenerator(min_value=1, max_value=254).generate(),
]])
class TimeGenerator(Generator):
coerce_type = str_
def generate(self):
return u'%02d:%02d:%02d' % (
random.randint(0,23),
random.randint(0,59),
random.randint(0,59),
)
class FilePathGenerator(Generator):
coerce_type = str_
def __init__(self, path, match=None, recursive=False, max_length=None, *args, **kwargs):
self.path = path
self.match = match
self.recursive = recursive
self.max_length = max_length
super(FilePathGenerator, self).__init__(*args, **kwargs)
def generate(self):
filenames = []
if self.match:
match_re = re.compile(self.match)
if self.recursive:
for root, dirs, files in os.walk(self.path):
for f in files:
                    if self.match is None or match_re.search(f):
f = os.path.join(root, f)
filenames.append(f)
else:
try:
for f in os.listdir(self.path):
full_file = os.path.join(self.path, f)
if os.path.isfile(full_file) and \
(self.match is None or match_re.search(f)):
filenames.append(full_file)
except OSError:
pass
if self.max_length:
filenames = [fn for fn in filenames if len(fn) <= self.max_length]
return random.choice(filenames)
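# Illustrative sketch (added for this write-up, not part of the library):
# pick a random ``.py`` file from the current directory; note that
# ``generate`` raises IndexError when nothing matches, since it calls
# random.choice on the collected list.
def _demo_file_path_generator():
    gen = FilePathGenerator(path=".", match=r"\.py$", recursive=False)
    return gen.generate()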
class MediaFilePathGenerator(FilePathGenerator):
'''
Generates a valid filename of an existing file from a subdirectory of
``settings.MEDIA_ROOT``. The returned filename is relative to
``MEDIA_ROOT``.
'''
def __init__(self, path='', *args, **kwargs):
from django.conf import settings
path = os.path.join(settings.MEDIA_ROOT, path)
super(MediaFilePathGenerator, self).__init__(path, *args, **kwargs)
def generate(self):
from django.conf import settings
filename = super(MediaFilePathGenerator, self).generate()
filename = relpath(filename, settings.MEDIA_ROOT)
return filename
class InstanceGenerator(Generator):
'''
    Naive support for ``limit_choices_to``. It assigns the specified value to
    the field for dict items that have one of the following forms::
fieldname: value
fieldname__exact: value
fieldname__iexact: value
'''
def __init__(self, autofixture, limit_choices_to=None, *args, **kwargs):
self.autofixture = autofixture
limit_choices_to = limit_choices_to or {}
for lookup, value in limit_choices_to.items():
bits = lookup.split('__')
if len(bits) == 1 or \
len(bits) == 2 and bits[1] in ('exact', 'iexact'):
self.autofixture.add_field_value(bits[0], StaticGenerator(value))
super(InstanceGenerator, self).__init__(*args, **kwargs)
def generate(self):
return self.autofixture.create()[0]
class MultipleInstanceGenerator(InstanceGenerator):
empty_value = []
def __init__(self, *args, **kwargs):
self.min_count = kwargs.pop('min_count', 1)
self.max_count = kwargs.pop('max_count', 10)
super(MultipleInstanceGenerator, self).__init__(*args, **kwargs)
def generate(self):
instances = []
for i in range(random.randint(self.min_count, self.max_count)):
instances.append(
super(MultipleInstanceGenerator, self).generate())
return instances
class InstanceSelector(Generator):
'''
Select one or more instances from a queryset.
'''
empty_value = []
def __init__(self, queryset, min_count=None, max_count=None, fallback=None,
limit_choices_to=None, *args, **kwargs):
from django.db.models.query import QuerySet
if not isinstance(queryset, QuerySet):
queryset = queryset._default_manager.all()
limit_choices_to = limit_choices_to or {}
self.queryset = queryset.filter(**limit_choices_to)
self.fallback = fallback
self.min_count = min_count
self.max_count = max_count
super(InstanceSelector, self).__init__(*args, **kwargs)
def generate(self):
if self.max_count is None:
try:
return self.queryset.order_by('?')[0]
except IndexError:
return self.fallback
else:
min_count = self.min_count or 0
count = random.randint(min_count, self.max_count)
return self.queryset.order_by('?')[:count]
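# Illustrative sketch (hypothetical ``Article`` model, added for this
# write-up):
#
#     selector = InstanceSelector(Article, min_count=1, max_count=3,
#                                 limit_choices_to={'published': True})
#     articles = selector.generate()  # 1-3 random published articles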
class WeightedGenerator(Generator):
"""
Takes a list of generator objects and integer weights, of the following form:
[(generator, weight), (generator, weight),...]
and returns a value from a generator chosen randomly by weight.
"""
def __init__(self, choices):
self.choices = choices
def weighted_choice(self, choices):
total = sum(w for c, w in choices)
r = random.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w > r:
return c
upto += w
def generate(self):
return self.weighted_choice(self.choices).generate()
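# Illustrative sketch (added for this write-up, assuming StaticGenerator
# returns its wrapped value, as used above): with the weights below roughly
# 80% of the generated values are 'common'.
def _demo_weighted_generator():
    gen = WeightedGenerator([
        (StaticGenerator("common"), 80),
        (StaticGenerator("rare"), 20),
    ])
    return gen.generate()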
class ImageGenerator(Generator):
'''
    Generates a valid placeholder image and saves it to ``settings.MEDIA_ROOT``.
The returned filename is relative to ``MEDIA_ROOT``.
'''
default_sizes = (
(100,100),
(200,300),
(400,600),
)
def __init__(self, width=None, height=None, sizes=None,
path='_autofixture', storage=None, *args, **kwargs):
self.width = width
self.height = height
self.sizes = list(sizes or self.default_sizes)
if self.width and self.height:
self.sizes.append((width, height))
self.path = path
self.storage = storage or default_storage
super(ImageGenerator, self).__init__(*args, **kwargs)
def generate_file_path(self, width, height, suffix=None):
suffix = suffix if suffix is not None else ''
        filename = '{width}x{height}{suffix}.png'.format(
width=width, height=height, suffix=suffix)
return os.path.join(self.path, filename)
def generate(self):
from .placeholder import get_placeholder_image
width, height = random.choice(self.sizes)
        # Pick a path that does not collide with an existing placeholder image.
i = 0
path = self.generate_file_path(width, height)
while self.storage.exists(path):
i += 1
path = self.generate_file_path(width, height, '_{0}'.format(i))
return self.storage.save(
path,
ContentFile(get_placeholder_image(width, height))
)
|
paulmouzas/blogodrone
|
autofixture/generators.py
|
Python
|
unlicense
| 20,906
|
[
"Brian"
] |
6384c6c0d7f6e6a751a98295951168a436618fd288531d6199efdce8a444fc53
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from setuptools import setup, find_packages
import os
import pbxplore
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
readme = f.read()
# Extras requirements for optional dependencies
extras = {
'analysis': ['weblogo', 'matplotlib'],
'trajectories': ['MDAnalysis>=0.11'],
'all': ['weblogo', 'matplotlib', 'MDAnalysis>=0.11']
}
setup(
name='pbxplore',
version=pbxplore.__version__,
description="PBxplore is a suite of tools dedicated to Protein Block analysis.",
long_description=readme,
url='https://github.com/pierrepo/PBxplore',
# Author details
author='Pierre Poulain',
author_email='pierre.poulain@cupnet.net',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
install_requires=['numpy'],
tests_require=['nose', 'coverage'],
# List additional groups of dependencies here
# To install, use
# $ pip install -e .[analysis]
extras_require=extras,
packages=find_packages(exclude=['test']),
include_package_data=True,
package_data={'pbxplore':['demo/*']},
entry_points={
'console_scripts': [
'PBassign = pbxplore.scripts.PBassign:pbassign_cli',
'PBclust = pbxplore.scripts.PBclust:pbclust_cli',
'PBcount = pbxplore.scripts.PBcount:pbcount_cli',
'PBstat = pbxplore.scripts.PBstat:pbstat_cli',
'PBdata = pbxplore.scripts.PBdata:pbdata_cli',
],
},
)
|
jbarnoud/PBxplore
|
setup.py
|
Python
|
mit
| 2,332
|
[
"MDAnalysis"
] |
53a440f089971ef8e631608968f5c8960755b8fb5c82bf9e433d8fe0502dd124
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Autoregressive distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import util as distribution_util
class Autoregressive(distribution_lib.Distribution):
"""Autoregressive distributions.
The Autoregressive distribution enables learning (often) richer multivariate
distributions by repeatedly applying a [diffeomorphic](
https://en.wikipedia.org/wiki/Diffeomorphism) transformation (such as
implemented by `Bijector`s). Regarding terminology,
"Autoregressive models decompose the joint density as a product of
conditionals, and model each conditional in turn. Normalizing flows
transform a base density (e.g. a standard Gaussian) into the target density
by an invertible transformation with tractable Jacobian." [1]
In other words, the "autoregressive property" is equivalent to the
decomposition, `p(x) = prod{ p(x[i] | x[0:i]) : i=0, ..., d }`. The provided
`shift_and_log_scale_fn`, `masked_autoregressive_default_template`, achieves
this property by zeroing out weights in its `masked_dense` layers.
Practically speaking the autoregressive property means that there exists a
permutation of the event coordinates such that each coordinate is a
diffeomorphic function of only preceding coordinates. [2]
#### Mathematical Details
The probability function is,
```none
prob(x; fn, n) = fn(x).prob(x)
```
And a sample is generated by,
```none
x = fn(...fn(fn(x0).sample()).sample()).sample()
```
where the ellipses (`...`) represent `n-2` composed calls to `fn`, `fn`
constructs a `tf.distributions.Distribution`-like instance, and `x0` is a
fixed initializing `Tensor`.
#### Examples
```python
tfd = tf.contrib.distributions
  def normal_fn(event_size):
n = event_size * (event_size + 1) / 2
p = tf.Variable(tfd.Normal(loc=0., scale=1.).sample(n))
affine = tfd.bijectors.Affine(
scale_tril=tfd.fill_triangular(0.25 * p))
def _fn(samples):
scale = math_ops.exp(affine.forward(samples)).eval()
return independent_lib.Independent(
normal_lib.Normal(loc=0., scale=scale, validate_args=True),
reinterpreted_batch_ndims=1)
return _fn
batch_and_event_shape = [3, 2, 4]
sample0 = array_ops.zeros(batch_and_event_shape)
ar = autoregressive_lib.Autoregressive(
      normal_fn(batch_and_event_shape[-1]), sample0)
x = ar.sample([6, 5])
# ==> x.shape = [6, 5, 3, 2, 4]
prob_x = ar.prob(x)
  # ==> prob_x.shape = [6, 5, 3, 2]
```
[1]: "Masked Autoregressive Flow for Density Estimation."
George Papamakarios, Theo Pavlakou, Iain Murray. Arxiv. 2017.
https://arxiv.org/abs/1705.07057
[2]: "Conditional Image Generation with PixelCNN Decoders."
Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt, Alex
Graves, Koray Kavukcuoglu. Arxiv, 2016.
https://arxiv.org/abs/1606.05328
"""
def __init__(self,
distribution_fn,
sample0=None,
num_steps=None,
validate_args=False,
allow_nan_stats=True,
name="Autoregressive"):
"""Construct an `Autoregressive` distribution.
Args:
distribution_fn: Python `callable` which constructs a
`tf.distributions.Distribution`-like instance from a `Tensor` (e.g.,
        `sample0`). The function must respect the "autoregressive property",
        i.e., there exists a permutation of the event coordinates such that
        each coordinate is a diffeomorphic function of only preceding
        coordinates.
sample0: Initial input to `distribution_fn`; used to
build the distribution in `__init__` which in turn specifies this
distribution's properties, e.g., `event_shape`, `batch_shape`, `dtype`.
If unspecified, then `distribution_fn` should be default constructable.
num_steps: Number of times `distribution_fn` is composed from samples,
e.g., `num_steps=2` implies
`distribution_fn(distribution_fn(sample0).sample(n)).sample()`.
validate_args: Python `bool`. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Default value: "Autoregressive".
Raises:
ValueError: if `num_steps` and
`distribution_fn(sample0).event_shape.num_elements()` are both `None`.
ValueError: if `num_steps < 1`.
"""
parameters = locals()
with ops.name_scope(name):
self._distribution_fn = distribution_fn
self._sample0 = sample0
self._distribution0 = (distribution_fn() if sample0 is None
else distribution_fn(sample0))
if num_steps is None:
num_steps = self._distribution0.event_shape.num_elements()
if num_steps is None:
raise ValueError("distribution_fn must generate a distribution "
"with fully known `event_shape`.")
if num_steps < 1:
raise ValueError("num_steps ({}) must be at least 1.".format(num_steps))
self._num_steps = num_steps
super(Autoregressive, self).__init__(
dtype=self._distribution0.dtype,
reparameterization_type=self._distribution0.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=self._distribution0._graph_parents, # pylint: disable=protected-access
name=name)
@property
def distribution_fn(self):
return self._distribution_fn
@property
def sample0(self):
return self._sample0
@property
def num_steps(self):
return self._num_steps
@property
def distribution0(self):
return self._distribution0
def _batch_shape(self):
return self.distribution0.batch_shape
def _batch_shape_tensor(self):
return self.distribution0.batch_shape_tensor()
def _event_shape(self):
return self.distribution0.event_shape
def _event_shape_tensor(self):
return self.distribution0.event_shape_tensor()
def _sample_n(self, n, seed=None):
if seed is None:
seed = distribution_util.gen_new_seed(
seed=np.random.randint(2**32 - 1),
salt="autoregressive")
samples = self.distribution0.sample(n, seed=seed)
for _ in range(self._num_steps):
samples = self.distribution_fn(samples).sample(seed=seed)
return samples
def _log_prob(self, value):
return self.distribution_fn(value).log_prob(value)
def _prob(self, value):
return self.distribution_fn(value).prob(value)
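# Illustrative sketch (added for this write-up): with `num_steps=2` the loop
# in `_sample_n` above is equivalent to
#
#   x0 = distribution0.sample(n)
#   x1 = distribution_fn(x0).sample()
#   x2 = distribution_fn(x1).sample()
#
# i.e. `distribution_fn` is composed `num_steps` times starting from the base
# distribution's sample, matching
# `fn(...fn(fn(x0).sample()).sample()).sample()` in the class docstring.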
|
rabipanda/tensorflow
|
tensorflow/contrib/distributions/python/ops/autoregressive.py
|
Python
|
apache-2.0
| 7,870
|
[
"Gaussian"
] |
ba45cfbbbca2c6e3e24a3a4ce1d2be16671bbac68925b992fae13ab1cdb185cd
|
#!/usr/bin/env python
#
# Restriction Analysis Libraries.
# Copyright (C) 2004. Frederic Sohm.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Update the Rebase emboss files used by Restriction to build the
Restriction_Dictionary.py module."""
from __future__ import print_function
import os
import sys
import time
import optparse
try:
from urllib import FancyURLopener
except ImportError:
# Python 3
from urllib.request import FancyURLopener
from Bio.Restriction.RanaConfig import *
class RebaseUpdate(FancyURLopener):
def __init__(self, e_mail='', ftpproxy=''):
"""RebaseUpdate([e_mail[, ftpproxy]]) -> new RebaseUpdate instance.
        If e_mail and ftpproxy are not given, RebaseUpdate uses the
        corresponding variables from RanaConfig.
        e_mail is the password for the anonymous ftp connection to Rebase.
        ftpproxy is the proxy to use, if any."""
proxy = {'ftp': ftpproxy or ftp_proxy}
global Rebase_password
Rebase_password = e_mail or Rebase_password
if not Rebase_password:
raise FtpPasswordError('Rebase')
if not Rebase_name:
raise FtpNameError('Rebase')
FancyURLopener.__init__(self, proxy)
def prompt_user_passwd(self, host, realm):
return (Rebase_name, Rebase_password)
def openRebase(self, name=ftp_Rebase):
print('\n Please wait, trying to connect to Rebase\n')
try:
self.open(name)
        except Exception:
raise ConnectionError('Rebase')
return
def getfiles(self, *files):
for file in self.update(*files):
print('copying %s' % file)
fn = os.path.basename(file)
# filename = os.path.join(Rebase, fn)
filename = os.path.join(os.getcwd(), fn)
print('to %s' % filename)
self.retrieve(file, filename)
self.close()
return
def localtime(self):
t = time.gmtime()
year = str(t.tm_year)[-1]
month = str(t.tm_mon)
if len(month) == 1:
month = '0' + month
return year+month
def update(self, *files):
if not files:
files = [ftp_emb_e, ftp_emb_s, ftp_emb_r]
return [x.replace('###', self.localtime()) for x in files]
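    # Illustrative sketch (added for this write-up; the template name is
    # hypothetical): in May 2004, localtime() returns '405' (last digit of
    # the year plus the zero-padded month), so update() rewrites a template
    # such as 'emboss_e.###' to 'emboss_e.405'.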
def __del__(self):
if hasattr(self, 'tmpcache'):
self.close()
#
# self.tmpcache is created by URLopener.__init__ method.
#
return
class FtpNameError(ValueError):
def __init__(self, which_server):
print(" In order to connect to %s ftp server, you must provide a name.\
\n Please edit Bio.Restriction.RanaConfig\n" % which_server)
sys.exit()
class FtpPasswordError(ValueError):
def __init__(self, which_server):
print("\n\
\n In order to connect to %s ftp server, you must provide a password.\
\n Use the --e-mail switch to enter your e-mail address.\
\n\n" % which_server)
sys.exit()
class ConnectionError(IOError):
def __init__(self, which_server):
print('\
\n Unable to connect to the %s ftp server, make sure your computer\
\n is connected to the internet and that you have correctly configured\
\n the ftp proxy.\
\n Use the --proxy switch to enter the address of your proxy\
\n' % which_server)
sys.exit()
if __name__ == '__main__':
parser = optparse.OptionParser()
add = parser.add_option
add('-m', '--e-mail',
action="store",
dest='rebase_password',
default='',
help="set the e-mail address to be used as password for the"
"anonymous ftp connection to Rebase.")
add('-p', '--proxy',
action="store",
dest='ftp_proxy',
default='',
help="set the proxy to be used by the ftp connection.")
(option, args) = parser.parse_args()
Getfiles = RebaseUpdate(option.rebase_password, option.ftp_proxy)
Getfiles.openRebase()
Getfiles.getfiles()
Getfiles.close()
sys.exit()
|
updownlife/multipleK
|
dependencies/biopython-1.65/Scripts/Restriction/rebase_update.py
|
Python
|
gpl-2.0
| 4,208
|
[
"Biopython"
] |
db716f55d6139aaf3863b7bb22bd2eaf8a706856bb717c4937940375bef4932d
|
# powerSpec1.py
# test script for computing power spectrum
# 2014-06-10
"""
== Spectral analysis ==
0. RADAR domain -> normalise to WRF domain
tests to do -
1. average each 4x4 grid in RADAR then compare the spectrum of the resulting image
to the original RADAR image
2. filter (gaussian with various sigmas) and then averge each 4x4 grid
3. oversampling (compute 4x4 averages 16 times)
4. plot power spec for WRF and various preprocessings
A. WRF + RADAR/4x4 normalised (with or without oversampling)/no pre-filtering
B. WRF + RADAR/4x4 normalised (with or without oversampling)/pre-filter 1,2,3...
(unspecified/trial and error)
C. RADAR/normalise/no filtering + RADAR/normalised/pre-filtered 1,2,3...
+ difference
D. test successive gaussian filtering - is the result the same as doing it once
with a variance equal to the sum of variances?
USE
from armor.tests import powerSpec1 as ps
from armor import pattern
from armor import objects4 as ob
from armor import defaultParameters as dp
import numpy as np
import matplotlib.pyplot as plt
reload(ps); a_LOGspec = ps.testA(dbzList=ob.kongrey)
reload(ps); a_LOGspec = ps.testAwrf(dbzList=ob.kongreywrf)
"""
# imports
import pickle, os, shutil, time
from armor import defaultParameters as dp
from armor import pattern
from armor import objects4 as ob
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from scipy import ndimage
from scipy import signal
dbz=pattern.DBZ
root = dp.rootFolder
timeString = str(int(time.time()))
ob.march2014wrf.fix()
ob.kongreywrf.fix()
###############################################################################
# defining the parameters
thisScript = "powerSpec1.py"
testName = "powerSpec1"
scriptFolder = root + "python/armor/tests/"
outputFolder = root + "labLogs/powerSpec1/" + timeString + "/"
sigmaPreprocessing=20
thresPreprocessing=0
radarLL = np.array([18., 115.]) # lat/longitude of the lower left corner for radar data grids
wrfLL = np.array([20.,117.5])
wrfGrid = np.array([150,140])
radarGrid=np.array([881,921])
wrfGridSize = 0.05 #degrees
radarGridSize=0.0125
radar_wrf_grid_ratio = wrfGridSize / radarGridSize
#sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256,]
sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128]
scaleSpacePower = 0
dbzList = ob.kongrey
############################################################################
# setting up the output folder
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
shutil.copyfile(scriptFolder+thisScript, outputFolder+ thisScript)
# defining the functions:
# filtering, averaging, oversampling
def filtering(a, sigma=sigmaPreprocessing):
"""gaussian filter with appropriate sigmas"""
a.matrix = a.gaussianFilter(sigma=sigma).matrix
def averaging(a, starting=(0,0)):
"""4x4 to 1x1 averaging
oversampling 4x4 to 1x1 avaraging with various starting points"""
starting = (wrfLL - radarLL)/radarGridSize + starting
ending = starting + wrfGrid * radar_wrf_grid_ratio
mask = 1./16 * np.ones((4,4))
a1 = a.copy()
a1.matrix = signal.convolve2d(a1.matrix, mask, mode='same') #http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve2d.html
a1.matrix = a1.matrix[starting[0]:ending[0]:radar_wrf_grid_ratio,
starting[1]:ending[1]:radar_wrf_grid_ratio,
]
a1.matrix=np.ma.array(a1.matrix)
print 'starting, ending:',starting, ending #debug
return a1
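# Illustrative sketch (added for this write-up): with the module parameters
# above, starting=(0,0) maps to radar index (wrfLL-radarLL)/radarGridSize =
# (2.0, 2.5)/0.0125 = (160, 200), and ending = starting + (150, 140)*4 =
# (760, 760); each WRF cell is then the 4x4 block mean sampled at stride 4
# from that window.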
def oversampling():
"""use averaging() to perform sampling
    oversampling 4x4 to 1x1 averaging with various starting points
and then average/compare"""
pass
def getLaplacianOfGaussianSpectrum(a, sigmas=sigmas, thres=thresPreprocessing, outputFolder=outputFolder, toReload=True):
L=[]
a.responseImages=[]
if toReload:
a.load()
a.backupMatrix(0)
for sigma in sigmas:
print "sigma:", sigma
a.restoreMatrix(0)
a.setThreshold(thres)
arr0 = a.matrix
arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) * sigma**scaleSpacePower #2014-05-14
a1 = dbz(matrix=arr1.real, name=a.name + "_" + testName + "_sigma" + str(sigma))
L.append({ 'sigma' : sigma,
'a1' : a1,
'abssum1': abs(a1.matrix).sum(),
'sum1' : a1.matrix.sum(),
})
print "abs sum", abs(a1.matrix.sum())
#a1.show()
#a2.show()
plt.close()
#a1.histogram(display=False, outputPath=outputFolder+a1.name+"_histogram.png")
###############################################################################
# computing the spectrum, i.e. sigma for which the LOG has max response
# 2014-05-02
a.responseImages.append({'sigma' : sigma,
'matrix' : arr1 * sigma**2,
})
pickle.dump(a.responseImages, open(outputFolder+a.name+"responseImagesList.pydump",'w'))
###
# numerical spec
a_LOGspec = dbz(name= a.name + "Laplacian-of-Gaussian_numerical_spectrum",
imagePath=outputFolder+a1.name+"_LOGspec.png",
outputPath = outputFolder+a1.name+"_LOGspec.dat",
cmap = 'jet',
)
a.responseImages = np.dstack([v['matrix'] for v in a.responseImages])
#print 'shape:', a.responseImages.shape #debug
a.responseMax = a.responseImages.max(axis=2) # the deepest dimension
a_LOGspec.matrix = np.zeros(a.matrix.shape)
for count, sigma in enumerate(sigmas):
a_LOGspec.matrix += sigma * (a.responseMax == a.responseImages[:,:,count])
a_LOGspec.vmin = a_LOGspec.matrix.min()
a_LOGspec.vmax = a_LOGspec.matrix.max()
#
######
print "saving to:", a_LOGspec.imagePath
a_LOGspec.saveImage()
print a_LOGspec.outputPath
a_LOGspec.saveMatrix()
a_LOGspec.histogram(display=False, outputPath=outputFolder+a1.name+"_LOGspec_histogram.png")
pickle.dump(a_LOGspec, open(outputFolder+ a_LOGspec.name + ".pydump","w"))
return a_LOGspec
def plotting(folder):
pass
# defining the workflows
# testA, testB, testC, testD
def testA(dbzList=ob.march2014,sigmas=sigmas):
for a in dbzList:
a.load()
a.matrix = a.threshold(thresPreprocessing).matrix
a1 = averaging(a)
filtering(a1)
a_LOGspec = getLaplacianOfGaussianSpectrum(a1, sigmas=sigmas)
#return a_LOGspec
#def testAwrf(dbzList=ob.kongreywrf, sigmas=sigmas):
def testAwrf(dbzList=ob.march2014wrf, sigmas=sigmas):
for a in dbzList:
a.load()
a.matrix = a.threshold(thresPreprocessing).matrix
#a1 = averaging(a)
a1=a
filtering(a1)
a_LOGspec = getLaplacianOfGaussianSpectrum(a1, sigmas=sigmas)
#return a_LOGspec
def testB():
'''
oversampling
'''
pass
def testC():
pass
def testD():
pass
### loading /setting up the objects ################################
## old type
# kongrey
kongrey = ob.kongrey
kongreywrf = ob.kongreywrf
# march2014
march2014 = ob.march2014
march2014wrf= ob.march2014wrf
# may2014
## new type
# may2014
# run
|
yaukwankiu/armor
|
tests/powerSpec1.py
|
Python
|
cc0-1.0
| 7,518
|
[
"Gaussian"
] |
dd561b5a62b1174c05466a7990bc93338378ca5ef2338061e5fc03f0298e0362
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from boot_config import *
import os, sys, re
import gzip
import json
import shutil
import webbrowser
import subprocess
import argparse
import hashlib
from datetime import datetime
from functools import partial
from collections import defaultdict
from distutils.version import LooseVersion
from os.path import (isdir, isfile, join, basename, splitext, dirname, split,
getmtime, abspath)
from pprint import pprint
if QT4: # ___ ______________ DEPENDENCIES __________________________
from PySide.QtSql import QSqlDatabase, QSqlQuery
from PySide.QtCore import (Qt, QTimer, Slot, QThread, QMimeData, QModelIndex,
QByteArray, QPoint)
from PySide.QtGui import (QMainWindow, QApplication, QMessageBox, QIcon, QFileDialog,
QTableWidgetItem, QTextCursor, QMenu, QAction, QHeaderView,
QPixmap, QListWidgetItem, QBrush, QColor)
else:
from PySide2.QtWidgets import (QMainWindow, QHeaderView, QApplication, QMessageBox,
QAction, QMenu, QTableWidgetItem, QListWidgetItem,
QFileDialog)
from PySide2.QtCore import (Qt, QTimer, QThread, QModelIndex, Slot, QPoint, QMimeData,
QByteArray)
from PySide2.QtSql import QSqlDatabase, QSqlQuery
from PySide2.QtGui import QIcon, QPixmap, QTextCursor, QBrush, QColor
from secondary import *
from gui_main import Ui_Base
from slppu import slppu as lua # https://github.com/noembryo/slppu
if PYTHON2: # ___ __________ PYTHON 2/3 COMPATIBILITY ______________
import cPickle as pickle
else:
import pickle
__author__ = "noEmbryo"
__version__ = "1.4.4.0"
def _(text): # for future gettext support
return text
def decode_data(path):
""" Converts a lua table to a Python dict
:type path: str|unicode
:param path: The path to the lua file
"""
with open(path, "r", encoding="utf8", newline=None) as txt_file:
        txt = txt_file.read()[39:]  # skip the 39-char header written by encode_data
data = lua.decode(txt.replace("--", "—"))
if type(data) == dict:
return data
def encode_data(path, dict_data):
""" Converts a Python dict to a lua table
:type path: str|unicode
:param path: The path to the lua file
:type dict_data: dict
:param dict_data: The dictionary to be encoded as lua table
"""
with open(path, "w+", encoding="utf8", newline="") as txt_file:
lua_text = "-- we can read Lua syntax here!\nreturn "
lua_text += lua.encode(dict_data)
txt_file.write(lua_text)
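# Illustrative note (added for this write-up): the 39-character slice in
# decode_data() skips exactly the header that encode_data() writes, i.e.
# len("-- we can read Lua syntax here!\nreturn ") == 39, leaving only the
# lua table literal for slppu to decode.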
def sanitize_filename(filename):
""" Creates a safe filename
:type filename: str|unicode
:param filename: The filename to be sanitized
"""
filename = re.sub(r'[/:*?"<>|\\]', "_", filename)
return filename
def get_csv_row(data):
""" Return an RFC 4180 compliant csv row
:type data: dict
:param data: The highlight's data
"""
values = []
for key in CSV_KEYS:
value = data[key].replace('"', '""')
if "\n" in value or '"' in value:
value = '"' + value.lstrip() + '"'
values.append(value if value else "")
return "\t".join(values)
# if sys.platform.lower().startswith("win"):
# import ctypes
#
# def hide_console():
# """ Hides the console window in GUI mode. Necessary for frozen application,
# because this application support both, command line processing AND GUI mode
# and therefor cannot be run via pythonw.exe.
# """
#
# win_handles = ctypes.windll.kernel32.GetConsoleWindow()
# if win_handles != 0:
# ctypes.windll.user32.ShowWindow(win_handles, 0)
# # if you wanted to close the handles...
# # ctypes.windll.kernel32.CloseHandle(win_handles)
#
# def show_console():
# """ UnHides console window"""
# win_handles = ctypes.windll.kernel32.GetConsoleWindow()
# if win_handles != 0:
# ctypes.windll.user32.ShowWindow(win_handles, 1)
class Base(QMainWindow, Ui_Base):
def __init__(self, parent=None):
super(Base, self).__init__(parent)
self.scan_thread = None
self.setupUi(self)
self.version = __version__
# ___ ________ SAVED SETTINGS ___________
self.col_sort = MODIFIED
self.col_sort_asc = False
self.col_sort_h = DATE_H
self.col_sort_asc_h = False
self.highlight_width = None
self.comment_width = None
self.skip_version = "0.0.0.0"
self.opened_times = 0
self.last_dir = os.getcwd()
self.edit_lua_file_warning = True
self.current_view = BOOKS_VIEW
self.db_mode = False
self.toolbar_size = 48
self.alt_title_sort = False
self.high_by_page = False
self.high_merge_warning = True
self.archive_warning = True
self.exit_msg = True
self.db_path = join(SETTINGS_DIR, "data.db")
self.date_vacuumed = datetime.now().strftime(DATE_FORMAT)
# ___ ___________________________________
self.file_selection = None
self.sel_idx = None
self.sel_indexes = []
self.high_view_selection = None
self.sel_high_view = []
self.high_list_selection = None
self.sel_high_list = []
self.loaded_paths = set()
self.books2reload = set()
self.parent_book_data = {}
self.reload_highlights = True
self.threads = []
self.query = None
self.db = None
self.books = []
self.header_main = self.file_table.horizontalHeader()
self.header_main.setDefaultAlignment(Qt.AlignLeft)
self.header_main.setContextMenuPolicy(Qt.CustomContextMenu)
self.header_high_view = self.high_table.horizontalHeader()
self.header_high_view.setDefaultAlignment(Qt.AlignLeft)
# self.header_high_view.setResizeMode(HIGHLIGHT_H, QHeaderView.Stretch)
if QT4:
self.file_table.verticalHeader().setResizeMode(QHeaderView.Fixed)
self.header_main.setMovable(True)
self.high_table.verticalHeader().setResizeMode(QHeaderView.Fixed)
self.header_high_view.setMovable(True)
else:
self.file_table.verticalHeader().setSectionResizeMode(QHeaderView.Fixed)
self.header_main.setSectionsMovable(True)
self.high_table.verticalHeader().setSectionResizeMode(QHeaderView.Fixed)
self.header_high_view.setSectionsMovable(True)
self.splitter.setCollapsible(0, False)
self.splitter.setCollapsible(1, False)
self.info_fields = [self.title_txt, self.author_txt, self.series_txt,
self.lang_txt, self.pages_txt, self.tags_txt]
self.info_keys = ["title", "authors", "series", "language", "pages", "keywords"]
self.kor_text = _("Scanning for KOReader metadata files")
self.ico_file_save = QIcon(":/stuff/file_save.png")
self.ico_files_merge = QIcon(":/stuff/files_merge.png")
self.ico_files_delete = QIcon(":/stuff/files_delete.png")
self.ico_file_exists = QIcon(":/stuff/file_exists.png")
self.ico_file_missing = QIcon(":/stuff/file_missing.png")
self.ico_file_edit = QIcon(":/stuff/file_edit.png")
self.ico_copy = QIcon(":/stuff/copy.png")
self.ico_delete = QIcon(":/stuff/delete.png")
self.ico_label_green = QIcon(":/stuff/label_green.png")
self.ico_view_books = QIcon(":/stuff/view_books.png")
self.ico_db_add = QIcon(":/stuff/db_add.png")
self.ico_db_open = QIcon(":/stuff/db_open.png")
self.ico_app = QIcon(":/stuff/logo64.png")
self.ico_empty = QIcon(":/stuff/trans32.png")
self.ico_refresh = QIcon(":/stuff/refresh16.png")
self.ico_folder_open = QIcon(":/stuff/folder_open.png")
# noinspection PyArgumentList
self.clip = QApplication.clipboard()
self.about = About(self)
self.auto_info = AutoInfo(self)
self.toolbar = ToolBar(self)
self.tool_bar.addWidget(self.toolbar)
self.toolbar.open_btn.setEnabled(False)
self.toolbar.merge_btn.setEnabled(False)
self.toolbar.delete_btn.setEnabled(False)
self.status = Status(self)
self.statusbar.addPermanentWidget(self.status)
self.edit_high = TextDialog(self)
self.edit_high.on_ok = self.edit_comment_ok
self.edit_high.setWindowTitle(_("Comments"))
self.description = TextDialog(self)
self.description.setWindowTitle(_("Description"))
self.description.high_edit_txt.setReadOnly(True)
self.description.btn_box.hide()
self.description_btn.setEnabled(False)
self.review_lbl.setVisible(False)
self.review_txt.setVisible(False)
# noinspection PyTypeChecker,PyCallByClass
QTimer.singleShot(10000, self.auto_check4update) # check for updates
main_timer = QTimer(self) # cleanup threads for ever
main_timer.timeout.connect(self.thread_cleanup)
main_timer.start(2000)
# noinspection PyTypeChecker,PyCallByClass
QTimer.singleShot(0, self.on_load)
def on_load(self):
""" Things that must be done after the initialization
"""
self.settings_load()
self.init_db()
if FIRST_RUN: # on first run
self.toolbar.loaded_btn.click()
self.splitter.setSizes((500, 250))
# self.toolbar.export_btn.setMenu(self.save_menu()) # assign/create menu
self.toolbar.merge_btn.setMenu(self.merge_menu()) # assign/create menu
self.toolbar.delete_btn.setMenu(self.delete_menu()) # assign/create menu
self.connect_gui()
self.passed_files()
if len(sys.argv) > 1: # command line arguments exist, open in Loaded mode
self.toolbar.loaded_btn.click()
else: # no extra command line arguments
if not self.db_mode:
self.toolbar.loaded_btn.setChecked(True) # open in Loaded mode
else:
self.toolbar.db_btn.setChecked(True) # open in Archived mode
text = _("Loading {} database").format(APP_NAME)
self.loading_thread(DBLoader, self.books, text)
self.read_books_from_db() # always load db on start
if self.current_view == BOOKS_VIEW:
self.toolbar.books_view_btn.click() # open in Books view
else:
self.toolbar.high_view_btn.click() # open in Highlights view
self.show()
# ___ ___________________ EVENTS STUFF __________________________
def connect_gui(self):
""" Make all the extra signal/slots connections
"""
self.file_selection = self.file_table.selectionModel()
self.file_selection.selectionChanged.connect(self.file_selection_update)
self.header_main.sectionClicked.connect(self.on_column_clicked)
self.header_main.customContextMenuRequested.connect(self.on_column_right_clicked)
self.high_list_selection = self.high_list.selectionModel()
self.high_list_selection.selectionChanged.connect(self.high_list_selection_update)
self.high_view_selection = self.high_table.selectionModel()
self.high_view_selection.selectionChanged.connect(self.high_view_selection_update)
self.header_high_view.sectionClicked.connect(self.on_highlight_column_clicked)
self.header_high_view.sectionResized.connect(self.on_highlight_column_resized)
sys.stdout = LogStream()
sys.stdout.setObjectName("out")
sys.stdout.append_to_log.connect(self.write_to_log)
sys.stderr = LogStream()
sys.stderr.setObjectName("err")
sys.stderr.append_to_log.connect(self.write_to_log)
def keyPressEvent(self, event):
""" Handles the key press events
:type event: QKeyEvent
:param event: The key press event
"""
key, mod = event.key(), event.modifiers()
# print(key, mod, QKeySequence(key).toString())
if mod == Qt.ControlModifier: # if control is pressed
if key == Qt.Key_Backspace:
self.toolbar.on_clear_btn_clicked()
return True
if key == Qt.Key_L:
self.toolbar.on_select_btn_clicked()
return True
if key == Qt.Key_S:
self.on_export()
return True
if key == Qt.Key_O:
self.toolbar.on_info_btn_clicked()
return True
if key == Qt.Key_Q:
self.close()
if self.current_view == HIGHLIGHTS_VIEW and self.sel_high_view:
if key == Qt.Key_C:
self.copy_text_2clip(self.get_highlights()[0])
return True
if mod == Qt.AltModifier: # if alt is pressed
if key == Qt.Key_A:
self.on_archive()
return True
if self.current_view == HIGHLIGHTS_VIEW and self.sel_high_view:
if key == Qt.Key_C:
self.copy_text_2clip(self.get_highlights()[1])
return True
if key == Qt.Key_Escape:
self.close()
return True
if key == Qt.Key_Delete:
self.delete_actions(0)
return True
def closeEvent(self, event):
""" Accepts or rejects the `exit` command
:type event: QCloseEvent
:param event: The `exit` event
"""
if not self.exit_msg:
self.bye_bye_stuff()
event.accept()
return
popup = self.popup(_("Confirmation"), _("Exit {}?").format(APP_NAME), buttons=2,
check_text=_("Don't show this again"))
self.exit_msg = not popup.checked
if popup.buttonRole(popup.clickedButton()) == QMessageBox.AcceptRole:
self.bye_bye_stuff()
event.accept() # let the window close
else:
event.ignore()
def bye_bye_stuff(self):
""" Things to do before exit
"""
self.settings_save()
self.delete_logs()
# ___ ____________________ DATABASE STUFF __________________________
def init_db(self):
""" Initialize the database tables
"""
# noinspection PyTypeChecker,PyCallByClass
self.db = QSqlDatabase.addDatabase("QSQLITE")
self.db.setDatabaseName(self.db_path)
if not self.db.open():
print("Could not open database!")
return
self.query = QSqlQuery()
if app_config:
pass
# self.query.exec_("""PRAGMA user_version""") # 2do: enable if db changes
# while self.query.next():
# self.check_db_version(self.query.value(0)) # check the db version
self.set_db_version() if not isfile(self.db_path) else None
self.create_books_table()
def check_db_version(self, version):
""" Updates the db to the last version
:type version: int
:param version: The db file version
"""
if version == DB_VERSION or not isfile(self.db_path):
return # the db is up to date or does not exists yet
self.update_db(version)
def set_db_version(self):
""" Set the current database version
"""
self.query.exec_("""PRAGMA user_version = {}""".format(DB_VERSION))
def change_db(self, mode):
""" Changes the current db file
:type mode: int
:param mode: Change, create new or reload the current db
"""
if mode == NEW_DB:
# noinspection PyCallByClass
filename = QFileDialog.getSaveFileName(self, _("Type the name of the new db"),
self.db_path,
(_("database files (*.db)")))[0]
elif mode == CHANGE_DB:
# noinspection PyCallByClass
filename = QFileDialog.getOpenFileName(self, _("Select a database file"),
self.db_path,
(_("database files (*.db)")))[0]
elif mode == RELOAD_DB:
filename = self.db_path
else:
return
if filename:
# self.toolbar.loaded_btn.click()
if self.toolbar.db_btn.isChecked():
self.toolbar.update_loaded()
self.delete_data()
self.db_path = filename
self.db_mode = False
self.init_db()
self.read_books_from_db()
if self.toolbar.db_btn.isChecked():
# noinspection PyTypeChecker,PyCallByClass
QTimer.singleShot(0, self.toolbar.update_archived)
def delete_data(self):
""" Deletes the database data
"""
self.db.close() # close the db
self.db = None
self.query = None
# print(self.db.connectionNames())
# self.db.removeDatabase(self.db.connectionName())
def create_books_table(self):
""" Create the books table
"""
self.query.exec_("""CREATE TABLE IF NOT EXISTS books (id INTEGER PRIMARY KEY,
md5 TEXT UNIQUE NOT NULL, date TEXT, path TEXT, data TEXT)""")
def add_books2db(self, books):
""" Add some books to the books db table
:type books: list
:param books: The books to add in the db
"""
self.db.transaction()
self.query.prepare("""INSERT OR REPLACE into books (md5, date, path, data)
VALUES (:md5, :date, :path, :data)""")
for book in books:
self.query.bindValue(":md5", book["md5"])
self.query.bindValue(":date", book["date"])
self.query.bindValue(":path", book["path"])
self.query.bindValue(":data", book["data"])
self.query.exec_()
self.db.commit()
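    # Illustrative sketch (added for this write-up; the path is
    # hypothetical): all inserts run inside a single transaction, so
    # archiving N books issues one commit, e.g.
    #
    #     self.add_books2db([{"md5": md5, "date": "2020-01-01 00:00:00",
    #                         "path": "/books/x.sdr/metadata.epub.lua",
    #                         "data": json.dumps(data)}])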
def read_books_from_db(self):
""" Reads the contents of the books' db table
"""
del self.books[:]
self.query.setForwardOnly(True)
self.query.exec_("""SELECT * FROM books""")
while self.query.next():
book = [self.query.value(i) for i in range(1, 5)] # don't read the id
data = json.loads(book[DB_DATA], object_hook=self.keys2int)
self.books.append({"md5": book[DB_MD5], "date": book[DB_DATE],
"path": book[DB_PATH], "data": data})
@staticmethod
def keys2int(data):
""" ReConverts the numeric keys of the Highlights in the data dictionary
that are converted to strings because of json serialization
:type data: dict
:param data: The books to add in the db
"""
if isinstance(data, dict):
return {int(k) if k.isdigit() else k: v for k, v in data.items()}
return data
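    # Illustrative note (added for this write-up): json.dumps() turns integer
    # highlight keys into strings, and keys2int restores them, e.g.
    # keys2int({"1": "a", "page": 3}) == {1: "a", "page": 3}.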
def update_book2db(self, data):
""" Updates the data of a book in the db
:type data: dict
:param data: The changed data
"""
self.query.prepare("""UPDATE books SET data = :data WHERE md5 = :md5""")
self.query.bindValue(":md5", data["partial_md5_checksum"])
self.query.bindValue(":data", json.dumps(data))
self.query.exec_()
def delete_books_from_db(self, ids):
""" Deletes multiple books from the db
:type ids: list
:param ids: The md5s of the books to be deleted
"""
if ids:
self.db.transaction()
self.query.prepare("""DELETE FROM books WHERE md5 = :md5""")
for md5 in ids:
self.query.bindValue(":md5", md5)
self.query.exec_()
self.db.commit()
def get_db_book_count(self):
""" Get the count of the books in the db
"""
self.query.exec_("""SELECT Count(*) FROM books""")
self.query.next()
return self.query.value(0)
def vacuum_db(self, info=True):
self.query.exec_("""VACUUM""")
if info:
self.popup(_("Information"), _("The database is compacted!"),
QMessageBox.Information)
# ___ ___________________ FILE TABLE STUFF ______________________
@Slot(list)
def on_file_table_fileDropped(self, dropped):
""" When some items are dropped to the TableWidget
:type dropped: list
:param dropped: The items dropped
"""
# self.file_table.setSortingEnabled(False)
for i in dropped:
if splitext(i)[1] == ".lua":
self.create_row(i)
# self.file_table.setSortingEnabled(True)
folders = [j for j in dropped if isdir(j)]
for folder in folders:
self.loading_thread(Scanner, folder, self.kor_text, clear=False)
# @Slot(QTableWidgetItem) # called indirectly from self.file_selection_update
def on_file_table_itemClicked(self, item, reset=True):
""" When an item of the FileTable is clicked
:type item: QTableWidgetItem
:param item: The item (cell) that is clicked
:type reset: bool
:param reset: Select the first highlight in the list
"""
if not item: # empty list
return
row = item.row()
data = self.file_table.item(row, TITLE).data(Qt.UserRole)
path = self.file_table.item(row, PATH).data(Qt.UserRole)
self.high_list.clear()
self.populate_high_list(data, path)
self.populate_book_info(data, row)
description_state = False
if "doc_props" in data and "description" in data["doc_props"]:
description_state = bool(data["doc_props"]["description"])
self.description_btn.setEnabled(description_state)
# self.high_list.sortItems() # using XListWidgetItem for custom sorting
self.high_list.setCurrentRow(0) if reset else None
def populate_book_info(self, data, row):
""" Fill in the `Book Info` fields
:type data: dict
:param data: The item's data
:type row: int
:param row: The item's row number
"""
for key, field in zip(self.info_keys, self.info_fields):
try:
if key == "title" and not data["stats"][key]:
path = self.file_table.item(row, PATH).data(0)
try:
name = path.split("#] ")[1]
value = splitext(name)[0]
except IndexError: # no "#] " in filename
value = ""
elif key == "keywords":
keywords = data["doc_props"][key].split("\n")
value = "; ".join([i.rstrip("\\") for i in keywords])
else:
value = data["stats"][key]
try:
field.setText(value)
except TypeError: # Needs string only
field.setText(str(value) if value else "") # "" if 0
except KeyError: # older type file or other problems
path = self.file_table.item(row, PATH).data(0)
stats = self.get_item_stats(path, data)
if key == "title":
field.setText(stats[1])
elif key == "authors":
field.setText(stats[2])
else:
field.setText("")
review = data.get("summary", {}).get("note", "")
self.review_lbl.setVisible(bool(review))
self.review_txt.setVisible(bool(review))
self.review_txt.setText(review)
@Slot()
def on_description_btn_clicked(self):
""" The book's `Description` button is pressed
"""
data = self.file_table.item(self.sel_idx.row(), TITLE).data(Qt.UserRole)
description = data["doc_props"]["description"]
self.description.high_edit_txt.setHtml(description)
self.description.show()
@Slot(QPoint)
def on_file_table_customContextMenuRequested(self, point):
""" When an item of the FileTable is right-clicked
:type point: QPoint
:param point: The point where the right-click happened
"""
if not len(self.file_selection.selectedRows()): # no items selected
return
menu = QMenu(self.file_table)
row = self.file_table.itemAt(point).row()
self.act_view_book.setEnabled(self.toolbar.open_btn.isEnabled())
self.act_view_book.setData(row)
menu.addAction(self.act_view_book)
action = QAction(_("Export"), menu)
action.setIcon(self.ico_file_save)
action.triggered.connect(self.on_export)
menu.addAction(action)
# if len(self.sel_indexes) > 1: # many items selected
# save_menu = self.save_menu()
# save_menu.setIcon(self.ico_file_save)
# save_menu.setTitle(_("Export"))
# menu.addMenu(save_menu)
# else: # only one item selected
# action = QAction(_("Export to text"), menu)
# action.setIcon(self.ico_file_save)
# action.triggered.connect(self.on_save_actions)
# action.setData(MANY_TEXT)
# menu.addAction(action)
#
# action = QAction(_("Export to html"), menu)
# action.setIcon(self.ico_file_save)
# action.triggered.connect(self.on_save_actions)
# action.setData(MANY_HTML)
# menu.addAction(action)
if not self.db_mode:
action = QAction(_("Archive") + "\tAlt+A", menu)
action.setIcon(self.ico_db_add)
action.triggered.connect(self.on_archive)
menu.addAction(action)
if len(self.sel_indexes) == 1:
sync_group = QMenu(self)
sync_group.setTitle(_("Sync"))
sync_group.setIcon(self.ico_files_merge)
if self.check4archive_merge() is not False:
sync_menu = self.create_archive_merge_menu()
sync_menu.setTitle(_("Sync with archived"))
sync_menu.setIcon(self.ico_files_merge)
sync_group.addMenu(sync_menu)
action = QAction(_("Sync with file"), sync_group)
action.setIcon(self.ico_files_merge)
action.triggered.connect(self.use_meta_files)
sync_group.addAction(action)
book_path, book_exists = self.file_table.item(row, TYPE).data(Qt.UserRole)
if book_exists:
action = QAction(_("ReCalculate MD5"), sync_group)
action.setIcon(self.ico_refresh)
action.triggered.connect(partial(self.recalculate_md5, book_path))
sync_group.addAction(action)
menu.addMenu(sync_group)
action = QAction(_("Open location"), menu)
action.setIcon(self.ico_folder_open)
folder_path = dirname(self.file_table.item(row, PATH).text())
action.triggered.connect(partial(self.open_file, folder_path))
menu.addAction(action)
delete_menu = self.delete_menu()
delete_menu.setIcon(self.ico_files_delete)
delete_menu.setTitle(_("Delete") + "\tDel")
menu.addMenu(delete_menu)
else:
action = QAction(_("Delete") + "\tDel", menu)
action.setIcon(self.ico_files_delete)
action.triggered.connect(partial(self.delete_actions, 0))
menu.addAction(action)
# # noinspection PyArgumentList
# menu.exec_(QCursor.pos())
menu.exec_(self.file_table.mapToGlobal(point))
@Slot(QTableWidgetItem)
def on_file_table_itemDoubleClicked(self, item):
""" When an item of the FileTable is double-clicked
:type item: QTableWidgetItem
:param item: The item (cell) that is double-clicked
"""
row = item.row()
meta_path = splitext(self.file_table.item(row, PATH).data(0))[0]
book_path = self.get_book_path(meta_path)
self.open_file(book_path)
@staticmethod
def get_book_path(path):
""" Returns the filename of the book that the metadata refers to
:type path: str|unicode
:param path: The path of the metadata file
"""
path, ext = splitext(path)
path = splitext(split(path)[0])[0] + ext
return path
@Slot()
def on_act_view_book_triggered(self):
""" The View Book menu entry is pressed
"""
row = self.sender().data()
if self.current_view == BOOKS_VIEW:
item = self.file_table.itemAt(row, 0)
self.on_file_table_itemDoubleClicked(item)
elif self.current_view == HIGHLIGHTS_VIEW:
data = self.high_table.item(row, HIGHLIGHT_H).data(Qt.UserRole)
self.open_file(data["path"])
# noinspection PyUnusedLocal
def file_selection_update(self, selected, deselected):
""" When a row in FileTable gets selected
:type selected: QModelIndex
:parameter selected: The selected row
:type deselected: QModelIndex
:parameter deselected: The deselected row
"""
try:
self.sel_indexes = self.file_selection.selectedRows()
self.sel_idx = self.sel_indexes[-1]
except IndexError: # empty table
self.sel_indexes = []
self.sel_idx = None
# if self.file_selection.selectedRows():
# idx = selected.indexes()[0]
if self.sel_indexes:
item = self.file_table.item(self.sel_idx.row(), self.sel_idx.column())
self.on_file_table_itemClicked(item)
else:
self.high_list.clear()
self.description_btn.setEnabled(False)
for field in self.info_fields:
field.setText("")
self.toolbar.activate_buttons()
def on_column_clicked(self, column):
""" Sets the current sorting column
:type column: int
:parameter column: The column where the filtering is applied
"""
if column == self.col_sort:
self.col_sort_asc = not self.col_sort_asc
else:
self.col_sort_asc = True
self.col_sort = column
def on_column_right_clicked(self, pos):
""" Creates a sorting menu for the "Title" column
:type pos: QPoint
:parameter pos: The position of the right click
"""
column = self.header_main.logicalIndexAt(pos)
name = self.file_table.horizontalHeaderItem(column).text()
if name == _("Title"):
menu = QMenu(self)
action = QAction(_("Ignore english articles"), menu)
action.setCheckable(True)
action.setChecked(self.alt_title_sort)
action.triggered.connect(self.toggle_title_sort)
menu.addAction(action)
menu.exec_(self.file_table.mapToGlobal(pos))
def toggle_title_sort(self):
""" Toggles the way titles are sorted (use or not A/The)
"""
self.alt_title_sort = not self.alt_title_sort
text = _("ReSorting books...")
if not self.db_mode:
self.loading_thread(ReLoader, self.loaded_paths.copy(), text)
else:
self.loading_thread(DBLoader, self.books, text)
@Slot(bool)
def on_fold_btn_toggled(self, pressed):
""" Open/closes the Book info panel
:type pressed: bool
:param pressed: The arrow button"s status
"""
if pressed: # Closed
self.fold_btn.setText(_("Show Book Info"))
self.fold_btn.setArrowType(Qt.RightArrow)
else: # Opened
self.fold_btn.setText(_("Hide Book Info"))
self.fold_btn.setArrowType(Qt.DownArrow)
self.book_info.setHidden(pressed)
def on_archive(self):
""" Add the selected books to the archive db
"""
if not self.sel_indexes:
return
if self.archive_warning: # warn about book replacement in archive
extra = _("these books") if len(self.sel_indexes) > 1 else _("this book")
popup = self.popup(_("Question!"),
_("Add or replace {} in the archive?").format(extra),
buttons=2, icon=QMessageBox.Question,
check_text=_("Don't show this again"))
self.archive_warning = not popup.checked
if popup.buttonRole(popup.clickedButton()) == QMessageBox.RejectRole:
return
empty = 0
older = 0
added = 0
books = []
for idx in self.sel_indexes:
row = idx.row()
path = self.file_table.item(row, PATH).text()
date = self.file_table.item(row, MODIFIED).text()
data = self.file_table.item(row, TITLE).data(Qt.UserRole)
if not data["highlight"]: # no highlights, don't add
empty += 1
continue
try:
md5 = data["partial_md5_checksum"]
except KeyError: # older metadata, don't add
older += 1
continue
data["stats"]["performance_in_pages"] = {} # can be cluttered
data["page_positions"] = {} # can be cluttered
books.append({"md5": md5, "path": path, "date": date,
"data": json.dumps(data)})
added += 1
self.add_books2db(books)
extra = ""
if empty:
extra += _("\nNot added {} books with no highlights.").format(empty)
if older:
extra += _("\nNot added {} books with old type metadata.").format(older)
self.popup(_("Added!"),
_("{} books were added/updated to the Archive from the {} processed.")
.format(added, len(self.sel_indexes)) + extra,
icon=QMessageBox.Information)
def loading_thread(self, worker, args, text, clear=True):
""" Populates the file_table with different contents
"""
if clear:
self.toolbar.on_clear_btn_clicked()
self.file_table.setSortingEnabled(False) # re-enable it after populating table
self.status.animation(True)
self.auto_info.set_text(_("{}.\nPlease Wait...").format(text))
self.auto_info.show()
scan_thread = QThread()
loader = worker(args)
loader.moveToThread(scan_thread)
loader.found.connect(self.create_row)
loader.finished.connect(self.loading_finished)
loader.finished.connect(scan_thread.quit)
loader.finished.connect(self.thread_cleanup)
scan_thread.loader = loader
scan_thread.started.connect(loader.process)
scan_thread.start(QThread.IdlePriority)
self.threads.append(scan_thread)
def loading_finished(self):
""" What will happen after the populating of the file_table ends
"""
if self.current_view == HIGHLIGHTS_VIEW:
self.scan_highlights_thread()
else: # Books view
self.status.animation(False)
self.auto_info.hide()
self.file_table.clearSelection()
self.sel_idx = None
self.sel_indexes = []
self.file_table.resizeColumnsToContents()
self.toolbar.activate_buttons()
self.file_table.setSortingEnabled(True)
order = Qt.AscendingOrder if self.col_sort_asc else Qt.DescendingOrder
self.file_table.sortByColumn(self.col_sort, order)
def create_row(self, filename, data=None, date=None):
""" Creates a table row from the given file
:type filename: str|unicode
:param filename: The metadata file to be read
"""
if not self.db_mode: # for files
# if exists(filename) and splitext(filename)[1].lower() == '.lua':
if filename in self.loaded_paths:
return # already loaded file
self.loaded_paths.add(filename)
data = decode_data(filename)
if not data:
print("No data here!", filename)
return
date = str(datetime.fromtimestamp(getmtime(filename))).split(".")[0]
stats = self.get_item_stats(filename, data)
icon, title, authors, percent, rating, status, high_count = stats
else: # for db entries
stats = self.get_item_db_stats(data)
icon, title, authors, percent, rating, status, high_count = stats
color = ("#660000" if status == "abandoned" else
# "#005500" if status == "complete" else
None)
self.file_table.setSortingEnabled(False)
self.file_table.insertRow(0)
Item = QTableWidgetItem if not self.alt_title_sort else XTableWidgetTitleItem
title_item = Item(icon, title)
title_item.setToolTip(title)
title_item.setData(Qt.UserRole, data)
self.file_table.setItem(0, TITLE, title_item)
author_item = QTableWidgetItem(authors)
author_item.setToolTip(authors)
self.file_table.setItem(0, AUTHOR, author_item)
ext = splitext(splitext(filename)[0])[1][1:]
book_path = splitext(self.get_book_path(filename))[0] + "." + ext
book_exists = isfile(book_path)
book_icon = self.ico_file_exists if book_exists else self.ico_file_missing
type_item = QTableWidgetItem(book_icon, ext)
type_item.setToolTip(book_path if book_exists else
_("The {} file is missing!").format(ext))
type_item.setData(Qt.UserRole, (book_path, book_exists))
self.file_table.setItem(0, TYPE, type_item)
percent_item = XTableWidgetPercentItem(percent)
percent_item.setToolTip(percent)
percent_item.setTextAlignment(Qt.AlignRight)
self.file_table.setItem(0, PERCENT, percent_item)
rating_item = QTableWidgetItem(rating)
rating_item.setToolTip(rating)
self.file_table.setItem(0, RATING, rating_item)
count_item = XTableWidgetIntItem(high_count)
count_item.setToolTip(high_count)
# count_item.setTextAlignment(Qt.AlignRight)
self.file_table.setItem(0, HIGH_COUNT, count_item)
date_item = QTableWidgetItem(date)
date_item.setToolTip(date)
self.file_table.setItem(0, MODIFIED, date_item)
path_item = QTableWidgetItem(filename)
path_item.setToolTip(filename)
self.file_table.setItem(0, PATH, path_item)
for i in range(7): # colorize row
item = self.file_table.item(0, i)
item.setForeground(QBrush(QColor(color)))
self.file_table.setSortingEnabled(True)
def get_item_db_stats(self, data):
""" Returns the title and authors of a history file
:type data: dict
:param data: The dict converted lua file
"""
if data["highlight"]:
icon = self.ico_label_green
high_count = str(len(data["highlight"]))
else:
icon = self.ico_empty
high_count = ""
title = data["stats"]["title"]
authors = data["stats"]["authors"]
title = title if title else _("NO TITLE FOUND")
authors = authors if authors else _("NO AUTHOR FOUND")
try:
percent = str(int(data["percent_finished"] * 100)) + "%"
except KeyError:
percent = ""
if "summary" in data:
rating = data["summary"].get("rating")
rating = rating * "*" if rating else ""
status = data["summary"].get("status")
else:
rating = ""
status = None
return icon, title, authors, percent, rating, status, high_count
def get_item_stats(self, filename, data):
""" Returns the title and authors of a metadata file
:type filename: str|unicode
:param filename: The filename to get the stats for
:type data: dict
:param data: The dict converted lua file
"""
if data["highlight"]:
icon = self.ico_label_green
high_count = str(len(data["highlight"]))
else:
icon = self.ico_empty
high_count = ""
try:
title = data["stats"]["title"]
authors = data["stats"]["authors"]
except KeyError: # older type file
title = splitext(basename(filename))[0]
try:
name = title.split("#] ")[1]
title = splitext(name)[0]
except IndexError: # no "#] " in filename
pass
authors = _("OLD TYPE FILE")
if not title:
try:
name = filename.split("#] ")[1]
title = splitext(name)[0]
except IndexError: # no "#] " in filename
title = _("NO TITLE FOUND")
authors = authors if authors else _("NO AUTHOR FOUND")
try:
percent = str(int(data["percent_finished"] * 100)) + "%"
except KeyError:
percent = None
if "summary" in data:
rating = data["summary"].get("rating")
rating = rating * "*" if rating else ""
status = data["summary"].get("status")
else:
rating = ""
status = None
return icon, title, authors, percent, rating, status, high_count
# ___ ___________________ HIGHLIGHT TABLE STUFF _________________
@Slot(QTableWidgetItem)
def on_high_table_itemClicked(self, item):
""" When an item of the high_table is clicked
:type item: QTableWidgetItem
:param item: The item (cell) that is clicked
"""
row = item.row()
data = self.high_table.item(row, HIGHLIGHT_H).data(Qt.UserRole)
# needed for edit "Comments" or "Find in Books" in Highlight View
for row in range(self.file_table.rowCount()): # 2check: need to optimize?
if data["path"] == self.file_table.item(row, TYPE).data(Qt.UserRole)[0]:
self.parent_book_data = self.file_table.item(row, TITLE).data(Qt.UserRole)
break
@Slot(QModelIndex)
def on_high_table_doubleClicked(self, index):
""" When an item of the high_table is double-clicked
:type index: QTableWidgetItem
:param index: The item (cell) that is clicked
"""
column = index.column()
if column == COMMENT_H:
self.on_edit_comment()
@Slot(QPoint)
def on_high_table_customContextMenuRequested(self, point):
""" When an item of the high_table is right-clicked
:type point: QPoint
:param point: The point where the right-click happened
"""
if not len(self.sel_high_view): # no items selected
return
menu = QMenu(self.high_table)
row = self.high_table.itemAt(point).row()
self.act_view_book.setData(row)
self.act_view_book.setEnabled(self.toolbar.open_btn.isEnabled())
menu.addAction(self.act_view_book)
highlights, comments = self.get_highlights()
high_text = _("Copy Highlights")
com_text = _("Copy Comments")
if len(self.sel_high_view) == 1: # single selection
high_text = _("Copy Highlight")
com_text = _("Copy Comment")
text = _("Find in Archive") if self.db_mode else _("Find in Books")
action = QAction(text, menu)
action.triggered.connect(partial(self.find_in_books, highlights))
action.setIcon(self.ico_view_books)
menu.addAction(action)
action = QAction(_("Comments"), menu)
action.triggered.connect(self.on_edit_comment)
action.setIcon(self.ico_file_edit)
menu.addAction(action)
action = QAction(high_text + "\tCtrl+C", menu)
action.triggered.connect(partial(self.copy_text_2clip, highlights))
action.setIcon(self.ico_copy)
menu.addAction(action)
action = QAction(com_text + "\tAlt+C", menu)
action.triggered.connect(partial(self.copy_text_2clip, comments))
action.setIcon(self.ico_copy)
menu.addAction(action)
action = QAction(_("Export to file"), menu)
action.triggered.connect(self.on_export)
action.setData(2)
action.setIcon(self.ico_file_save)
menu.addAction(action)
menu.exec_(self.high_table.mapToGlobal(point))
def get_highlights(self):
""" Returns the selected highlights and the comments texts
"""
highlights = ""
comments = ""
for idx in self.sel_high_view:
item_row = idx.row()
data = self.high_table.item(item_row, HIGHLIGHT_H).data(Qt.UserRole)
highlight = data["text"]
if highlight:
highlights += highlight + "\n\n"
comment = data["comment"]
if comment:
comments += comment + "\n\n"
highlights = highlights.rstrip("\n").replace("\n", os.linesep)
comments = comments.rstrip("\n").replace("\n", os.linesep)
return highlights, comments
def scan_highlights_thread(self):
""" Gets all the loaded highlights
"""
self.high_table.model().removeRows(0, self.high_table.rowCount())
self.status.animation(True)
self.auto_info.set_text(_("Creating Highlights table.\n"
"Please Wait..."))
self.auto_info.show()
scan_thread = QThread()
scanner = HighlightScanner()
scanner.moveToThread(scan_thread)
scanner.found.connect(self.create_highlight_row)
scanner.finished.connect(self.scan_highlights_finished)
scanner.finished.connect(scan_thread.quit)
scan_thread.scanner = scanner
scan_thread.started.connect(scanner.process)
scan_thread.start(QThread.IdlePriority)
self.threads.append(scan_thread)
def scan_highlights_finished(self):
""" What will happen after the scanning for history files ends
"""
self.auto_info.hide()
self.status.animation(False)
for col in [PAGE_H, DATE_H, AUTHOR_H, TITLE_H, PATH_H]:
self.high_table.resizeColumnToContents(col)
self.toolbar.activate_buttons()
self.reload_highlights = False
self.high_table.setSortingEnabled(True) # re-enable, after populating table
order = Qt.AscendingOrder if self.col_sort_asc_h else Qt.DescendingOrder
self.high_table.sortByColumn(self.col_sort_h, order)
def create_highlight_row(self, data):
""" Creates a highlight table row from the given data
:type data: dict
:param data: The highlight data
"""
self.high_table.setSortingEnabled(False)
self.high_table.insertRow(0)
text = data["text"]
item = QTableWidgetItem(text)
item.setToolTip("<p>{}</p>".format(text))
item.setData(Qt.UserRole, data)
self.high_table.setItem(0, HIGHLIGHT_H, item)
comment = data["comment"]
item = QTableWidgetItem(comment)
item.setToolTip("<p>{}</p>".format(comment)) if comment else None
self.high_table.setItem(0, COMMENT_H, item)
date = data["date"]
item = QTableWidgetItem(date)
item.setToolTip(date)
item.setTextAlignment(Qt.AlignRight)
self.high_table.setItem(0, DATE_H, item)
title = data["title"]
item = QTableWidgetItem(title)
item.setToolTip(title)
self.high_table.setItem(0, TITLE_H, item)
authors = data["authors"]
item = QTableWidgetItem(authors)
item.setToolTip(authors)
self.high_table.setItem(0, AUTHOR_H, item)
page = str(data["page"])
item = XTableWidgetIntItem(page)
item.setToolTip(page)
item.setTextAlignment(Qt.AlignRight)
self.high_table.setItem(0, PAGE_H, item)
path = data["path"]
item = QTableWidgetItem(path)
item.setToolTip(path)
self.high_table.setItem(0, PATH_H, item)
self.high_table.setSortingEnabled(True)
# noinspection PyUnusedLocal
def high_view_selection_update(self, selected, deselected):
""" When a row in high_table gets selected
:type selected: QModelIndex
:parameter selected: The selected row
:type deselected: QModelIndex
:parameter deselected: The deselected row
"""
try:
self.sel_high_view = self.high_view_selection.selectedRows()
except IndexError: # empty table
self.sel_high_view = []
self.toolbar.activate_buttons()
def on_highlight_column_clicked(self, column):
""" Sets the current sorting column
:type column: int
        :parameter column: The column where the sorting is applied
"""
if column == self.col_sort_h:
self.col_sort_asc_h = not self.col_sort_asc_h
else:
self.col_sort_asc_h = True
self.col_sort_h = column
# noinspection PyUnusedLocal
def on_highlight_column_resized(self, column, oldSize, newSize):
""" Gets the column size
:type column: int
:parameter column: The resized column
:type oldSize: int
:parameter oldSize: The old size
:type newSize: int
:parameter newSize: The new size
"""
if column == HIGHLIGHT_H:
self.highlight_width = newSize
elif column == COMMENT_H:
self.comment_width = newSize
def find_in_books(self, highlight):
""" Finds the current highlight in the "Books View"
:type highlight: str|unicode
:parameter highlight: The highlight we searching for
"""
data = self.parent_book_data
for row in range(self.file_table.rowCount()):
item = self.file_table.item(row, TITLE)
row_data = item.data(Qt.UserRole)
try: # find the book row
if data["stats"]["title"] == row_data["stats"]["title"]:
self.views.setCurrentIndex(BOOKS_VIEW)
self.toolbar.books_view_btn.setChecked(True)
self.toolbar.setup_buttons()
self.toolbar.activate_buttons()
self.file_table.selectRow(row) # select the book
self.on_file_table_itemClicked(item)
for high_row in range(self.high_list.count()): # find the highlight
if (self.high_list.item(high_row)
.data(Qt.UserRole)[HIGHLIGHT_TEXT] == highlight):
self.high_list.setCurrentRow(high_row) # select the highlight
return
except KeyError: # old metadata with no "stats"
continue
# ___ ___________________ HIGHLIGHTS LIST STUFF _________________
def populate_high_list(self, data, path=""):
""" Populates the Highlights list of `Book` view
:type data: dict
:param data: The item/book's data
:type path: str|unicode
:param path: The item/book's path
"""
space = (" " if self.status.act_page.isChecked() and
self.status.act_date.isChecked() else "")
line_break = (":\n" if self.status.act_page.isChecked() or
self.status.act_date.isChecked() else "")
highlights = self.parse_highlights(data, path)
for i in sorted(highlights, key=self.sort_high4view):
page_text = (_("Page ") + str(i["page"])
if self.status.act_page.isChecked() else "")
date_text = "[" + i["date"] + "]" if self.status.act_date.isChecked() else ""
high_text = i["text"] if self.status.act_text.isChecked() else ""
line_break2 = ("\n" if self.status.act_comment.isChecked() and i["comment"]
else "")
high_comment = line_break2 + "● " + i["comment"] if line_break2 else ""
highlight = (page_text + space + date_text + line_break +
high_text + high_comment + "\n")
highlight_item = QListWidgetItem(highlight, self.high_list)
highlight_item.setData(Qt.UserRole, i)
def parse_highlights(self, data, path=""):
""" Get the HighLights from the .sdr data
:type data: dict
:param data: The lua converted book data
:type path: str|unicode
:param path: The book's path
"""
authors = data.get("stats", {}).get("authors", "NO AUTHOR FOUND")
title = data.get("stats", {}).get("title", "NO TITLE FOUND")
highlights = []
for page in data["highlight"]:
for page_id in data["highlight"][page]:
highlight = self.get_highlight_info(data, page, page_id)
if highlight:
highlight.update({"authors": authors, "title": title,
"path": path})
highlights.append(highlight)
return highlights
@staticmethod
def get_highlight_info(data, page, page_id):
""" Get the highlight's info (text, comment, date and page)
:type data: dict
:param data: The highlight's data
:type page: int
        :param page: The page where the highlight starts
        :type page_id: int
        :param page_id: The count of this page's highlight
"""
highlight = {}
try:
date = data["highlight"][page][page_id]["datetime"]
text4check = data["highlight"][page][page_id]["text"]
text = text4check.replace("\\\n", "\n")
comment = ""
for idx in data["bookmarks"]: # check for comment text
if text4check == data["bookmarks"][idx]["notes"]:
bkm_text = data["bookmarks"][idx].get("text", "")
if not bkm_text or (bkm_text == text4check):
break
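                    # The bookmark's "text" apparently wraps the user's comment in an
                    # auto-generated "Page N <comment> @ <datetime>" header; the regex
                    # below strips that wrapper to isolate the comment itself.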
bkm_text = re.sub(r"Page \d+ "
r"(.+?) @ \d+-\d+-\d+ \d+:\d+:\d+", r"\1",
bkm_text, 1, re.DOTALL | re.MULTILINE)
if text4check != bkm_text: # there is a comment
comment = bkm_text.replace("\\\n", "\n")
break
highlight["date"] = date
highlight["text"] = text
highlight["comment"] = comment
highlight["page"] = page
highlight["page_id"] = page_id
except KeyError: # blank highlight
return
return highlight
@staticmethod
def get_high_data(data, page, page_id): # 2check: is it better than the prev
""" Get the highlight's info (text, comment, date and page)
:type data: dict
:param data: The highlight's data
:type page: int
        :param page: The page where the highlight starts
        :type page_id: int
        :param page_id: The count of this page's highlight
"""
date = data["highlight"][page][page_id]["datetime"]
high_text = data["highlight"][page][page_id]["text"]
pos_0 = data["highlight"][page][page_id]["pos0"]
pos_1 = data["highlight"][page][page_id]["pos1"]
comment = ""
for idx in data["bookmarks"]:
try:
book_pos0 = data["bookmarks"][idx]["pos0"]
except KeyError: # no [idx]["pos0"] exists (blank highlight)
continue
book_pos1 = data["bookmarks"][idx]["pos1"]
if (pos_0 == book_pos0) and (pos_1 == book_pos1):
bkm_text = data["bookmarks"][idx].get("text", "")
if not bkm_text or (bkm_text == high_text):
break
bkm_text = re.sub(r"Page \d+ (.+?) @ \d+-\d+-\d+ \d+:\d+:\d+", r"\1",
bkm_text, 1, re.DOTALL | re.MULTILINE)
if high_text != bkm_text:
comment = bkm_text
break
return comment, date, high_text
@Slot(QPoint)
def on_high_list_customContextMenuRequested(self, point):
""" When a highlight is right-clicked
:type point: QPoint
:param point: The point where the right-click happened
"""
if self.sel_high_list:
menu = QMenu(self.high_list)
action = QAction(_("Comments"), menu)
action.triggered.connect(self.on_edit_comment)
action.setIcon(self.ico_file_edit)
menu.addAction(action)
action = QAction(_("Copy"), menu)
action.triggered.connect(self.on_copy_highlights)
action.setIcon(self.ico_copy)
menu.addAction(action)
action = QAction(_("Delete"), menu)
action.triggered.connect(self.on_delete_highlights)
action.setIcon(self.ico_delete)
menu.addAction(action)
menu.exec_(self.high_list.mapToGlobal(point))
@Slot()
def on_high_list_itemDoubleClicked(self):
""" An item on the Highlight List is double-clicked
"""
self.on_edit_comment()
def on_edit_comment(self):
""" Opens a window to edit the selected highlight's comment
"""
if self.file_table.isVisible(): # edit comments from Book View
row = self.sel_high_list[-1].row()
comment = self.high_list.item(row).data(Qt.UserRole)["comment"]
elif self.high_table.isVisible(): # edit comments from Highlights View
row = self.sel_high_view[-1].row()
high_data = self.high_table.item(row, HIGHLIGHT_H).data(Qt.UserRole)
comment = high_data["comment"]
else:
return
self.edit_high.high_edit_txt.setText(comment)
# self.edit_high.high_edit_txt.setFocus()
self.edit_high.exec_()
def edit_comment_ok(self):
""" Change the selected highlight's comment
"""
text = self.edit_high.high_edit_txt.toPlainText()
if self.file_table.isVisible():
high_index = self.sel_high_list[-1]
high_row = high_index.row()
high_data = self.high_list.item(high_row).data(Qt.UserRole)
high_text = high_data["text"].replace("\n", "\\\n")
row = self.sel_idx.row()
item = self.file_table.item
data = item(row, TITLE).data(Qt.UserRole)
for bookmark in data["bookmarks"].keys():
if high_text == data["bookmarks"][bookmark]["notes"]:
data["bookmarks"][bookmark]["text"] = text.replace("\n", "\\\n")
break
item(row, TITLE).setData(Qt.UserRole, data)
if not self.db_mode: # Loaded mode
path = item(row, PATH).text()
self.save_book_data(path, data)
else: # Archived mode
self.update_book2db(data)
self.on_file_table_itemClicked(item(row, 0), reset=False)
elif self.high_table.isVisible():
data = self.parent_book_data
row = self.sel_high_view[-1].row()
high_data = self.high_table.item(row, HIGHLIGHT_H).data(Qt.UserRole)
high_text = high_data["text"].replace("\n", "\\\n")
for bookmark in data["bookmarks"].keys():
if high_text == data["bookmarks"][bookmark]["notes"]:
data["bookmarks"][bookmark]["text"] = text.replace("\n", "\\\n")
high_data["comment"] = text
break
self.high_table.item(row, HIGHLIGHT_H).setData(Qt.UserRole, high_data)
self.high_table.item(row, COMMENT_H).setText(text)
if not self.db_mode: # Loaded mode
book_path, ext = splitext(high_data["path"])
path = join(book_path + ".sdr", "metadata{}.lua".format(ext))
self.save_book_data(path, data)
else: # Archived mode
self.update_book2db(data)
path = self.high_table.item(row, PATH_H).text()
for row in range(self.file_table.rowCount()):
if path == self.file_table.item(row, TYPE).data(Qt.UserRole)[0]:
self.file_table.item(row, TITLE).setData(Qt.UserRole, data)
break
self.reload_highlights = True
def on_copy_highlights(self):
""" Copy the selected highlights to clipboard
"""
clipboard_text = ""
for highlight in sorted(self.sel_high_list):
row = highlight.row()
text = self.high_list.item(row).text()
clipboard_text += text + "\n"
self.copy_text_2clip(clipboard_text)
def on_delete_highlights(self):
""" The delete highlights action was invoked
"""
if not self.db_mode:
if self.edit_lua_file_warning:
text = _("This is an one-time warning!\n\nIn order to delete highlights "
"from a book, its \"metadata\" file must be edited. This "
"contains a small risk of corrupting that file and lose all the "
"settings and info of that book.\n\nDo you still want to do it?")
popup = self.popup(_("Warning!"), text, buttons=2,
button_text=(_("Yes"), _("No")))
if popup.buttonRole(popup.clickedButton()) == QMessageBox.RejectRole:
return
else:
self.edit_lua_file_warning = False
text = _("This will delete the selected highlights!\nAre you sure?")
else:
text = _("This will remove the selected highlights from the Archive!\n"
"Are you sure?")
popup = self.popup(_("Warning!"), text, buttons=2,
button_text=(_("Yes"), _("No")))
if popup.buttonRole(popup.clickedButton()) == QMessageBox.RejectRole:
return
self.delete_highlights()
def delete_highlights(self):
""" Delete the selected highlights
"""
row = self.sel_idx.row()
data = self.file_table.item(row, TITLE).data(Qt.UserRole)
for highlight in self.sel_high_list:
high_row = highlight.row()
high_data = self.high_list.item(high_row).data(Qt.UserRole)
page = high_data["page"]
page_id = high_data["page_id"]
del data["highlight"][page][page_id] # delete the highlight
# delete the associated bookmark
text = high_data["text"]
for bookmark in data["bookmarks"].keys():
if text == data["bookmarks"][bookmark]["notes"]:
del data["bookmarks"][bookmark]
for i in data["highlight"].keys():
if not data["highlight"][i]: # delete page dicts with no highlights
del data["highlight"][i]
else: # renumbering the highlight keys
contents = [data["highlight"][i][j] for j in sorted(data["highlight"][i])]
if contents:
for l in data["highlight"][i].keys(): # delete all the items and
del data["highlight"][i][l]
for k in range(len(contents)): # rewrite them with the new keys
data["highlight"][i][k + 1] = contents[k]
contents = [data["bookmarks"][bookmark] for bookmark in sorted(data["bookmarks"])]
if contents: # renumbering the bookmarks keys
for bookmark in data["bookmarks"].keys(): # delete all the items and
del data["bookmarks"][bookmark]
for content in range(len(contents)): # rewrite them with the new keys
data["bookmarks"][content + 1] = contents[content]
if not data["highlight"]: # change icon if no highlights
item = self.file_table.item(row, 0)
item.setIcon(self.ico_empty)
if not self.db_mode:
path = self.file_table.item(row, PATH).text()
self.save_book_data(path, data)
else:
self.update_book2db(data)
item = self.file_table.item
item(row, TITLE).setData(Qt.UserRole, data)
self.on_file_table_itemClicked(item(row, 0), reset=False)
self.reload_highlights = True
def save_book_data(self, path, data):
""" Saves the data of a book to its lua file
:type path: str|unicode
:param path: The path to the book's data file
:type data: dict
:param data: The book's data
"""
times = os.stat(path) # read the file's created/modified times
encode_data(path, data)
os.utime(path, (times.st_ctime, times.st_mtime)) # reapply original times
if self.file_table.isVisible():
self.on_file_table_itemClicked(self.file_table.item(self.sel_idx.row(), 0),
reset=False)
# noinspection PyUnusedLocal
def high_list_selection_update(self, selected, deselected):
""" When a highlight in gets selected
:type selected: QModelIndex
:parameter selected: The selected highlight
:type deselected: QModelIndex
:parameter deselected: The deselected highlight
"""
self.sel_high_list = self.high_list_selection.selectedRows()
def set_highlight_sort(self):
""" Sets the sorting method of displayed highlights
"""
self.high_by_page = self.sender().data()
try:
row = self.sel_idx.row()
self.on_file_table_itemClicked(self.file_table.item(row, 0), False)
except AttributeError: # no book selected
pass
def sort_high4view(self, data):
""" Sets the sorting method of displayed highlights
:type data: tuple
param: data: The highlight's data
"""
return int(data["page"]) if self.high_by_page else data["date"]
def sort_high4write(self, data):
""" Sets the sorting method of written highlights
:type data: tuple
param: data: The highlight's data
"""
if self.high_by_page and self.status.act_page.isChecked():
page = data[3]
if page.startswith("Page"):
page = page[5:]
return int(page)
else:
return data[0]
# ___ ___________________ MERGING - SYNCING STUFF _______________
def same_book(self, data1, data2, book1="", book2=""):
""" Check if the supplied metadata comes from the same book
:type data1: dict
:param data1: The data of the first book
:type data2: dict
:param data2: The data of the second book
:type book1: str|unicode
:param book1: The path to the first book
:type book2: str|unicode
:param book2: The path to the second book
"""
md5_1 = data1.get("partial_md5_checksum", data1["stats"].get("md5", None)
if "stats" in data1 else None)
if not md5_1 and book1:
md5_1 = self.md5_from_file(book1)
if md5_1: # got the first MD5, check for the second
md5_2 = data2.get("partial_md5_checksum", data2["stats"].get("md5", None)
if "stats" in data2 else None)
if not md5_2 and book2:
md5_2 = self.md5_from_file(book2)
if md5_2 and md5_1 == md5_2: # same MD5 for both books
return True
return False
def wrong_book(self):
""" Shows an info dialog if the book MD5 of two metadata are different
"""
text = _("It seems that the selected metadata file belongs to a different book..")
self.popup(_("Book mismatch!"), text, icon=QMessageBox.Critical)
@staticmethod
def same_cre_version(data):
""" Check if the supplied metadata have the same CRE version
:type data: list[dict]
:param data: The data to get checked
"""
try:
if data[0]["cre_dom_version"] == data[1]["cre_dom_version"]:
return True
except KeyError: # no "cre_dom_version" key (older metadata)
pass
return False
def wrong_cre_version(self):
""" Shows an info dialog if the CRE version of two metadata are different
"""
text = _("Can not merge these highlights, because they are produced with a "
"different version of the reader engine!\n\n"
"The reader engine and the way it renders the text is responsible "
"for the positioning of highlights. Some times, code changes are "
"made that change its behavior. Its version is written in the "
"metadata of a book the first time is opened and can only change "
"if the metadata are cleared (loosing all highlights) and open the "
"book again as new.\n\n"
"The reader's engine version is independent of the KOReader version "
"and does not change that often.")
self.popup(_("Version mismatch!"), text, icon=QMessageBox.Critical)
def check4archive_merge(self):
""" Check if the selected books' highlights can be merged
with its archived version
"""
idx = self.sel_idx
data1 = self.file_table.item(idx.row(), idx.column()).data(Qt.UserRole)
book_path = self.file_table.item(idx.row(), TYPE).data(Qt.UserRole)[0]
for index, book in enumerate(self.books):
data2 = book["data"]
if self.same_book(data1, data2, book_path):
if self.same_cre_version([data1, data2]):
return index
return False
def merge_menu(self):
""" Creates the `Merge/Sync` button menu
"""
menu = QMenu(self)
action = QAction(self.ico_files_merge, _("Merge highlights"), menu)
action.triggered.connect(self.toolbar.on_merge_btn_clicked)
menu.addAction(action)
action = QAction(self.ico_files_merge, _("Sync position only"), menu)
action.triggered.connect(partial(self.merge_highlights, True, False))
menu.addAction(action)
return menu
def create_archive_merge_menu(self):
""" Creates the `Sync` sub-menu
"""
menu = QMenu(self)
action = QAction(self.ico_files_merge, _("Merge highlights"), menu)
action.triggered.connect(partial(self.on_merge_highlights, True))
menu.addAction(action)
action = QAction(self.ico_files_merge, _("Sync position only"), menu)
action.triggered.connect(partial(self.merge_highlights, True, False, True))
menu.addAction(action)
return menu
def on_merge_highlights(self, to_archived=False, filename=""):
""" Tries to merge/sync highlights
:type to_archived: bool
:param to_archived: Merge a book with its archived version
:type filename: str|unicode
:param filename: The path to the metadata file to merge the book with
"""
if self.high_merge_warning:
text = _("Merging highlights is experimental so, always do backups ;o)\n"
"Because of the different page formats and sizes, some page "
"numbers in {} might be inaccurate. "
"Do you want to continue?").format(APP_NAME)
popup = self.popup(_("Warning!"), text, buttons=2,
button_text=(_("Yes"), _("No")),
check_text=_("Don't show this again"))
self.high_merge_warning = not popup.checked
if popup.buttonRole(popup.clickedButton()) == QMessageBox.RejectRole:
return
popup = self.popup(_("Warning!"),
_("The highlights of the selected entries will be merged.\n"
"This can not be undone! Continue?"), buttons=2,
button_text=(_("Yes"), _("No")),
check_text=_("Sync the reading position too"))
if popup.buttonRole(popup.clickedButton()) == QMessageBox.AcceptRole:
self.merge_highlights(popup.checked, True, to_archived, filename)
def merge_highlights(self, sync, merge, to_archived=False, filename=""):
""" Merge highlights from the same book in two different devices
:type sync: bool
:param sync: Sync reading position
:type merge: bool
:param merge: Merge the highlights
:type to_archived: bool
:param to_archived: Merge a book with its archived version
:type filename: str|unicode
:param filename: The path to the metadata file to merge the book with
"""
if to_archived: # Merge/Sync a book with archive
idx1, idx2 = self.sel_idx, None
data1 = self.file_table.item(idx1.row(), TITLE).data(Qt.UserRole)
data2 = self.books[self.check4archive_merge()]["data"]
path1, path2 = self.file_table.item(idx1.row(), PATH).text(), None
elif filename: # Merge/Sync a book with a metadata file
idx1, idx2 = self.sel_idx, None
data1 = self.file_table.item(idx1.row(), TITLE).data(Qt.UserRole)
book1 = self.file_table.item(idx1.row(), TYPE).data(Qt.UserRole)[0]
data2 = decode_data(filename)
name2 = splitext(dirname(filename))[0]
book2 = name2 + splitext(book1)[1]
if not self.same_book(data1, data2, book1, book2):
self.wrong_book()
return
if not self.same_cre_version([data1, data2]):
self.wrong_cre_version()
return
path1, path2 = self.file_table.item(idx1.row(), PATH).text(), None
else: # Merge/Sync two different book files
idx1, idx2 = self.sel_indexes
data1, data2 = [self.file_table.item(idx.row(), TITLE).data(Qt.UserRole)
for idx in [idx1, idx2]]
path1, path2 = [self.file_table.item(idx.row(), PATH).text()
for idx in [idx1, idx2]]
if merge: # merge highlights
args = (data1["highlight"], data2["highlight"],
data1["bookmarks"], data2["bookmarks"])
high1, high2, bkm1, bkm2 = self.get_unique_highlights(*args)
self.update_data(data1, high2, bkm2)
self.update_data(data2, high1, bkm1)
if data1["highlight"] or data2["highlight"]: # since there are highlights
for index in [idx1, idx2]: # set the green icon
if index:
                    item = self.file_table.item(index.row(), TITLE)
item.setIcon(self.ico_label_green)
if sync: # sync position and percent
if data1["percent_finished"] > data2["percent_finished"]:
data2["percent_finished"] = data1["percent_finished"]
data2["last_xpointer"] = data1["last_xpointer"]
else:
data1["percent_finished"] = data2["percent_finished"]
data1["last_xpointer"] = data2["last_xpointer"]
percent = str(int(data1["percent_finished"] * 100)) + "%"
self.file_table.item(idx1.row(), PERCENT).setText(percent)
if not to_archived and not filename:
self.file_table.item(idx2.row(), PERCENT).setToolTip(percent)
self.file_table.item(idx1.row(), TITLE).setData(Qt.UserRole, data1)
self.save_book_data(path1, data1)
if to_archived: # update the db item
self.update_book2db(data2)
elif filename: # do nothing with the loaded file
pass
else: # update the second item
self.file_table.item(idx2.row(), TITLE).setData(Qt.UserRole, data2)
self.save_book_data(path2, data2)
self.reload_highlights = True
@staticmethod
def get_unique_highlights(high1, high2, bkm1, bkm2):
""" Get the highlights, bookmarks from the first book
that do not exist in the second book and vice versa
:type high1: dict
:param high1: The first book's highlights
:type high2: dict
:param high2: The second book's highlights
:type bkm1: dict
:param bkm1: The first book's bookmarks
:type bkm2: dict
:param bkm2: The second book's bookmarks
"""
unique_high1 = defaultdict(dict)
for page1 in high1:
for page_id1 in high1[page1]:
text1 = high1[page1][page_id1]["text"]
for page2 in high2:
for page_id2 in high2[page2]:
if text1 == high2[page2][page_id2]["text"]:
break # highlight found in book2
else: # highlight was not found yet in book2
continue # no break in the inner loop, keep looping
break # highlight already exists in book2 (there was a break)
else: # text not in book2 highlights, add to unique
unique_high1[page1][page_id1] = high1[page1][page_id1]
unique_bkm1 = {}
for page1 in unique_high1:
for page_id1 in unique_high1[page1]:
text1 = unique_high1[page1][page_id1]["text"]
for idx in bkm1:
if text1 == bkm1[idx]["notes"]: # add highlight's bookmark to unique
unique_bkm1[idx] = bkm1[idx]
break
unique_high2 = defaultdict(dict)
for page2 in high2:
for page_id2 in high2[page2]:
text2 = high2[page2][page_id2]["text"]
for page1 in high1:
for page_id1 in high1[page1]:
if text2 == high1[page1][page_id1]["text"]:
break # highlight found in book1
else: # highlight was not found yet in book1
continue # no break in the inner loop, keep looping
break # highlight already exists in book1 (there was a break)
else: # text not in book1 highlights, add to unique
unique_high2[page2][page_id2] = high2[page2][page_id2]
unique_bkm2 = {}
for page2 in unique_high2:
for page_id2 in unique_high2[page2]:
text2 = unique_high2[page2][page_id2]["text"]
for idx in bkm2:
if text2 == bkm2[idx]["notes"]: # add highlight's bookmark to unique
unique_bkm2[idx] = bkm2[idx]
break
return unique_high1, unique_high2, unique_bkm1, unique_bkm2
@staticmethod
def update_data(data, extra_highlights, extra_bookmarks):
""" Adds the new highlights to the book's data
:type data: dict
:param data: The book's data
:type extra_highlights: dict
:param extra_highlights: The other book's highlights
:type extra_bookmarks: dict
:param extra_bookmarks: The other book's bookmarks
"""
highlights = data["highlight"]
for page in extra_highlights:
if page in highlights: # change page number if already exists
new_page = page
while new_page in highlights:
new_page += 1
highlights[new_page] = extra_highlights[page]
else:
highlights[page] = extra_highlights[page]
bookmarks = data["bookmarks"]
original = bookmarks.copy()
bookmarks.clear()
counter = 1
for key in original.keys():
bookmarks[counter] = original[key]
counter += 1
for key in extra_bookmarks.keys():
bookmarks[counter] = extra_bookmarks[key]
counter += 1
def use_meta_files(self):
""" Selects a metadata files to sync/merge
"""
# noinspection PyCallByClass
filenames = QFileDialog.getOpenFileNames(self, _("Select metadata file"),
self.last_dir,
(_("metadata files (*.lua *.old)")))[0]
if filenames:
self.last_dir = dirname(filenames[0])
for filename in filenames:
self.on_merge_highlights(filename=filename)
# ___ ___________________ DELETING STUFF ________________________
def delete_menu(self):
""" Creates the `Delete` button menu
"""
menu = QMenu(self)
for idx, title in enumerate([_("Selected books' info"),
_("Selected books"),
_("All missing books' info")]):
action = QAction(self.ico_files_delete, title, menu)
action.triggered.connect(self.on_delete_actions)
action.setData(idx)
menu.addAction(action)
return menu
def on_delete_actions(self):
""" When a `Delete action` is selected
"""
idx = self.sender().data()
self.delete_actions(idx)
def delete_actions(self, idx):
""" Execute the selected `Delete action`
:type idx: int
:param idx: The action type
"""
if not self.db_mode: # Loaded mode
if not self.sel_indexes and idx in [0, 1]:
return
text = ""
if idx == 0:
text = _("This will delete the selected books' information\n"
"but will keep the equivalent books.")
elif idx == 1:
text = _("This will delete the selected books and their information.")
elif idx == 2:
text = _("This will delete all the books' information "
"that refers to missing books.")
popup = self.popup(_("Warning!"), text, buttons=2)
if popup.buttonRole(popup.clickedButton()) == QMessageBox.RejectRole:
return
if idx == 0: # delete selected books' info
self.remove_sel_books()
elif idx == 1: # delete selected books
self.remove_sel_books(delete=True)
elif idx == 2: # delete all missing books info
self.clear_missing_info()
else: # Archived mode
text = _("Delete the selected books from the Archive?")
popup = self.popup(_("Warning!"), text, buttons=2, icon=QMessageBox.Question)
if popup.buttonRole(popup.clickedButton()) == QMessageBox.RejectRole:
return
ids = []
for idx in sorted(self.sel_indexes, reverse=True):
data = self.file_table.item(idx.row(), TITLE).data(Qt.UserRole)
ids.append(data["partial_md5_checksum"])
self.file_table.removeRow(idx.row())
self.delete_books_from_db(ids)
self.file_table.clearSelection()
self.reload_highlights = True
def remove_sel_books(self, delete=False):
""" Remove the selected book entries from the file_table
:type delete: bool
:param delete: Delete the book file too
"""
for index in sorted(self.sel_indexes)[::-1]:
row = index.row()
path = self.get_sdr_folder(row)
            if isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
if delete: # delete the book file too
try:
book_path = self.file_table.item(row, TYPE).data(Qt.UserRole)[0]
                    if isfile(book_path):
                        os.remove(book_path)
self.remove_book_row(row)
except AttributeError: # empty entry
pass
self.remove_book_row(row) # remove file_table entry
def clear_missing_info(self):
""" Delete the book info of all entries that have no book file
"""
for row in range(self.file_table.rowCount())[::-1]:
try:
book_exists = self.file_table.item(row, TYPE).data(Qt.UserRole)[1]
except AttributeError: # empty entry
continue
if not book_exists:
path = self.get_sdr_folder(row)
                if isdir(path):
                    shutil.rmtree(path)
                else:
                    os.remove(path)
self.remove_book_row(row)
def remove_book_row(self, row):
""" Remove a book entry from the file table
:type row: int
:param row: The entry's row
"""
self.loaded_paths.remove(self.file_table.item(row, PATH).data(0))
self.file_table.removeRow(row)
def get_sdr_folder(self, row):
""" Get the .sdr folder path for a book entry
:type row: int
:param row: The entry's row
"""
path = split(self.file_table.item(row, PATH).data(0))[0]
if not path.lower().endswith(".sdr"):
path = self.file_table.item(row, PATH).data(0)
return path
# ___ ___________________ SAVING STUFF __________________________
def save_menu(self):
""" Creates the `Export Files` button menu
"""
menu = QMenu(self)
for idx, item in enumerate([_("To individual text files"),
_("Combined to one text file"),
_("To individual html files"),
_("Combined to one html file")
]):
action = QAction(item, menu)
action.triggered.connect(self.on_save_actions)
action.setData(idx)
action.setIcon(self.ico_file_save)
menu.addAction(action)
return menu
def on_save_actions(self):
""" A `Export selected...` menu item is clicked
"""
idx = self.sender().data()
self.export(idx)
# noinspection PyCallByClass
def on_export(self):
""" Export the selected highlights to file(s)
"""
if self.current_view == BOOKS_VIEW:
if not self.sel_indexes:
return
elif self.current_view == HIGHLIGHTS_VIEW: # Save from high_table,
if self.save_sel_highlights(): # combine to one file
self.popup(_("Finished!"),
_("The Highlights were exported successfully!"),
icon=QMessageBox.Information)
return
multi = False
title = _("Exporting..")
if len(self.sel_indexes) > 1:
popup = self.popup(title, _("How should the Highlights be exported?"),
button_text=(_("As individual book files"), _("Cancel")),
buttons=2, extra_text=_("Combined to one file"),
icon=QMessageBox.Question)
if popup.buttonRole(popup.clickedButton()) == QMessageBox.AcceptRole:
multi = True
elif popup.buttonRole(popup.clickedButton()) == QMessageBox.RejectRole:
return
popup = self.popup(title, _("Using what file format?"), icon=QMessageBox.Question,
buttons=2, button_text=(_("Text"), _("Html")),
extra_text=_("CSV"))
if popup.buttonRole(popup.clickedButton()) == QMessageBox.AcceptRole:
idx = MANY_TEXT if multi else ONE_TEXT
elif popup.buttonRole(popup.clickedButton()) == QMessageBox.RejectRole:
idx = MANY_HTML if multi else ONE_HTML
elif popup.buttonRole(popup.clickedButton()) == QMessageBox.ApplyRole:
idx = MANY_CSV if multi else ONE_CSV
else:
return
self.export(idx)
# noinspection PyCallByClass
def export(self, idx):
""" Execute the selected `Export action`
:type idx: int
:param idx: The action type
"""
saved = 0
# Save from file_table to different files
if idx in [MANY_TEXT, MANY_HTML, MANY_CSV]:
text = _("Select destination folder for the exported file(s)")
dir_path = QFileDialog.getExistingDirectory(self, text, self.last_dir,
QFileDialog.ShowDirsOnly)
if not dir_path:
return
self.last_dir = dir_path
saved = self.save_multi_files(dir_path, idx)
# Save from file_table, combine to one file
elif idx in [ONE_TEXT, ONE_HTML, ONE_CSV]:
if idx == ONE_TEXT:
ext = "txt"
elif idx == ONE_HTML:
ext = "html"
elif idx == ONE_CSV:
ext = "csv"
else:
return
filename = QFileDialog.getSaveFileName(self,
_("Export to {} file").format(ext),
self.last_dir, "*.{}".format(ext))[0]
if not filename:
return
self.last_dir = dirname(filename)
saved = self.save_merged_file(filename, format_=idx)
self.status.animation(False)
all_files = len(self.file_table.selectionModel().selectedRows())
self.popup(_("Finished!"), _("{} texts were exported from the {} processed.\n"
"{} files with no highlights.")
.format(saved, all_files, all_files - saved),
icon=QMessageBox.Information)
def save_multi_files(self, dir_path, format_):
""" Save each selected book's highlights to a different file
:type dir_path: str|unicode
:param dir_path: The directory where the files will be saved
:type format_: int
:param format_: The file format to save
"""
self.status.animation(True)
saved = 0
title_counter = 0 # needed for the Book's title if none found
space = (" " if self.status.act_page.isChecked() and
self.status.act_date.isChecked() else "")
line_break = (":" + os.linesep if self.status.act_page.isChecked() or
self.status.act_date.isChecked() else "")
encoding = "utf-8-sig" if format_ == MANY_CSV else "utf-8"
for idx in self.sel_indexes:
(authors, title, highlights,
title_counter) = self.get_item_data(idx, format_, title_counter)
if not highlights: # no highlights in book
continue
name = title
if authors:
name = "{} - {}".format(authors, title)
if format_ == MANY_TEXT:
ext = ".txt"
text = ""
elif format_ == MANY_HTML:
ext = ".html"
text = HTML_HEAD + BOOK_BLOCK % {"title": title, "authors": authors}
elif format_ == MANY_CSV:
ext = ".csv"
text = CSV_HEAD
else:
return
filename = join(dir_path, sanitize_filename(name) + ext)
with open(filename, "w+", encoding=encoding, newline="") as text_file:
for highlight in sorted(highlights, key=self.sort_high4write):
date_text, high_comment, high_text, page_text = highlight
if format_ == MANY_HTML:
text += HIGH_BLOCK % {"page": page_text, "date": date_text,
"highlight": high_text,
"comment": high_comment}
elif format_ == MANY_TEXT:
text += (
page_text + space + date_text + line_break +
high_text + high_comment)
text += 2 * os.linesep
elif format_ == MANY_CSV:
data = {"title": title, "authors": authors, "page": page_text,
"date": date_text, "text": high_text,
"comment": high_comment}
text += get_csv_row(data) + "\n"
else:
return
if format_ == MANY_HTML:
text += "\n</div>\n</body>\n</html>"
text_file.write(text)
saved += 1
return saved
def save_merged_file(self, filename, format_):
""" Save the selected books' highlights to a single file
:type filename: str|unicode
:param filename: The name of the file we export the highlights
:type format_: int
:param format_: The filetype to export
"""
self.status.animation(True)
saved = 0
title_counter = 0 # needed for the Book's title if none found
space = (" " if self.status.act_page.isChecked() and
self.status.act_date.isChecked() else "")
line_break = (":" + os.linesep if self.status.act_page.isChecked() or
self.status.act_date.isChecked() else "")
html = format_ == ONE_HTML
text = HTML_HEAD if html else CSV_HEAD if format_ == ONE_CSV else ""
encoding = "utf-8-sig" if format_ == ONE_CSV else "utf-8"
for idx in sorted(self.sel_indexes):
(authors, title, highlights,
title_counter) = self.get_item_data(idx, format_, title_counter)
if not highlights: # no highlights
continue
highlights = sorted(highlights, key=self.sort_high4write)
if html:
text += BOOK_BLOCK % {"title": title, "authors": authors}
for high in highlights:
date_text, high_comment, high_text, page_text = high
text += HIGH_BLOCK % {"page": page_text, "date": date_text,
"highlight": high_text, "comment": high_comment}
text += "</div>\n"
elif format_ == ONE_TEXT:
name = title
if authors:
name = "{} - {}".format(authors, title)
line = "-" * 80
text += line + os.linesep + name + os.linesep + line + os.linesep
highlights = [i[3] + space + i[0] + line_break + i[2] + i[1]
for i in highlights]
text += (os.linesep * 2).join(highlights) + os.linesep * 2
elif format_ == ONE_CSV:
for high in highlights:
date_text, high_comment, high_text, page_text = high
data = {"title": title, "authors": authors, "page": page_text,
"date": date_text, "text": high_text, "comment": high_comment}
# data = {k.encode("utf8"): v.encode("utf8") for k, v in data.items()}
text += get_csv_row(data) + "\n"
else:
return
saved += 1
text += "\n</body>\n</html>" if html else ""
with open(filename, "w+", encoding=encoding, newline="") as text_file:
text_file.write(text)
return saved
def get_item_data(self, idx, format_, title_counter):
""" Get the highlight data for an item
:type idx: QModelIndex
:param idx: The item's index
:type format_: int
:param format_: The output format idx
:type title_counter: int
        :param title_counter: Counter used to number books whose title was not found
"""
row = idx.row()
data = self.file_table.item(row, 0).data(Qt.UserRole)
highlights = []
for page in data["highlight"]:
for page_id in data["highlight"][page]:
highlights.append(self.analyze_high(data, page, page_id, format_))
title = self.file_table.item(row, 0).data(0)
if title == _("NO TITLE FOUND"):
title += str(title_counter)
title_counter += 1
authors = self.file_table.item(row, 1).data(0)
if authors in [_("OLD TYPE FILE"), _("NO AUTHOR FOUND")]:
authors = ""
return authors, title, highlights, title_counter
def save_sel_highlights(self):
""" Save the selected highlights to a text file (from high_table)
"""
if not self.sel_high_view:
return
# noinspection PyCallByClass
filename = QFileDialog.getSaveFileName(self, _("Export to file"), self.last_dir,
"text file (*.txt);;html file (*.html);;"
"csv file (*.csv)")
if filename[0]:
filename, extra = filename
text_out = extra.startswith("text")
html_out = extra.startswith("html")
csv_out = extra.startswith("csv")
ext = ".html" if html_out else ".csv" if csv_out else ".txt"
filename = splitext(filename)[0] + ext
self.last_dir = dirname(filename)
else:
return
text = HTML_HEAD if html_out else CSV_HEAD if csv_out else ""
encoding = "utf-8-sig" if csv_out else "utf-8"
for i in sorted(self.sel_high_view):
row = i.row()
data = self.high_table.item(row, HIGHLIGHT_H).data(Qt.UserRole)
comment = "\n● " + data["comment"] if data["comment"] else ""
if text_out:
txt = ("{} [{}]\nPage {} [{}]\n{}{}"
.format(data["title"], data["authors"], data["page"],
data["date"], data["text"], comment))
text += txt + "\n\n"
elif html_out:
left = "{} [{}]".format(data["title"], data["authors"])
right = "Page {} [{}]".format(data["page"], data["date"])
text += HIGH_BLOCK % {"page": left, "date": right,
"highlight": data["text"], "comment": comment}
text += "</div>\n"
elif csv_out:
text += get_csv_row(data) + "\n"
else:
print("Unknown format export!")
return
if text_out or csv_out:
text.replace("\n", os.linesep)
with open(filename, "w+", encoding=encoding, newline="") as file2save:
file2save.write(text)
return True
def analyze_high(self, data, page, page_id, format_):
""" Create the highlight's texts
:type data: dict
:param data: The highlight's data
:type page: int
        :param page: The page where the highlight starts
        :type page_id: int
        :param page_id: The count of this page's highlight
        :type format_: int
        :param format_: The output format idx
"""
highlight = self.get_highlight_info(data, page, page_id)
linesep = "<br/>" if format_ in [ONE_HTML, MANY_HTML] else os.linesep
comment = highlight["comment"].replace("\n", linesep)
high_text = (highlight["text"].replace("\n", linesep)
if self.status.act_text.isChecked() else "")
date = highlight["date"]
line_break2 = (os.linesep if self.status.act_text.isChecked() and comment else "")
if format_ in [ONE_CSV, MANY_CSV]:
page_text = str(page) if self.status.act_page.isChecked() else ""
date_text = date if self.status.act_date.isChecked() else ""
high_comment = (comment if self.status.act_comment.isChecked()
and comment else "")
else:
page_text = "Page " + str(page) if self.status.act_page.isChecked() else ""
date_text = "[" + date + "]" if self.status.act_date.isChecked() else ""
high_comment = (line_break2 + "● " + comment
if self.status.act_comment.isChecked() and comment else "")
return date_text, high_comment, high_text, page_text
# ___ ___________________ SETTINGS STUFF ________________________
def settings_load(self):
""" Loads the jason based configuration settings
"""
if app_config:
self.restoreGeometry(self.unpickle("geometry"))
self.restoreState(self.unpickle("state"))
self.splitter.restoreState(self.unpickle("splitter"))
self.about.restoreGeometry(self.unpickle("about_geometry"))
self.col_sort = app_config.get("col_sort", MODIFIED)
self.col_sort_asc = app_config.get("col_sort_asc", False)
self.col_sort_h = app_config.get("col_sort_h", DATE_H)
self.col_sort_asc_h = app_config.get("col_sort_asc_h", False)
self.highlight_width = app_config.get("highlight_width", None)
self.comment_width = app_config.get("comment_width", None)
self.last_dir = app_config.get("last_dir", os.getcwd())
self.current_view = app_config.get("current_view", BOOKS_VIEW)
self.db_path = app_config.get("db_path", join(SETTINGS_DIR, "data.db"))
self.db_mode = app_config.get("db_mode", False)
self.fold_btn.setChecked(app_config.get("show_info", True))
self.opened_times = app_config.get("opened_times", 0)
self.alt_title_sort = app_config.get("alt_title_sort", False)
self.toolbar_size = app_config.get("toolbar_size", 48)
self.skip_version = app_config.get("skip_version", None)
self.date_vacuumed = app_config.get("date_vacuumed", self.date_vacuumed)
self.archive_warning = app_config.get("archive_warning", True)
self.exit_msg = app_config.get("exit_msg", True)
self.high_merge_warning = app_config.get("high_merge_warning", True)
self.edit_lua_file_warning = app_config.get("edit_lua_file_warning", True)
checked = app_config.get("show_items", (True, True, True, True))
# noinspection PyTypeChecker
checked = checked if len(checked) == 4 else checked + [True] # 4compatibility
self.status.act_page.setChecked(checked[0])
self.status.act_date.setChecked(checked[1])
self.status.act_text.setChecked(checked[2])
self.status.act_comment.setChecked(checked[3])
self.high_by_page = app_config.get("high_by_page", False)
else:
self.resize(800, 600)
if self.highlight_width:
self.header_high_view.resizeSection(HIGHLIGHT_H, self.highlight_width)
if self.comment_width:
self.header_high_view.resizeSection(COMMENT_H, self.comment_width)
self.toolbar.set_btn_size(self.toolbar_size)
def settings_save(self):
""" Saves the jason based configuration settings
"""
config = {"geometry": self.pickle(self.saveGeometry()),
"state": self.pickle(self.saveState()),
"splitter": self.pickle(self.splitter.saveState()),
"about_geometry": self.pickle(self.about.saveGeometry()),
"col_sort_asc": self.col_sort_asc, "col_sort": self.col_sort,
"col_sort_asc_h": self.col_sort_asc_h, "col_sort_h": self.col_sort_h,
"highlight_width": self.highlight_width, "db_path": self.db_path,
"comment_width": self.comment_width, "toolbar_size": self.toolbar_size,
"last_dir": self.last_dir, "alt_title_sort": self.alt_title_sort,
"archive_warning": self.archive_warning, "exit_msg": self.exit_msg,
"current_view": self.current_view, "db_mode": self.db_mode,
"high_by_page": self.high_by_page, "date_vacuumed": self.date_vacuumed,
"show_info": self.fold_btn.isChecked(),
"show_items": (self.status.act_page.isChecked(),
self.status.act_date.isChecked(),
self.status.act_text.isChecked(),
self.status.act_comment.isChecked()),
"skip_version": self.skip_version, "opened_times": self.opened_times,
"edit_lua_file_warning": self.edit_lua_file_warning,
"high_merge_warning": self.high_merge_warning,
}
try:
if not PYTHON2:
# noinspection PyUnresolvedReferences
for k, v in config.items():
if type(v) == bytes:
# noinspection PyArgumentList
config[k] = str(v, encoding="utf8")
config_json = json.dumps(config, sort_keys=True, indent=4)
with gzip.GzipFile(join(SETTINGS_DIR, "settings.json.gz"), "w+") as gz_file:
try:
gz_file.write(config_json)
except TypeError: # Python3
gz_file.write(config_json.encode("utf8"))
except IOError as error:
print("On saving settings:", error)
@staticmethod
def pickle(array):
""" Serialize some binary settings
:type array: QByteArray
:param array: The data
"""
if PYTHON2:
return pickle.dumps(array.data())
# noinspection PyArgumentList
return str(pickle.dumps(array.data()), encoding="unicode_escape") # Python3
@staticmethod
def unpickle(key):
""" Un-serialize some binary settings
:type key: str|unicode
:param key: The dict key to be un-pickled
"""
try:
if PYTHON2:
try:
value = pickle.loads(str(app_config.get(key)))
except UnicodeEncodeError: # settings file from Python3
return
else:
try:
# noinspection PyArgumentList
pickled = pickle.loads(bytes(app_config.get(key), encoding="latin"))
value = QByteArray(pickled)
except (UnicodeDecodeError, ImportError): # settings file from Python2
return
except pickle.UnpicklingError as err:
print("While unPickling:", err)
return
return value
# ___ ___________________ UTILITY STUFF _________________________
def thread_cleanup(self):
""" Deletes the finished threads
"""
for thread in self.threads:
if thread.isFinished():
self.threads.remove(thread)
def popup(self, title, text, icon=QMessageBox.Warning, buttons=1,
extra_text="", button_text=(_("OK"), _("Cancel")), check_text=""):
""" Creates and returns a Popup dialog
:type title: str|unicode
:parameter title: The Popup's title
:type text: str|unicode
:parameter text: The Popup's text
:type icon: int|unicode|QPixmap
:parameter icon: The Popup's icon
:type buttons: int
:parameter buttons: The number of the Popup's buttons
:type extra_text: str|unicode
:parameter extra_text: The extra button's text (button is omitted if "")
:type check_text: str|unicode
:parameter check_text: The checkbox's text (checkbox is omitted if "")
"""
popup = XMessageBox(self)
popup.setWindowIcon(self.ico_app)
if type(icon) == QMessageBox.Icon:
popup.setIcon(icon)
elif type(icon) == unicode:
popup.setIconPixmap(QPixmap(icon))
elif type(icon) == QPixmap:
popup.setIconPixmap(icon)
else:
raise TypeError("Wrong icon type!")
popup.setWindowTitle(title)
popup.setText(text + "\n" if check_text else text)
if buttons == 1:
popup.addButton(_("Close"), QMessageBox.RejectRole)
elif buttons == 2:
popup.addButton(button_text[0], QMessageBox.AcceptRole)
popup.addButton(button_text[1], QMessageBox.RejectRole)
if extra_text: # add an extra button
popup.addButton(extra_text, QMessageBox.ApplyRole)
if check_text: # hide check_box if no text for it
popup.check_box.setText(check_text)
else:
popup.check_box.hide()
popup.checked = popup.exec_()[1]
return popup
def passed_files(self):
""" Command line parameters that are passed to the program.
"""
# args = QApplication.instance().arguments()
try:
if sys.argv[1]:
self.on_file_table_fileDropped(sys.argv[1:])
except IndexError:
pass
def open_file(self, path):
""" Opens a file with its associated app
:type path: str|unicode
:param path: The path to the file to be opened
"""
try:
if sys.platform == "win32":
os.startfile(path)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, path])
except OSError:
self.popup(_("Error opening target!"),
_('"{}" does not exists!').format(path))
def copy_text_2clip(self, text):
""" Copy a text to clipboard
:type text: str|unicode
"""
if text:
data = QMimeData()
data.setText(text)
self.clip.setMimeData(data)
def recalculate_md5(self, file_path):
""" Recalculates the MD5 for a book and saves it to the metadata file
:type file_path: str|unicode
:param file_path: The path to the book
"""
popup = self.popup(_("Confirmation"),
_("This action can not be undone.\nContinue?"), buttons=2)
if popup.buttonRole(popup.clickedButton()) == QMessageBox.AcceptRole:
row = self.sel_idx.row()
data = self.file_table.item(row, TITLE).data(Qt.UserRole)
path = self.file_table.item(row, PATH).text()
old_md5 = ""
md5 = self.md5_from_file(file_path)
if "partial_md5_checksum" in data:
old_md5 = data["partial_md5_checksum"]
data["partial_md5_checksum"] = md5
if "stats" in data and "md5" in data["stats"]:
old_md5 = data["stats"]["md5"]
data["stats"]["md5"] = md5
if old_md5:
text = _("The MD5 was originally\n{}\nA recalculation produces\n{}\n"
"The MD5 was replaced and saved!").format(old_md5, md5)
self.file_table.item(row, TITLE).setData(Qt.UserRole, data)
self.save_book_data(path, data)
else:
text = _("Metadata file has no MD5 information!")
self.popup(_("Information"), text, QMessageBox.Information)
@staticmethod
def md5_from_file(file_path):
""" Calculates the MD5 for a file
:type file_path: str|unicode
:param file_path: The path to the file
:return: str|unicode|None
"""
if isfile(file_path):
with open(file_path, "rb") as file_:
# noinspection PyDeprecation
md5 = hashlib.md5()
sample = file_.read(1024)
if sample:
md5.update(sample)
for i in range(11):
file_.seek((4 ** i) * 1024)
sample = file_.read(1024)
if sample:
md5.update(sample)
else:
break
return md5.hexdigest()
@staticmethod
def get_time_str(sec):
""" Takes seconds and returns the formatted time value
:type sec: int
:param sec: The seconds
"""
return "{:02}:{:02}:{:02}".format(int(sec / 3600),
int(sec % 3600 / 60),
int(sec % 60))
def auto_check4update(self):
""" Checks online for an updated version
"""
self.db_maintenance()
self.opened_times += 1
if self.opened_times == 20:
text = _("Since you are using {} for some time now, perhaps you find it "
"useful enough to consider a donation.\nWould you like to visit "
"the PayPal donation page?\n\nThis is a one-time message. "
"It will never appear again!").format(APP_NAME)
popup = self.popup(_("A reminder..."), text,
icon=":/stuff/paypal76.png", buttons=3)
if popup.buttonRole(popup.clickedButton()) == QMessageBox.AcceptRole:
webbrowser.open("https://www.paypal.com/cgi-bin/webscr?"
"cmd=_s-xclick%20&hosted_button_id=MYV4WLTD6PEVG")
return
# noinspection PyBroadException
try:
version_new = self.about.get_online_version()
# except URLError: # can not connect
except Exception:
return
if not version_new:
return
version = LooseVersion(self.version)
skip_version = LooseVersion(self.skip_version)
if version_new > version and version_new != skip_version:
popup = self.popup(_("Newer version exists!"),
_("There is a newer version (v.{}) online.\n"
"Open the site to download it now?")
.format(version_new),
icon=QMessageBox.Information, buttons=2,
check_text=_("Don\"t alert me for this version again"))
if popup.checked:
self.skip_version = version_new
if popup.clickedButton().text() == "OK":
webbrowser.open("http://www.noembryo.com/apps.php?kohighlights")
def db_maintenance(self):
""" Compacts db every three months
"""
if self.get_db_book_count(): # db has books
now = datetime.now()
delta = now - datetime.strptime(self.date_vacuumed, DATE_FORMAT)
if delta.days > 90: # after three months
self.vacuum_db(info=False) # compact db
self.date_vacuumed = now.strftime(DATE_FORMAT) # reset vacuumed date
def write_to_log(self, text):
""" Append text to the QTextEdit.
"""
# self.about.log_txt.appendPlainText(text)
cursor = self.about.log_txt.textCursor()
cursor.movePosition(QTextCursor.End)
cursor.insertText(text)
self.about.log_txt.setTextCursor(cursor)
self.about.log_txt.ensureCursorVisible()
if self.sender().objectName() == "err":
text = "\033[91m" + text + "\033[0m"
# noinspection PyBroadException
try:
sys.__stdout__.write(text)
except Exception: # a problematic print that WE HAVE to ignore or we LOOP
pass
@staticmethod
def delete_logs():
""" Keeps the number of log texts steady.
"""
_, _, files = next(os.walk(SETTINGS_DIR))
files = sorted(i for i in files if i.startswith("error_log"))
if len(files) > 3:
for name in files[:-3]:
try:
os.remove(join(SETTINGS_DIR, name))
                except OSError:  # the file is locked (WindowsError is Windows-only)
pass
def on_check_btn(self):
pass
class KOHighlights(QApplication):
def __init__(self, *args, **kwargs):
super(KOHighlights, self).__init__(*args, **kwargs)
# decode app's arguments
# try:
# sys.argv = [i.decode(sys.getfilesystemencoding()) for i in sys.argv]
# except AttributeError: # i.decode does not exists in Python 3
# pass
sys.argv = self.arguments()
self.parser = argparse.ArgumentParser(prog=APP_NAME,
description=_("{} v{} - A KOReader's "
"highlights converter")
.format(APP_NAME, __version__),
epilog=_("Thanks for using %s!") % APP_NAME)
self.parser.add_argument("-v", "--version", action="version",
version="%(prog)s v{}".format(__version__))
self.base = Base()
if getattr(sys, "frozen", False): # the app is compiled
if not sys.platform.lower().startswith("win"):
self.parse_args()
else:
self.parse_args()
# # hide console window, but only under Windows and only if app is frozen
# on_windows = sys.platform.lower().startswith("win")
# compiled = getattr(sys, 'frozen', False)
# if on_windows and compiled:
# hide_console()
# self.parse_args()
# else:
# self.parse_args()
self.base.setWindowTitle(APP_NAME)
self.exec_()
# show_console() if on_windows and compiled else None
# ___ ___________________ CLI STUFF _____________________________
def parse_args(self):
""" Parse the command line parameters that are passed to the program.
"""
self.parser.add_argument("paths", nargs="*",
help="The paths to input files or folder")
self.parser.add_argument("-x", "--use_cli", required="-o" in sys.argv,
help="Use the command line interface only (exit the "
"app after finishing)", action="store_true",
default=False)
self.parser.add_argument("-s", "--sort_page", action="store_true", default=False,
help="Sort highlights by page, otherwise sort by date")
self.parser.add_argument("-m", "--merge", action="store_true", default=False,
help="Merge the highlights of all input books in a "
"single file, otherwise exports every book's "
"highlights to a different file")
self.parser.add_argument("-f", "--html", action="store_true", default=False,
help="Exports highlights in .html format "
"instead of .txt")
self.parser.add_argument("-c", "--csv", action="store_true", default=False,
help="Exports highlights in .csv format "
"instead of .txt")
self.parser.add_argument("-np", "--no_page", action="store_true", default=False,
help="Exclude the page number of the highlight")
self.parser.add_argument("-nd", "--no_date", action="store_true", default=False,
help="Exclude the date of the highlight")
self.parser.add_argument("-nh", "--no_highlight",
action="store_true", default=False,
help="Exclude the highlighted text of the highlight")
self.parser.add_argument("-nc", "--no_comment",
action="store_true", default=False,
help="Exclude the comment of the highlight")
self.parser.add_argument("-o", "--output", required="-x" in sys.argv,
help="The filename of the file (in merge mode) or "
"the directory for saving the highlight files")
# args, paths = self.parser.parse_known_args()
args = self.parser.parse_args()
if args.use_cli:
self.cli_save_highlights(args)
sys.exit(0) # quit the app if cli execution
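    # Hypothetical CLI sketch: with the flags defined above, an invocation like
    #   KOHighlights -x -m -f -o out.html /path/to/books
    # merges all highlights found under /path/to/books into a single out.html
    # (the paths here are made up).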
def cli_save_highlights(self, args):
""" Saves highlights using the command line interface
:type args: argparse.Namespace
:param args: The parsed cli args
"""
files = self.get_lua_files(args.paths)
if not files:
return
path = abspath(args.output)
if not args.merge: # save to different files
if not isdir(path):
self.parser.error("The output path (-o/--output) must point "
"to an existing directory!")
saved = self.cli_save_multi_files(args, files)
else: # save combined highlights to one file
if isdir(path):
ext = "an .html" if args.html else "a .csv" if args.csv else "a .txt"
self.parser.error("The output path (-o/--output) must be {} filename "
"not a directory!".format(ext))
return
saved = self.cli_save_merged_file(args, files)
all_files = len(files)
sys.stdout.write(_("\n{} files were exported from the {} processed.\n"
"{} files with no highlights.\n").format(saved, all_files,
all_files - saved))
def cli_save_multi_files(self, args, files):
""" Save each selected book's highlights to a different file
:type args: argparse.Namespace
:param args: The parsed cli args
:type files: list
:param files: A list with the metadata files to get converted
"""
saved = 0
title_counter = 0
space = " " if not args.no_page and not args.no_date else ""
line_break = ":" + os.linesep if not args.no_page or not args.no_date else ""
encoding = "utf-8-sig" if args.csv else "utf-8"
path = abspath(args.output)
for file_ in files:
(authors, title, highlights,
title_counter) = self.cli_get_item_data(file_, args, title_counter)
if not highlights: # no highlights
continue
name = title
if authors:
name = "{} - {}".format(authors, title)
if args.html:
ext = ".html"
text = HTML_HEAD + BOOK_BLOCK % {"title": title, "authors": authors}
elif args.csv:
ext = ".csv"
text = CSV_HEAD
else:
ext = ".txt"
text = ""
filename = join(path, sanitize_filename(name) + ext)
with open(filename, "w+", encoding=encoding, newline="") as text_file:
# noinspection PyTypeChecker
for highlight in sorted(highlights, key=partial(self.cli_sort, args)):
date_text, high_comment, high_text, page_text = highlight
if args.html:
text += HIGH_BLOCK % {"page": page_text, "date": date_text,
"highlight": high_text,
"comment": high_comment}
elif args.csv:
data = {"title": title, "authors": authors, "page": page_text,
"date": date_text, "text": high_text,
"comment": high_comment}
text += get_csv_row(data) + "\n"
else:
text += (page_text + space + date_text +
line_break + high_text + high_comment)
text += 2 * os.linesep
if args.html:
text += "\n</div>\n</body>\n</html>"
text_file.write(text)
sys.stdout.write(str("Created {}\n").format(basename(filename)))
saved += 1
return saved
def cli_save_merged_file(self, args, files):
""" Save the selected book's highlights to a single html file
:type args: argparse.Namespace
:param args: The parsed cli args
:type files: list
:param files: A list with the metadata files to get converted
"""
saved = 0
title_counter = 0
space = " " if not args.no_page and not args.no_date else ""
line_break = ":" + os.linesep if not args.no_page or not args.no_date else ""
text = HTML_HEAD if args.html else CSV_HEAD if args.csv else ""
encoding = "utf-8-sig" if args.csv else "utf-8"
for file_ in files:
(authors, title, highlights,
title_counter) = self.cli_get_item_data(file_, args, title_counter)
if not highlights: # no highlights
continue
if args.html:
text += BOOK_BLOCK % {"title": title, "authors": authors}
# noinspection PyTypeChecker
for high in sorted(highlights, key=partial(self.cli_sort, args)):
date_text, high_comment, high_text, page_text = high
text += HIGH_BLOCK % {"page": page_text, "date": date_text,
"highlight": high_text, "comment": high_comment}
text += "</div>\n"
elif args.csv:
for high in highlights:
date_text, high_comment, high_text, page_text = high
data = {"title": title, "authors": authors, "page": page_text,
"date": date_text, "text": high_text, "comment": high_comment}
# data = {k.encode("utf8"): v.encode("utf8") for k, v in data.items()}
text += get_csv_row(data) + "\n"
else:
name = title
if authors:
name = "{} - {}".format(authors, title)
line = "-" * 80
text += line + os.linesep + name + os.linesep + line + os.linesep
# noinspection PyTypeChecker
highlights = [i[3] + space + i[0] + line_break + i[2] + i[1] for i in
sorted(highlights, key=partial(self.cli_sort, args))]
text += (os.linesep * 2).join(highlights) + os.linesep * 2
saved += 1
text += "\n</body>\n</html>" if args.html else ""
path = abspath(args.output)
name, ext = splitext(path)
new_ext = ".html" if args.html else ".csv" if args.csv else ".txt"
if ext.lower() != new_ext:
path = name + new_ext
with open(path, "w+", encoding=encoding, newline="") as text_file:
text_file.write(text)
sys.stdout.write(str("Created {}\n\n").format(path))
return saved
def cli_get_item_data(self, file_, args, title_counter):
""" Get the highlight data for an item
:type file_: str|unicode
:param file_: The item's path
:type args: argparse.Namespace
:param args: The item's arguments
:type title_counter: int
:param title_counter: The non-found Title counter
"""
data = decode_data(file_)
highlights = []
for page in data["highlight"]:
for page_id in data["highlight"][page]:
highlights.append(self.cli_analyze_high(data, page, page_id, args))
authors = ""
try:
title = data["stats"]["title"]
authors = data["stats"]["authors"]
except KeyError: # older type file
title = splitext(basename(file_))[0]
try:
name = title.split("#] ")[1]
title = splitext(name)[0]
except IndexError: # no "#] " in filename
pass
if not title:
try:
name = file_.split("#] ")[1]
title = splitext(name)[0]
except IndexError: # no "#] " in filename
title = _("NO TITLE FOUND") + str(title_counter)
title_counter += 1
return authors, title, highlights, title_counter
@staticmethod
def get_lua_files(dropped):
""" Return the paths to the .lua metadata files
:type dropped: list
:param dropped: The input paths
"""
paths = []
        found_txt = str("Found: {}\n")
for path in dropped:
if isfile(path) and splitext(path)[1] == ".lua":
paths.append(abspath(path))
                sys.stdout.write(found_txt.format(path))
folders = [i for i in dropped if isdir(i)]
for folder in folders:
try:
for dir_tuple in os.walk(folder):
dir_path = dir_tuple[0]
if dir_path.lower().endswith(".sdr"): # a book's metadata folder
if dir_path.lower().endswith("evernote.sdr"):
continue
for file_ in dir_tuple[2]: # get the .lua file not the .old
if splitext(file_)[1].lower() == ".lua":
path = abspath(join(dir_path, file_))
paths.append(path)
                                sys.stdout.write(found_txt.format(path))
break
# older metadata storage or android history folder
elif (dir_path.lower().endswith(join("koreader", "history"))
or basename(dir_path).lower() == "history"):
for file_ in dir_tuple[2]:
if splitext(file_)[1].lower() == ".lua":
path = abspath(join(dir_path, file_))
paths.append(path)
                                sys.stdout.write(found_txt.format(path))
continue
except UnicodeDecodeError: # os.walk error
pass
return paths
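    # Hypothetical usage sketch: for dropped == ["/books"], a KOReader layout
    # such as /books/Novel.sdr/metadata.epub.lua is collected, while .old
    # siblings and evernote.sdr folders are skipped (all paths here are made up).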
@staticmethod
def cli_sort(args, data):
""" Sets the sorting method of written highlights
:type args: argparse.Namespace
:param args: The parsed cli args
:type data: tuple
param: data: The highlight's data
"""
if args.sort_page and not args.no_page:
page = data[3]
if page.startswith("Page"):
page = page[5:]
return int(page)
else:
return data[0]
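    # Key behaviour sketch (illustrative values): with --sort_page set and
    # data[3] == "Page 12", the key is the int 12; otherwise the date string in
    # data[0] is returned, so highlights sort lexicographically by date.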
def cli_analyze_high(self, data, page, page_id, args):
""" Get the highlight's info (text, comment, date and page)
:type data: dict
:param data: The highlight's data
:type page: int
        :param page: The page where the highlight starts
:type page_id: int
        :param page_id: The index of the highlight within the page
:type args: argparse.Namespace
:param args: The parsed cli args
"""
highlight = self.base.get_highlight_info(data, page, page_id)
linesep = "<br/>" if args.html else os.linesep
high_text = highlight["text"]
high_text = high_text.replace("\n", linesep) if not args.no_highlight else ""
comment = highlight["comment"].replace("\n", linesep)
date = highlight["date"]
line_break2 = os.linesep if not args.no_highlight and comment else ""
if args.csv:
page_text = str(page) if not args.no_page else ""
date_text = date if not args.no_date else ""
high_comment = comment if not args.no_comment and comment else ""
else:
page_text = "Page " + str(page) if not args.no_page else ""
date_text = "[" + date + "]" if not args.no_date else ""
high_comment = (line_break2 + "● " + comment
if not args.no_comment and comment else "")
return date_text, high_comment, high_text, page_text
@staticmethod
def get_name(data, meta_path, title_counter):
""" Return the name of the book entry
:type data: dict
:param data: The book's metadata
:type meta_path: str|unicode
:param meta_path: The book's metadata path
:type title_counter: list
:param title_counter: A list with the current NO TITLE counter
"""
authors = ""
try:
title = data["stats"]["title"]
authors = data["stats"]["authors"]
except KeyError: # older type file
title = splitext(basename(meta_path))[0]
try:
name = title.split("#] ")[1]
title = splitext(name)[0]
except IndexError: # no "#] " in filename
pass
if not title:
try:
name = meta_path.split("#] ")[1]
title = splitext(name)[0]
except IndexError: # no "#] " in filename
title = _("NO TITLE FOUND") + str(title_counter[0])
title_counter[0] += 1
name = title
if authors:
name = "{} - {}".format(authors, title)
return name
if __name__ == "__main__":
app = KOHighlights(sys.argv)
|
noembryo/KoHighlights
|
main.py
|
Python
|
mit
| 134,224
|
[
"VisIt"
] |
c2f3474769684765b26dadb4acd05a5e73712cc736975638270b890ed8bd47fc
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import AnonymousUser, User
from django.db import IntegrityError
from mock import Mock, patch
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
from amo.tests import TestCase
from users.models import UserProfile
from .backends import SessionBackend
from .models import Session
from .helpers import cake_csrf_token, remora_url
class CakeTestCase(TestCase):
fixtures = ['cake/sessions', 'base/global-stats']
def test_cookie_cleaner(self):
"Test that this removes locale-only cookie."
c = self.client
c.cookies['locale-only'] = 'XENOPHOBIA 4 EVAR'
r = c.get('/', follow=True)
eq_(r.cookies.get('locale-only'), None)
def test_login(self):
"""
Given a known remora cookie, can we visit the homepage and appear
logged in?
"""
profile = UserProfile.objects.get(pk=1)
# log in using cookie -
client = self.client
client.cookies['AMOv3'] = "17f051c99f083244bf653d5798111216"
r = client.get('/en-US/firefox/')
eq_(pq(r.content.decode('utf-8'))('.account .user').text(),
profile.display_name)
eq_(pq(r.content)('.account .user').attr('title'), profile.email)
# test that the data copied over correctly.
profile = UserProfile.objects.get(pk=1)
user = profile.user
self.assertEqual(profile.email, user.username)
self.assertEqual(profile.email, user.email)
self.assertEqual(profile.created, user.date_joined)
self.assertEqual(profile.password, user.password)
self.assertEqual(profile.id, user.id)
def test_stale_session(self):
# what happens if the session we reference is expired
session = Session.objects.get(pk='27f051c99f083244bf653d5798111216')
self.assertEqual(False, self.client.login(session=session))
# check that it's no longer in the db
f = lambda: Session.objects.get(pk='27f051c99f083244bf653d5798111216')
self.assertRaises(Session.DoesNotExist, f)
def test_invalid_session_reference(self):
self.assertEqual(False, self.client.login(session=Session(pk='abcd')))
def test_invalid_session_data(self):
# what happens if the session we reference refers to a missing user
session = Session.objects.get(pk='37f051c99f083244bf653d5798111216')
self.assertEqual(False, self.client.login(session=session))
# check that it's no longer in the db
f = lambda: Session.objects.get(pk='37f051c99f083244bf653d5798111216')
self.assertRaises(Session.DoesNotExist, f)
def test_broken_session_data(self):
"""Bug 553397"""
backend = SessionBackend()
session = Session.objects.get(pk='17f051c99f083244bf653d5798111216')
session.data = session.data.replace('"', 'breakme', 5)
self.assertEqual(None, backend.authenticate(session=session))
def test_utf8_session_data(self):
"""Bug 566377."""
backend = SessionBackend()
session = Session.objects.get(pk='47f051c99f083244bf653d5798111216')
user = backend.authenticate(session=session)
        assert user is not None, "We should get a user."
def test_backend_get_user(self):
s = SessionBackend()
self.assertEqual(None, s.get_user(12))
def test_middleware_invalid_session(self):
client = self.client
client.cookies['AMOv3'] = "badcookie"
response = client.get('/en-US/firefox/')
assert isinstance(response.context['user'], AnonymousUser)
def test_logout(self):
# login with a cookie and verify we are logged in
client = self.client
client.cookies['AMOv3'] = "17f051c99f083244bf653d5798111216"
r = client.get('/en-US/firefox/')
profile = UserProfile.objects.get(pk=1)
eq_(pq(r.content.decode('utf-8'))('.account .user').text(),
profile.display_name)
eq_(pq(r.content)('.account .user').attr('title'), profile.email)
# logout and verify we are logged out and our AMOv3 cookie is gone
r = client.get('/en-US/firefox/users/logout')
r = client.get('/en-US/firefox/')
assert isinstance(r.context['user'], AnonymousUser)
self.assertEqual(client.cookies.get('AMOv3').value, '')
@patch('django.db.models.fields.related.'
'ReverseSingleRelatedObjectDescriptor.__get__')
def test_backend_profile_exceptions(self, p_mock):
# We have a legitimate profile, but for some reason the user_id is
# phony.
s = SessionBackend()
session = Session.objects.get(pk='17f051c99f083244bf653d5798111216')
p_mock.side_effect = User.DoesNotExist()
eq_(None, s.authenticate(session))
p_mock.side_effect = IntegrityError()
eq_(None, s.authenticate(session))
p_mock.side_effect = Exception()
eq_(None, s.authenticate(session))
class TestHelpers(TestCase):
fixtures = ['cake/sessions']
def test_csrf_token(self):
mysessionid = "17f051c99f083244bf653d5798111216"
s = SessionBackend()
session = Session.objects.get(pk=mysessionid)
user = s.authenticate(session=session)
request = Mock()
request.user = user
request.COOKIES = {'AMOv3': mysessionid}
ctx = {'request': request}
doc = pq(cake_csrf_token(ctx))
self.assert_(doc.html())
self.assert_(doc('input').attr('value'))
def test_csrf_token_nosession(self):
"""No session cookie, no Cake CSRF token."""
mysessionid = "17f051c99f083244bf653d5798111216"
s = SessionBackend()
session = Session.objects.get(pk=mysessionid)
user = s.authenticate(session=session)
request = Mock()
request.user = user
request.COOKIES = {}
ctx = {'request': request}
token = cake_csrf_token(ctx)
assert not token
def test_remora_url(self):
"""Build remora URLs."""
ctx = {
'LANG': 'en-us',
'APP': amo.FIREFOX}
url = remora_url(ctx, '/addon/1234')
eq_(url, '/en-US/firefox/addon/1234')
url = remora_url(ctx, '/addon/1234', 'pt-BR', 'thunderbird')
eq_(url, '/pt-BR/thunderbird/addon/1234')
url = remora_url(ctx, '/devhub/something', app='', prefix='remora')
eq_(url, '/remora/en-US/devhub/something')
# UTF-8 strings
url = remora_url(ctx, u'/tags/Hallo und tschüß')
eq_(url, '/en-US/firefox/tags/Hallo%20und%20tsch%C3%BC%C3%9F')
# Trailing slashes are kept if present.
eq_(remora_url(ctx, '/foo'), '/en-US/firefox/foo')
eq_(remora_url(ctx, '/foo/'), '/en-US/firefox/foo/')
|
jbalogh/zamboni
|
apps/cake/tests.py
|
Python
|
bsd-3-clause
| 6,802
|
[
"VisIt"
] |
416bae144983f156660b03274d4a428d9ad365555784f1ddfe95780041e980e9
|
#!/usr/bin/env python
'''
File name: main_ripp_mod.py
Author: Guillaume Viejo
Date created: 16/08/2017
Python Version: 3.5.2
'''
import sys
import numpy as np
import pandas as pd
import scipy.io
from functions import *
# from pylab import *
# import ipyparallel
from multiprocessing import Pool
import os
import neuroseries as nts
from time import time
from pylab import *
from sklearn.manifold import Isomap
from mpl_toolkits.mplot3d import Axes3D
from numba import jit
import _pickle as cPickle
####################################################################################################################
# FUNCTIONS
####################################################################################################################
@jit(nopython=True)
def histo(spk, obins):
n = len(obins)
count = np.zeros(n)
for i in range(n):
count[i] = np.sum((spk>obins[i,0]) * (spk < obins[i,1]))
return count
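# Illustrative sketch (not part of the analysis): counting spikes that fall in
# overlapping bins with histo(). All values below are made up.
#
#   spk = np.array([10.0, 30.0, 70.0])           # spike times in ms
#   left = np.arange(0.0, 100.0, 25.0)            # left edges, 50 ms bins, 50% overlap
#   demo_obins = np.vstack((left, left + 50.0)).T
#   counts = histo(spk, demo_obins)               # one count per bin row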
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# for session in datasets:
for session in ['Mouse32/Mouse32-140822']:
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
if np.sum(hd_info == 1)>10:
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
n_channel,fs, shank_to_channel = loadXML(data_directory+session+"/"+session.split("/")[1]+'.xml')
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
rip_ep,rip_tsd = loadRipples(data_directory+session)
rip_ep = sws_ep.intersect(rip_ep)
rip_tsd = rip_tsd.restrict(sws_ep)
speed = loadSpeed(data_directory+session+'/Analysis/linspeed.mat').restrict(wake_ep)
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
		spikes = {k:spikes[k] for k in np.where(hd_info_neuron==1)[0] if k not in []}  # empty list: no neurons excluded
neurons = np.sort(list(spikes.keys()))
print(session, len(neurons))
bin_size = 50
# left_bound = np.arange(-500-bin_size/2, 500 - bin_size/4,bin_size/4) # 75% overlap
left_bound = np.arange(-1000-bin_size/2, 1000 - bin_size/2, bin_size/2) # 50% overlap
obins = np.vstack((left_bound, left_bound+bin_size)).T
times = obins[:,0]+(np.diff(obins)/2).flatten()
# cutting times between -500 to 500
times = times[np.logical_and(times>=-500, times<=500)]
# datatosave = {'times':times, 'swr':{}, 'rnd':{}, 'bin_size':bin_size}
datatosave = {'times':times, 'imaps':{}, 'bin_size':bin_size}
n_ex = 50
n_rip = len(rip_tsd)
n_loop = n_rip//n_ex
idx = np.random.randint(0, n_loop, n_rip)
####################################################################################################################
# WAKE
####################################################################################################################
bin_size_wake = 200
bins = np.arange(wake_ep.as_units('ms').start.iloc[0], wake_ep.as_units('ms').end.iloc[-1]+bin_size_wake, bin_size_wake)
spike_counts = pd.DataFrame(index = bins[0:-1]+np.diff(bins)/2, columns = neurons)
for i in neurons:
spks = spikes[i].as_units('ms').index.values
spike_counts[i], _ = np.histogram(spks, bins)
rates_wak = np.sqrt(spike_counts/(bin_size_wake))
		sys.exit()  # NOTE: early exit; the code below never runs as written
# for i in range(n_loop):
for i in range(10):
print(i, '/', n_loop)
####################################################################################################################
# SWR
####################################################################################################################
# BINNING
tmp = rip_tsd.index.values[idx == i]
subrip_tsd = pd.Series(index = tmp, data = np.nan)
rates_swr = []
tmp2 = subrip_tsd.index.values/1e3
for j, t in enumerate(tmp2):
tbins = t + obins
spike_counts = pd.DataFrame(index = obins[:,0]+(np.diff(obins)/2).flatten(), columns = neurons)
for k in neurons:
spks = spikes[k].as_units('ms').index.values
spike_counts[k] = histo(spks, tbins)
rates_swr.append(np.sqrt(spike_counts/(bin_size)))
####################################################################################################################
# RANDOM
####################################################################################################################
# BINNING
			rnd_tsd = nts.Ts(t = np.sort(np.hstack([np.random.randint(sws_ep.loc[j,'start']+500000, sws_ep.loc[j,'end']-500000, np.maximum(1,n_ex//len(sws_ep))) for j in sws_ep.index])))
if len(rnd_tsd) > n_ex:
rnd_tsd = rnd_tsd[0:n_ex]
rates_rnd = []
tmp3 = rnd_tsd.index.values/1000
for j, t in enumerate(tmp3):
tbins = t + obins
spike_counts = pd.DataFrame(index = obins[:,0]+(np.diff(obins)/2).flatten(), columns = neurons)
for k in neurons:
spks = spikes[k].as_units('ms').index.values
spike_counts[k] = histo(spks, tbins)
rates_rnd.append(np.sqrt(spike_counts/(bin_size)))
###########
# SMOOTHING
tmp1 = rates_wak.values
tmp1 = tmp1.astype(np.float32)
# SMOOTHING
tmp3 = []
for rates in rates_swr:
# tmp3.append(rates.rolling(window=100,win_type='gaussian',center=True,min_periods=1,axis=0).mean(std=4).loc[-500:500].values)
tmp3.append(rates.loc[-500:500].values)
tmp3 = np.vstack(tmp3)
tmp3 = tmp3.astype(np.float32)
#SMOOTHING
tmp2 = []
for rates in rates_rnd:
# tmp2.append(rates.rolling(window=100,win_type='gaussian',center=True,min_periods=1,axis=0).mean(std=4).loc[-500:500].values)
tmp2.append(rates.loc[-500:500].values)
tmp2 = np.vstack(tmp2)
tmp2 = tmp2.astype(np.float32)
n = len(tmp3)
m = len(tmp1)
tmp = np.vstack((tmp1, tmp3, tmp2))
			sys.exit()  # NOTE: early exit; the Isomap embedding below never runs as written
# ISOMAP
imap = Isomap(n_neighbors = 20, n_components = 2).fit_transform(tmp)
iwak = imap[0:m]
iswr = imap[m:m+n].reshape(len(subrip_tsd),len(times),2)
irnd = imap[m+n:].reshape(len(rnd_tsd),len(times),2)
datatosave['imaps'][i] = {'swr':iswr, 'rnd':irnd, 'wak':iwak}
####################################################################################################################
# SAVING
####################################################################################################################
cPickle.dump(datatosave, open('../figures/figures_articles_v4/figure1/hd_isomap_50ms_mixed_swr_rnd_wake/'+session.split("/")[1]+'.pickle', 'wb'))
|
gviejo/ThalamusPhysio
|
python/main_make_ISOMAP_HD.py
|
Python
|
gpl-3.0
| 7,288
|
[
"Gaussian"
] |
01be55d9a4a0a5ffd578bac0e82927440ace1f6639dbb7baae5d2a9ebf4a7436
|
../../../../../../../share/pyshared/orca/scripts/apps/nautilus/script.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/nautilus/script.py
|
Python
|
gpl-3.0
| 72
|
[
"ORCA"
] |
c7b1cbb9f134fb83769040d299b8b5dac3c301446e2df3e1045dc0465f70bf52
|
import tkSimpleDialog
import tkMessageBox
#import p3d.protein
#import p3d.geo
from pymol.wizard import Wizard
from pymol import cmd, util
from pymol.controlling import mode_dict
class Bond(object):
def __init__(self,bond1,bond2,resid1,resid2):
if bond2 > bond1:
self.bond1=bond1
self.bond2=bond2
self.resid1=resid1
self.resid2=resid2
else:
self.bond1=bond2
self.bond2=bond1
self.resid1=resid2
self.resid2=resid1
self.indexes=[self.bond1,self.bond2]
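# Illustrative sketch: Bond() normalizes its endpoints so that bond1 <= bond2;
# with made-up atom indices, Bond(12, 7, 3, 2).indexes == [7, 12] and the
# residue ids are swapped along with them (resid1 == 2).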
class selector(Wizard):
def __init__(self,name,chain,resid,resid2,_self=cmd):
Wizard.__init__(self,_self)
self.resid = resid
self.resid2 = resid2
self.name = name
self.chain = chain
self.extend = 1
self.bonds=[]
self.resids=[]
self.indexes=[]
self.load=None
self.lead=0
def get_panel(self):
label = 'No Mutation'
return [
[ 1, 'Select Rotatable Bonds',''],
[ 1, 'for Residue '+ self.resid ,''],
[ 2, 'Pick Bond' , 'cmd.get_wizard().apply()'],
[ 2, 'Rotate View' , 'cmd.get_wizard().rotate()'],
[ 2, 'Show More Bonds' , 'cmd.get_wizard().show()'],
[ 2, 'Pick Rotatable Section' , 'cmd.get_wizard().srot()'],
[ 2, 'Write Bonds' , 'cmd.get_wizard().set_bonds()'],
[ 2, 'Reset Selected Bonds' , 'cmd.get_wizard().reset()'],
[ 2, 'Finished' , 'cmd.get_wizard().clear()'],
]
def srot(self):
cmd.deselect()
#self.pk2_st=None
self.load=1
self.get_prompt()
print "Testing", self.lead
cmd.config_mouse('three_button_editing')
def show(self):
left = str(int(self.resid)-self.extend)
right = str(int(self.resid)+self.extend)
cmd.show('lines','resid '+left+':'+right)
cmd.zoom('resid '+left+':'+right)
self.extend = self.extend+1
#def isbonded(self,bond0,bond1,stems):
# nextres = 0
# for stem in stems:
# if bond0==stem:
# nextres=bond1
# if bond1==stem:
# nextres=bond0
# return nextres
def get_bonds(self,stems,allbonds,rot_bonds=[]):
nextbonds = []
for stem in stems:
print "STEM", stem
for bond in allbonds:
if stem in bond.index: #save next bond
print bond.index,"matched bond"
for n in bond.index:
if n != stem: #find next atom
if n not in rot_bonds: #if atom is new:
nextbonds.append(n)
#return indexes connected to stem
return nextbonds
def is_in_bonds(self,stem,bonds):
yes = 0
for bond in bonds:
if stem in bond.indexes:
yes = 1
return yes
def is_in_multiple_bonds(self,stem,bonds):
count = 0
for bond in bonds:
if stem in bond.indexes:
count = count + 1
        return count >= 2  # two or more occurrences count as "multiple"
#def reset_bond(self,known,bonds): #reset bond, if repeated index save repeat
# ret = []
# print "reset_bond"
# print known, "known"
# for rbon in bonds: #for each rot bond
# if known[0] in rbon.indexes:
# if known[1] not in rbon.indexes:
# ret = [known[1]]
# if known[1] in rbon.indexes:
# if known[0] not in rbon.indexes:
# ret = [known[0]]
# return ret
def set_bonds(self):
startingbond=[]
rangev = []
if self.lead==0:
print "Must select rotatable section first"
elif len(self.bonds)==0:
print "Must select at least one rotatable bonds"
else:
mres = min(self.resids)
xres = max(self.resids)
model = cmd.get_model('all')
#print(model)
allbonds = model.bond
for b in allbonds:
print b.index
#print("MODEL")
'''
Removed efficiency code to test end residue labeling - will be slow
if mres != xres: #multires case
mind = min(self.indexes)
xind = max(self.indexes)
irange = [mind,xind] #range of indexes we care about for bonding pattern
if self.lead < mind:
irange = [self.lead,xind]
if self.lead > xind:
irange = [mind,self.lead]
limitedset = []
we want to limit allbonds to a limited index range
for efficiency-may be problem if indexes are really screwed up
for b in allbonds:
if b.index[0] in range(irange[0],irange[1]) or \
b.index[1] in range(irange[0],irange[1]):
limitedset.append(b)
allbonds = limitedset
'''
#Remove dummy atom-for bonding only, will still be rotated
dummy = 'ZZ'
reduced = []
for b in allbonds:
d = False
if self.get_atom(b.index[0])[2] == dummy or self.get_atom(b.index[1])[2] == dummy:
d = True
if d == False:
reduced.append(b)
#print self.get_atom(b.index[0]),self.get_atom(b.index[1])
#print "DONE"
allbonds = reduced
#start from rotatable selection point and find what atoms are always rotatable
rot_bonds = [self.lead]
print rot_bonds,"LEAD"
#print self.bonds
#for b in allbonds:
# print b.index
stems = self.get_bonds(rot_bonds,allbonds,rot_bonds)
nextstep=[]
while len(stems) != 0: #while a bond remains
next_stem = set() #Internal
for s in stems: #check if at rotation
if self.is_in_bonds(s,self.bonds):
if len(nextstep) == 0:
print s, "NEXTSTEP"
nextstep.append(s) #don't move beyond rotation
rot_bonds.append(s)
next_stem.add(s)
#No else - We discard any other rotatable bonds - deal with later
else:
print s, "ROT BOND"
rot_bonds.append(s)
next_stem.add(s)
stems = self.get_bonds(next_stem,allbonds,rot_bonds)
outstring = "!Rotation of dye\n"
lenv = len(self.bonds)
outstring = outstring + '!NROT '+str(lenv)+'\n'
outstring = outstring + 'cons fix sele dbackbone .or. .not. '+\
'(resid @res .and. segid @chain) end\n\n'
#now we look along rest of chain
botbonds = []
count = 0
excluded = rot_bonds #We don't want to select rotatable bonds
stems = self.get_bonds(nextstep,allbonds,excluded)
bond=nextstep #This is a rotatable object
while len(stems) != 0:
excluded=excluded+stems#don't go to a stem two times
for stem in stems:
if self.is_in_bonds(stem,self.bonds): #only care about bonds
if len(bond)==0: #we have a new end of a bond
bond.append(stem)
elif stem != bond[0]:#We have second half of new bond
new_bond = stem
bond.append(new_bond)
count = count + 1
#We need to tease out other rotatable atoms from those in stems
for stem in stems:
if self.is_in_bonds(stem,self.bonds) == False:
#Just looking at other stems-none of these
# have rotatable elements
botbonds = botbonds+[stem]
nexts = list(set(self.get_bonds([stem],allbonds,excluded)))
while len(nexts) != 0:
botbonds = botbonds+nexts
excluded = excluded+nexts #don't go to stem two times
nexts = list(set(self.get_bonds(nexts,allbonds,excluded)))
#Now write output for rotation
outstring = outstring + 'label loop'+str(count)+'\n'
outstring = outstring + self.rotate_axis(bond[0],bond[1])
outstring = outstring + self.rotate_sel(120,botbonds)
outstring = outstring + 'incr '+str(count)+' by '+str(count)+'\n'
outstring = outstring + 'goto mini \n \n'
#We check if the new_bond atom is shared
#The old atom is discarded because we don't go backwards
if self.is_in_multiple_bonds(new_bond,self.bonds):
bond = [new_bond]
else:
bond = []
botbonds=botbonds+stems
stems = list(set(self.get_bonds(stems,allbonds,excluded)))
outfile = open('../../inputs/'+self.name+'_rot.str','w')
outfile.write(outstring)
#write .str file
stream = '!The atoms that are the end of the dye\n'
stream = stream + "define dyefix sele .NOT. ( "
for bindex in botbonds:
atom = self.get_atom(bindex)
stream = stream + " chain @chain .and. resi @resi .and. name "+atom[2]+ " .OR. "
stream = stream + ' ) end\n'
outfile = open('../../inputs/'+self.name+'.str','w')
outfile.write(stream)
print "All files written for ",self.name
def get_atom(self,index):
cmd.select("_p","index "+str(index+1))#convert from internal back to
#label numbering
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_at',""name)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ac',""chain)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ar',""resi)")
return [str(self.pk_ac),str(self.pk_ar),str(self.pk_at)]
def rotate_axis(self,index1,index2):#print axis output
atom1=self.get_atom(index1)
atom2=self.get_atom(index2)
return "coor axis sele atom @chain @res "+atom1[2]+\
" end sele atom @chain @res "+atom2[2]+" end \n"
def rotate_sel(self,angle,flexbonds):#print selection output
outstring = 'coor rota axis PHI '+str(angle)+' sele dyefix '
atoms = []
print "rotate_sel", flexbonds
for index in flexbonds:
cmd.select("_p","index "+str(index+1))#convert from internal back
#to label numbering
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_at',""name)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ac',""chain)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ar',""resi)")
atoms.append([str(self.pk_at),str(self.pk_ac),str(self.pk_ar)])
for atom in atoms: #set(atoms): #ensure every atom is only included once
outstring = outstring + ' .or. '
outstring = outstring+'atom @chain @res '+atom[0]
return outstring+' end \n'
def do_select(self,selection):
cmd.deselect()
def rotate(self):
mode_dict['three_button_viewing'] = [ ('l','none','rota')]
cmd.config_mouse('three_button_viewing')
def reset(self):
#cmd.color("atomic")
#cmd.set_bond("line_color","atomic","all")
#util.cbag("all")
self.bonds=[]
cmd.set_bond("line_color","green","all")
def apply(self):
mode_dict['three_button_viewing'] = [ ('l','none','PkTB')]
cmd.config_mouse('three_button_viewing')
print "Apply"
def clear(self):
cmd.quit()
def get_prompt(self):
if self.load!=None:
return ["Please pick the atom in the direction of the section you want to rotate"]
if self.pk2_st!=None:
return ["You picked the bond between %s and %s"%(self.pk1_st, self.pk2_st)]
else:
return ["Please pick an atom or a bond..."]
def do_pick(self,picked_bond):
cmd.iterate("pk1","setattr(cmd.get_wizard(),'pk1_st',""'%s/%s/%s/%s/%s/%s'%(model,segi,chain,resi,name,index))")
print "Picking Loop"
if picked_bond:
cmd.iterate("pk2","setattr(cmd.get_wizard(),'pk2_st',""'%s/%s/%s/%s/%s/%s'%(model,segi,chain,resi,name,index))")
cmd.set_bond("line_color","orange","pk1","pk2")
print [self.pk1_st,self.pk2_st],'bond'
self.resids.append(int(self.pk1_st.split('/')[3])-1)
self.resids.append(int(self.pk2_st.split('/')[3])-1)
self.indexes.append(int(self.pk1_st.split('/')[5])-1)
self.indexes.append(int(self.pk2_st.split('/')[5])-1)
self.bonds.append(Bond(int(self.pk1_st.split('/')[5])-1,int(self.pk2_st.split('/')[5])-1,int(self.pk1_st.split('/')[3])-1,int(self.pk2_st.split('/')[3])-1))
# -1 converts to 0 start index, which is used for bonds - This will be one off from labels in pymol
cmd.unpick()
else:
# for single atom, also get 3D coordinates (EXAMPLE)
print "Single Atom"
self.load=None
cmd.iterate("pk1","setattr(cmd.get_wizard(),'pk1_r',""index)")
self.lead=self.pk1_r-1 #Converting to 0 start index, which is used for bonds
#This will be one off from labels in pymol
cmd.iterate_state(cmd.get_state(),"pk1","setattr(cmd.get_wizard(),'pk1_xyz',(x,y,z))")
#cmd.unpick()
cmd.refresh_wizard()
|
tmorrell/SamStruct
|
inputs/selector.py
|
Python
|
gpl-2.0
| 14,445
|
[
"PyMOL"
] |
b38aec23f83c72956e053dec76141a6a7b30d4323955b02c1dbb16daea638b54
|
__author__ = "Andre Merzky, Ole Weidner, Mark Santcroos"
__copyright__ = "Copyright 2012-2015, The SAGA Project"
__license__ = "MIT"
""" PBSPro job adaptor implementation
"""
import threading
import saga.url as surl
import saga.utils.pty_shell as sups
import saga.adaptors.base
import saga.adaptors.cpi.job
from saga.job.constants import *
import re
import os
import time
from cgi import parse_qs
SYNC_CALL = saga.adaptors.cpi.decorators.SYNC_CALL
ASYNC_CALL = saga.adaptors.cpi.decorators.ASYNC_CALL
SYNC_WAIT_UPDATE_INTERVAL = 1 # seconds
MONITOR_UPDATE_INTERVAL = 60 # seconds
# --------------------------------------------------------------------
#
class _job_state_monitor(threading.Thread):
""" thread that periodically monitors job states
"""
def __init__(self, job_service):
self.logger = job_service._logger
self.js = job_service
self._stop = threading.Event()
super(_job_state_monitor, self).__init__()
self.setDaemon(True)
def stop(self):
self._stop.set()
def run(self):
# we stop the monitoring thread when we see the same error 3 times in
# a row...
error_type_count = dict()
while not self._stop.is_set ():
try:
# FIXME: do bulk updates here! we don't want to pull information
# job by job. that would be too inefficient!
jobs = self.js.jobs
for job_id in jobs.keys() :
job_info = jobs[job_id]
# we only need to monitor jobs that are not in a
# terminal state, so we can skip the ones that are
# either done, failed or canceled
if job_info['state'] not in [saga.job.DONE, saga.job.FAILED, saga.job.CANCELED] :
new_job_info = self.js._job_get_info(job_id, reconnect=False)
self.logger.info ("Job monitoring thread updating Job %s (state: %s)" \
% (job_id, new_job_info['state']))
# fire job state callback if 'state' has changed
if new_job_info['state'] != job_info['state']:
job_obj = job_info['obj']
job_obj._attributes_i_set('state', new_job_info['state'], job_obj._UP, True)
# update job info
jobs[job_id] = new_job_info
except Exception as e:
import traceback
traceback.print_exc ()
self.logger.warning("Exception caught in job monitoring thread: %s" % e)
# check if we see the same error again and again
error_type = str(e)
if error_type not in error_type_count :
error_type_count = dict()
error_type_count[error_type] = 1
else :
error_type_count[error_type] += 1
if error_type_count[error_type] >= 3 :
self.logger.error("too many monitoring errors -- stopping job monitoring thread")
return
finally :
time.sleep (MONITOR_UPDATE_INTERVAL)
# --------------------------------------------------------------------
#
def log_error_and_raise(message, exception, logger):
""" logs an 'error' message and subsequently throws an exception
"""
logger.error(message)
raise exception(message)
# --------------------------------------------------------------------
#
def _pbs_to_saga_jobstate(pbsjs):
""" translates a pbs one-letter state to saga
"""
if pbsjs == 'C': # Torque "Job is completed after having run."
return saga.job.DONE
elif pbsjs == 'F': # PBS Pro "Job is finished."
return saga.job.DONE
elif pbsjs == 'H': # PBS Pro and TORQUE "Job is held."
return saga.job.PENDING
elif pbsjs == 'Q': # PBS Pro and TORQUE "Job is queued(, eligible to run or routed.)
return saga.job.PENDING
elif pbsjs == 'S': # PBS Pro and TORQUE "Job is suspended."
return saga.job.PENDING
elif pbsjs == 'W': # PBS Pro and TORQUE "Job is waiting for its execution time to be reached."
return saga.job.PENDING
elif pbsjs == 'R': # PBS Pro and TORQUE "Job is running."
return saga.job.RUNNING
elif pbsjs == 'E': # PBS Pro and TORQUE "Job is exiting after having run"
return saga.job.RUNNING
elif pbsjs == 'T': # PBS Pro and TORQUE "Job is being moved to new location."
# TODO: PENDING?
return saga.job.RUNNING
elif pbsjs == 'X': # PBS Pro "Subjob has completed execution or has been deleted."
return saga.job.CANCELED
else:
return saga.job.UNKNOWN
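# Minimal usage sketch of the mapping above (doctest-style, illustrative only):
#
#   >>> _pbs_to_saga_jobstate('Q') == saga.job.PENDING
#   True
#   >>> _pbs_to_saga_jobstate('R') == saga.job.RUNNING
#   True
#   >>> _pbs_to_saga_jobstate('?') == saga.job.UNKNOWN
#   True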
# --------------------------------------------------------------------
#
def _pbscript_generator(url, logger, jd, ppn, gres, pbs_version, is_cray="", queue=None):
""" generates a PBS Pro script from a SAGA job description
"""
pbs_params = str()
exec_n_args = str()
exec_n_args += 'export SAGA_PPN=%d\n' % ppn
if jd.executable:
exec_n_args += "%s " % (jd.executable)
if jd.arguments:
for arg in jd.arguments:
exec_n_args += "%s " % (arg)
if jd.name:
pbs_params += "#PBS -N %s \n" % jd.name
if (is_cray is "") or not('Version: 4.2.7' in pbs_version):
# qsub on Cray systems complains about the -V option:
# Warning:
# Your job uses the -V option, which requests that all of your
# current shell environment settings (9913 bytes) be exported to
# it. This is not recommended, as it causes problems for the
# batch environment in some cases.
pbs_params += "#PBS -V \n"
if jd.environment:
pbs_params += "#PBS -v %s\n" % \
','.join (["%s=%s" % (k,v)
for k,v in jd.environment.iteritems()])
# apparently this doesn't work with older PBS installations
# if jd.working_directory:
# pbs_params += "#PBS -d %s \n" % jd.working_directory
# a workaround is to do an explicit 'cd'
if jd.working_directory:
workdir_directives = 'export PBS_O_WORKDIR=%s \n' % jd.working_directory
workdir_directives += 'mkdir -p %s\n' % jd.working_directory
workdir_directives += 'cd %s\n' % jd.working_directory
else:
workdir_directives = ''
if jd.output:
# if working directory is set, we want stdout to end up in
        # the working directory as well, unless it contains a specific
# path name.
if jd.working_directory:
if os.path.isabs(jd.output):
pbs_params += "#PBS -o %s \n" % jd.output
else:
# user provided a relative path for STDOUT. in this case
                # we prepend the working directory path before passing
# it on to PBS
pbs_params += "#PBS -o %s/%s \n" % (jd.working_directory, jd.output)
else:
pbs_params += "#PBS -o %s \n" % jd.output
if jd.error:
# if working directory is set, we want stderr to end up in
# the working directory as well, unless it contains a specific
# path name.
if jd.working_directory:
if os.path.isabs(jd.error):
pbs_params += "#PBS -e %s \n" % jd.error
else:
                # user provided a relative path for STDERR. in this case
                # we prepend the working directory path before passing
# it on to PBS
pbs_params += "#PBS -e %s/%s \n" % (jd.working_directory, jd.error)
else:
pbs_params += "#PBS -e %s \n" % jd.error
if jd.wall_time_limit:
hours = jd.wall_time_limit / 60
minutes = jd.wall_time_limit % 60
pbs_params += "#PBS -l walltime=%s:%s:00 \n" \
% (str(hours), str(minutes))
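    # e.g. wall_time_limit=90 renders "#PBS -l walltime=1:30:00"; minutes are
    # not zero-padded, so 61 renders "walltime=1:1:00".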
if jd.queue and queue:
pbs_params += "#PBS -q %s \n" % queue
elif jd.queue and not queue:
pbs_params += "#PBS -q %s \n" % jd.queue
elif queue and not jd.queue:
pbs_params += "#PBS -q %s \n" % queue
if jd.project:
if 'PBSPro_1' in pbs_version:
# On PBS Pro we set both -P(roject) and -A(accounting),
# as we don't know what the admins decided, and just
# pray that this doesn't create problems.
pbs_params += "#PBS -P %s \n" % str(jd.project)
pbs_params += "#PBS -A %s \n" % str(jd.project)
else:
# Torque
pbs_params += "#PBS -A %s \n" % str(jd.project)
if jd.job_contact:
pbs_params += "#PBS -m abe \n"
# if total_cpu_count is not defined, we assume 1
if not jd.total_cpu_count:
jd.total_cpu_count = 1
# Request enough nodes to cater for the number of cores requested
nnodes = jd.total_cpu_count / ppn
if jd.total_cpu_count % ppn > 0:
nnodes += 1
# We use the ncpus value for systems that need to specify ncpus as multiple of PPN
ncpus = nnodes * ppn
# Node properties are appended to the nodes argument in the resource_list.
node_properties = []
# Parse candidate_hosts
#
# Currently only implemented for "bigflash" on Gordon@SDSC
# https://github.com/radical-cybertools/saga-python/issues/406
#
if jd.candidate_hosts:
if 'BIG_FLASH' in jd.candidate_hosts:
node_properties.append('bigflash')
else:
raise saga.NotImplemented("This type of 'candidate_hosts' not implemented: '%s'" % jd.candidate_hosts)
if is_cray is not "":
# Special cases for PBS/TORQUE on Cray. Different PBSes,
# different flags. A complete nightmare...
if 'PBSPro_10' in pbs_version:
logger.info("Using Cray XT (e.g. Hopper) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).")
pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count
elif 'PBSPro_12' in pbs_version:
logger.info("Using Cray XT (e.g. Archer) specific '#PBS -l select=xx' flags (PBSPro_12).")
pbs_params += "#PBS -l select=%d\n" % nnodes
elif '4.2.6' in pbs_version:
logger.info("Using Titan (Cray XP) specific '#PBS -l nodes=xx'")
pbs_params += "#PBS -l nodes=%d\n" % nnodes
elif '4.2.7' in pbs_version:
logger.info("Using Cray XT @ NERSC (e.g. Edison) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).")
pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count
elif 'Version: 5.' in pbs_version:
logger.info("Using TORQUE 5.x notation '#PBS -l procs=XX' ")
pbs_params += "#PBS -l procs=%d\n" % jd.total_cpu_count
else:
logger.info("Using Cray XT (e.g. Kraken, Jaguar) specific '#PBS -l size=xx' flags (TORQUE).")
pbs_params += "#PBS -l size=%s\n" % jd.total_cpu_count
elif 'version: 2.3.13' in pbs_version:
# e.g. Blacklight
# TODO: The more we add, the more it screams for a refactoring
pbs_params += "#PBS -l ncpus=%d\n" % ncpus
elif '4.2.7' in pbs_version:
logger.info("Using Cray XT @ NERSC (e.g. Hopper) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).")
pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count
elif 'PBSPro_12' in pbs_version:
logger.info("Using PBSPro 12 notation '#PBS -l select=XX' ")
pbs_params += "#PBS -l select=%d\n" % (nnodes)
else:
# Default case, i.e, standard HPC cluster (non-Cray)
# If we want just a slice of one node
if jd.total_cpu_count < ppn:
ppn = jd.total_cpu_count
pbs_params += "#PBS -l nodes=%d:ppn=%d%s\n" % (
nnodes, ppn, ''.join([':%s' % prop for prop in node_properties]))
# Process Generic Resource specification request
if gres:
pbs_params += "#PBS -l gres=%s\n" % gres
# escape all double quotes and dollarsigns, otherwise 'echo |'
# further down won't work
# only escape '$' in args and exe. not in the params
exec_n_args = workdir_directives + exec_n_args
exec_n_args = exec_n_args.replace('$', '\\$')
pbscript = "\n#!/bin/bash \n%s%s" % (pbs_params, exec_n_args)
pbscript = pbscript.replace('"', '\\"')
return pbscript
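# Hypothetical usage sketch (not part of the adaptor) of the generator above.
# 'log' stands in for a configured logging.Logger; the URL, ppn and version
# string are made up:
#
#   jd = saga.job.Description()
#   jd.executable = "/bin/date"
#   jd.total_cpu_count = 16
#   script = _pbscript_generator(url=surl.Url("pbspro://localhost"), logger=log,
#                                jd=jd, ppn=8, gres=None,
#                                pbs_version="PBSPro_12", queue="debug")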
# --------------------------------------------------------------------
# some private defs
#
_PTY_TIMEOUT = 2.0
# --------------------------------------------------------------------
# the adaptor name
#
_ADAPTOR_NAME = "saga.adaptor.pbsprojob"
_ADAPTOR_SCHEMAS = ["pbspro", "pbspro+ssh", "pbspro+gsissh"]
_ADAPTOR_OPTIONS = []
# --------------------------------------------------------------------
# the adaptor capabilities & supported attributes
#
_ADAPTOR_CAPABILITIES = {
"jdes_attributes": [saga.job.NAME,
saga.job.EXECUTABLE,
saga.job.ARGUMENTS,
saga.job.CANDIDATE_HOSTS,
saga.job.ENVIRONMENT,
saga.job.INPUT,
saga.job.OUTPUT,
saga.job.ERROR,
saga.job.QUEUE,
saga.job.PROJECT,
saga.job.WALL_TIME_LIMIT,
saga.job.WORKING_DIRECTORY,
saga.job.WALL_TIME_LIMIT,
saga.job.SPMD_VARIATION, # TODO: 'hot'-fix for BigJob
saga.job.PROCESSES_PER_HOST,
saga.job.TOTAL_CPU_COUNT],
"job_attributes": [saga.job.EXIT_CODE,
saga.job.EXECUTION_HOSTS,
saga.job.CREATED,
saga.job.STARTED,
saga.job.FINISHED],
"metrics": [saga.job.STATE],
"callbacks": [saga.job.STATE],
"contexts": {"ssh": "SSH public/private keypair",
"x509": "GSISSH X509 proxy context",
"userpass": "username/password pair (ssh)"}
}
# --------------------------------------------------------------------
# the adaptor documentation
#
_ADAPTOR_DOC = {
"name": _ADAPTOR_NAME,
"cfg_options": _ADAPTOR_OPTIONS,
"capabilities": _ADAPTOR_CAPABILITIES,
"description": """
The PBSPro adaptor allows to run and manage jobs on `PBS <http://www.pbsworks.com/>`_
controlled HPC clusters.
""",
"example": "examples/jobs/pbsjob.py",
"schemas": {"pbspro": "connect to a local cluster",
"pbspro+ssh": "connect to a remote cluster via SSH",
"pbspro+gsissh": "connect to a remote cluster via GSISSH"}
}
# --------------------------------------------------------------------
# the adaptor info is used to register the adaptor with SAGA
#
_ADAPTOR_INFO = {
"name" : _ADAPTOR_NAME,
"version" : "v0.1",
"schemas" : _ADAPTOR_SCHEMAS,
"capabilities": _ADAPTOR_CAPABILITIES,
"cpis": [
{
"type": "saga.job.Service",
"class": "PBSProJobService"
},
{
"type": "saga.job.Job",
"class": "PBSProJob"
}
]
}
###############################################################################
# The adaptor class
class Adaptor (saga.adaptors.base.Base):
""" this is the actual adaptor class, which gets loaded by SAGA (i.e. by
the SAGA engine), and which registers the CPI implementation classes
which provide the adaptor's functionality.
"""
# ----------------------------------------------------------------
#
def __init__(self):
saga.adaptors.base.Base.__init__(self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)
self.id_re = re.compile('^\[(.*)\]-\[(.*?)\]$')
self.opts = self.get_config (_ADAPTOR_NAME)
# ----------------------------------------------------------------
#
def sanity_check(self):
# FIXME: also check for gsissh
pass
# ----------------------------------------------------------------
#
def parse_id(self, id):
# split the id '[rm]-[pid]' in its parts, and return them.
match = self.id_re.match(id)
if not match or len(match.groups()) != 2:
raise saga.BadParameter("Cannot parse job id '%s'" % id)
return (match.group(1), match.group(2))
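    # Illustrative sketch: ids look like '[rm-url]-[pid]'; with a made-up id,
    # parse_id('[pbspro://host]-[1234]') returns ('pbspro://host', '1234').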
###############################################################################
#
class PBSProJobService (saga.adaptors.cpi.job.Service):
""" implements saga.adaptors.cpi.job.Service
"""
# ----------------------------------------------------------------
#
def __init__(self, api, adaptor):
        self.mt = None
_cpi_base = super(PBSProJobService, self)
_cpi_base.__init__(api, adaptor)
self._adaptor = adaptor
# ----------------------------------------------------------------
#
def __del__(self):
self.close()
# ----------------------------------------------------------------
#
def close(self):
if self.mt :
self.mt.stop()
self.mt.join(10) # don't block forever on join()
self._logger.info("Job monitoring thread stopped.")
self.finalize(True)
# ----------------------------------------------------------------
#
def finalize(self, kill_shell=False):
if kill_shell :
if self.shell :
self.shell.finalize (True)
# ----------------------------------------------------------------
#
@SYNC_CALL
def init_instance(self, adaptor_state, rm_url, session):
""" service instance constructor
"""
self.rm = rm_url
self.session = session
self.ppn = None
self.is_cray = ""
self.queue = None
self.shell = None
self.jobs = dict()
self.gres = None
# the monitoring thread - one per service instance
self.mt = _job_state_monitor(job_service=self)
self.mt.start()
rm_scheme = rm_url.scheme
pty_url = surl.Url(rm_url)
# this adaptor supports options that can be passed via the
# 'query' component of the job service URL.
if rm_url.query:
for key, val in parse_qs(rm_url.query).iteritems():
if key == 'queue':
self.queue = val[0]
elif key == 'craytype':
self.is_cray = val[0]
elif key == 'ppn':
self.ppn = int(val[0])
elif key == 'gres':
self.gres = val[0]
# we need to extract the scheme for PTYShell. That's basically the
# job.Service Url without the pbs+ part. We use the PTYShell to execute
# pbs commands either locally or via gsissh or ssh.
if rm_scheme == "pbspro":
pty_url.scheme = "fork"
elif rm_scheme == "pbspro+ssh":
pty_url.scheme = "ssh"
elif rm_scheme == "pbspro+gsissh":
pty_url.scheme = "gsissh"
# these are the commands that we need in order to interact with PBS.
# the adaptor will try to find them during initialize(self) and bail
        # out in case they are not available.
self._commands = {'pbsnodes': None,
'qstat': None,
'qsub': None,
'qdel': None}
self.shell = sups.PTYShell(pty_url, self.session)
# self.shell.set_initialize_hook(self.initialize)
# self.shell.set_finalize_hook(self.finalize)
self.initialize()
return self.get_api()
# ----------------------------------------------------------------
#
def initialize(self):
# check if all required pbs tools are available
for cmd in self._commands.keys():
ret, out, _ = self.shell.run_sync("which %s " % cmd)
if ret != 0:
message = "Error finding PBS tools: %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
path = out.strip() # strip removes newline
if cmd == 'qdel': # qdel doesn't support --version!
self._commands[cmd] = {"path": path,
"version": "?"}
elif cmd == 'qsub': # qsub doesn't always support --version!
self._commands[cmd] = {"path": path,
"version": "?"}
else:
ret, out, _ = self.shell.run_sync("%s --version" % cmd)
if ret != 0:
message = "Error finding PBS tools: %s" % out
log_error_and_raise(message, saga.NoSuccess,
self._logger)
else:
# version is reported as: "version: x.y.z"
                        version = out  # keep the full '--version' output
# add path and version to the command dictionary
self._commands[cmd] = {"path": path,
"version": version}
self._logger.info("Found PBS tools: %s" % self._commands)
#
        # TODO: Get rid of this, as I don't think there is any justification that Crays are special
#
# let's try to figure out if we're working on a Cray machine.
# naively, we assume that if we can find the 'aprun' command in the
# path that we're logged in to a Cray machine.
if self.is_cray == "":
ret, out, _ = self.shell.run_sync('which aprun')
if ret != 0:
self.is_cray = ""
else:
self._logger.info("Host '%s' seems to be a Cray machine." \
% self.rm.host)
self.is_cray = "unknowncray"
else:
self._logger.info("Assuming host is a Cray since 'craytype' is set to: %s" % self.is_cray)
#
# Get number of processes per node
#
if self.ppn:
self._logger.debug("Using user specified 'ppn': %d" % self.ppn)
return
# TODO: this is quite a hack. however, it *seems* to work quite
# well in practice.
if 'PBSPro_12' in self._commands['qstat']['version']:
ret, out, _ = self.shell.run_sync('unset GREP_OPTIONS; %s -a | grep -E "resources_available.ncpus"' % \
self._commands['pbsnodes']['path'])
else:
ret, out, _ = self.shell.run_sync('unset GREP_OPTIONS; %s -a | grep -E "(np|pcpu)[[:blank:]]*=" ' % \
self._commands['pbsnodes']['path'])
if ret != 0:
message = "Error running pbsnodes: %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
# this is black magic. we just assume that the highest occurrence
# of a specific np is the number of processors (cores) per compute
# node. this equals max "PPN" for job scripts
ppn_list = dict()
for line in out.split('\n'):
np = line.split(' = ')
if len(np) == 2:
np_str = np[1].strip()
if np_str == '<various>':
continue
else:
np = int(np_str)
if np in ppn_list:
ppn_list[np] += 1
else:
ppn_list[np] = 1
self.ppn = max(ppn_list, key=ppn_list.get)
self._logger.debug("Found the following 'ppn' configurations: %s. "
"Using %s as default ppn." % (ppn_list, self.ppn))
# ----------------------------------------------------------------
#
def _job_run(self, job_obj):
""" runs a job via qsub
"""
# get the job description
jd = job_obj.get_description()
# normalize working directory path
if jd.working_directory :
jd.working_directory = os.path.normpath (jd.working_directory)
# TODO: Why would one want this?
if self.queue and jd.queue:
self._logger.warning("Job service was instantiated explicitly with \
            'queue=%s', but the job description requests a different queue: '%s'. Using '%s'." %
(self.queue, jd.queue, self.queue))
try:
# create a PBS job script from SAGA job description
script = _pbscript_generator(url=self.rm, logger=self._logger,
jd=jd, ppn=self.ppn, gres=self.gres,
pbs_version=self._commands['qstat']['version'],
is_cray=self.is_cray, queue=self.queue,
)
self._logger.info("Generated PBS script: %s" % script)
        except Exception as ex:
log_error_and_raise(str(ex), saga.BadParameter, self._logger)
# try to create the working directory (if defined)
# WARNING: this assumes a shared filesystem between login node and
# compute nodes.
if jd.working_directory:
self._logger.info("Creating working directory %s" % jd.working_directory)
ret, out, _ = self.shell.run_sync("mkdir -p %s" % (jd.working_directory))
if ret != 0:
# something went wrong
message = "Couldn't create working directory - %s" % (out)
log_error_and_raise(message, saga.NoSuccess, self._logger)
# Now we want to execute the script. This process consists of two steps:
# (1) we create a temporary file with 'mktemp' and write the contents of
# the generated PBS script into it
# (2) we call 'qsub <tmpfile>' to submit the script to the queueing system
cmdline = """SCRIPTFILE=`mktemp -t SAGA-Python-PBSProJobScript.XXXXXX` && echo "%s" > $SCRIPTFILE && %s $SCRIPTFILE && rm -f $SCRIPTFILE""" % (script, self._commands['qsub']['path'])
ret, out, _ = self.shell.run_sync(cmdline)
if ret != 0:
# something went wrong
message = "Error running job via 'qsub': %s. Commandline was: %s" \
% (out, cmdline)
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
# parse the job id. qsub usually returns just the job id, but
# sometimes there are a couple of lines of warnings before.
# if that's the case, we log those as 'warnings'
lines = out.split('\n')
            lines = [line for line in lines if line != '']  # remove empty lines
if len(lines) > 1:
                self._logger.warning('qsub: %s' % ''.join(lines[:-1]))
            # we assume the job id is in the last line
#print cmdline
#print out
job_id = "[%s]-[%s]" % (self.rm, lines[-1].strip().split('.')[0])
self._logger.info("Submitted PBS job with id: %s" % job_id)
state = saga.job.PENDING
# populate job info dict
self.jobs[job_id] = {'obj' : job_obj,
'job_id' : job_id,
'state' : state,
'exec_hosts' : None,
'returncode' : None,
'create_time' : None,
'start_time' : None,
'end_time' : None,
'gone' : False
}
self._logger.info ("assign job id %s / %s / %s to watch list (%s)" \
% (None, job_id, job_obj, self.jobs.keys()))
# set status to 'pending' and manually trigger callback
job_obj._attributes_i_set('state', state, job_obj._UP, True)
# return the job id
return job_id
# ----------------------------------------------------------------
#
def _retrieve_job(self, job_id):
""" see if we can get some info about a job that we don't
know anything about
"""
# rm, pid = self._adaptor.parse_id(job_id)
# # run the PBS 'qstat' command to get some infos about our job
# if 'PBSPro_1' in self._commands['qstat']['version']:
# qstat_flag = '-f'
# else:
# qstat_flag ='-f1'
#
# ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s %s %s | "\
# "grep -E -i '(job_state)|(exec_host)|(exit_status)|(ctime)|"\
# "(start_time)|(comp_time)|(stime)|(qtime)|(mtime)'" \
# % (self._commands['qstat']['path'], qstat_flag, pid))
# if ret != 0:
# message = "Couldn't reconnect to job '%s': %s" % (job_id, out)
# log_error_and_raise(message, saga.NoSuccess, self._logger)
# else:
# # the job seems to exist on the backend. let's gather some data
# job_info = {
# 'job_id': job_id,
# 'state': saga.job.UNKNOWN,
# 'exec_hosts': None,
# 'returncode': None,
# 'create_time': None,
# 'start_time': None,
# 'end_time': None,
# 'gone': False
# }
#
# job_info = self._parse_qstat(out, job_info)
#
# return job_info
# ----------------------------------------------------------------
#
def _job_get_info(self, job_id, reconnect):
""" Get job information attributes via qstat.
"""
# If we don't have the job in our dictionary, we don't want it,
# unless we are trying to reconnect.
if not reconnect and job_id not in self.jobs:
message = "Unknown job id: %s. Can't update state." % job_id
log_error_and_raise(message, saga.NoSuccess, self._logger)
if not reconnect:
            # job_info contains the info collected when _job_get_info
# was called the last time
job_info = self.jobs[job_id]
# if the 'gone' flag is set, there's no need to query the job
# state again. it's gone forever
if job_info['gone'] is True:
return job_info
else:
# Create a template data structure
job_info = {
'job_id': job_id,
'state': saga.job.UNKNOWN,
'exec_hosts': None,
'returncode': None,
'create_time': None,
'start_time': None,
'end_time': None,
'gone': False
}
rm, pid = self._adaptor.parse_id(job_id)
# run the PBS 'qstat' command to get some infos about our job
# TODO: create a PBSPRO/TORQUE flag once
if 'PBSPro_1' in self._commands['qstat']['version']:
qstat_flag = '-fx'
else:
qstat_flag ='-f1'
ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s %s %s | "
"grep -E -i '(job_state)|(exec_host)|(exit_status)|"
"(ctime)|(start_time)|(stime)|(mtime)'"
% (self._commands['qstat']['path'], qstat_flag, pid))
if ret != 0:
if reconnect:
message = "Couldn't reconnect to job '%s': %s" % (job_id, out)
log_error_and_raise(message, saga.NoSuccess, self._logger)
if ("Unknown Job Id" in out):
# Let's see if the last known job state was running or pending. in
# that case, the job is gone now, which can either mean DONE,
# or FAILED. the only thing we can do is set it to 'DONE'
job_info['gone'] = True
# TODO: we can also set the end time?
self._logger.warning("Previously running job has disappeared. "
"This probably means that the backend doesn't store "
"information about finished jobs. Setting state to 'DONE'.")
if job_info['state'] in [saga.job.RUNNING, saga.job.PENDING]:
job_info['state'] = saga.job.DONE
else:
# TODO: This is an uneducated guess?
job_info['state'] = saga.job.FAILED
else:
# something went wrong
message = "Error retrieving job info via 'qstat': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
# The job seems to exist on the backend. let's process some data.
# TODO: make the parsing "contextual", in the sense that it takes
# the state into account.
# parse the egrep result. this should look something like this:
# job_state = C
# exec_host = i72/0
# exit_status = 0
results = out.split('\n')
for line in results:
if len(line.split('=')) == 2:
key, val = line.split('=')
key = key.strip()
val = val.strip()
# The ubiquitous job state
if key in ['job_state']: # PBS Pro and TORQUE
job_info['state'] = _pbs_to_saga_jobstate(val)
# Hosts where the job ran
elif key in ['exec_host']: # PBS Pro and TORQUE
job_info['exec_hosts'] = val.split('+') # format i73/7+i73/6+...
# Exit code of the job
elif key in ['exit_status', # TORQUE
'Exit_status' # PBS Pro
]:
job_info['returncode'] = int(val)
# Time job got created in the queue
elif key in ['ctime']: # PBS Pro and TORQUE
job_info['create_time'] = val
# Time job started to run
elif key in ['start_time', # TORQUE
'stime' # PBS Pro
]:
job_info['start_time'] = val
# Time job ended.
#
# PBS Pro doesn't have an "end time" field.
                        # It has a "resources_used.walltime" though,
# which could be added up to the start time.
# We will not do that arithmetic now though.
#
# Alternatively, we can use mtime, as the latest
# modification time will generally also be the end time.
#
                        # TORQUE has a "comp_time" (completion? time) field,
# that is generally the same as mtime at the finish.
#
# For the time being we will use mtime as end time for
# both TORQUE and PBS Pro.
#
if key in ['mtime']: # PBS Pro and TORQUE
job_info['end_time'] = val
# return the updated job info
return job_info
def _parse_qstat(self, haystack, job_info):
# return the new job info dict
return job_info
# ----------------------------------------------------------------
#
def _job_get_state(self, job_id):
""" get the job's state
"""
return self.jobs[job_id]['state']
# ----------------------------------------------------------------
#
def _job_get_exit_code(self, job_id):
""" get the job's exit code
"""
ret = self.jobs[job_id]['returncode']
# FIXME: 'None' should cause an exception
        if ret is None : return None
else : return int(ret)
# ----------------------------------------------------------------
#
def _job_get_execution_hosts(self, job_id):
""" get the job's exit code
"""
return self.jobs[job_id]['exec_hosts']
# ----------------------------------------------------------------
#
def _job_get_create_time(self, job_id):
""" get the job's creation time
"""
return self.jobs[job_id]['create_time']
# ----------------------------------------------------------------
#
def _job_get_start_time(self, job_id):
""" get the job's start time
"""
return self.jobs[job_id]['start_time']
# ----------------------------------------------------------------
#
def _job_get_end_time(self, job_id):
""" get the job's end time
"""
return self.jobs[job_id]['end_time']
# ----------------------------------------------------------------
#
def _job_cancel(self, job_id):
""" cancel the job via 'qdel'
"""
rm, pid = self._adaptor.parse_id(job_id)
ret, out, _ = self.shell.run_sync("%s %s\n" \
% (self._commands['qdel']['path'], pid))
if ret != 0:
message = "Error canceling job via 'qdel': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
# assume the job was succesfully canceled
self.jobs[job_id]['state'] = saga.job.CANCELED
# ----------------------------------------------------------------
#
def _job_wait(self, job_id, timeout):
""" wait for the job to finish or fail
"""
time_start = time.time()
time_now = time_start
rm, pid = self._adaptor.parse_id(job_id)
while True:
state = self.jobs[job_id]['state'] # this gets updated in the bg.
if state == saga.job.DONE or \
state == saga.job.FAILED or \
state == saga.job.CANCELED:
return True
# avoid busy poll
time.sleep(SYNC_WAIT_UPDATE_INTERVAL)
# check if we hit timeout
if timeout >= 0:
time_now = time.time()
if time_now - time_start > timeout:
return False
# ----------------------------------------------------------------
#
@SYNC_CALL
def create_job(self, jd):
""" implements saga.adaptors.cpi.job.Service.get_url()
"""
# this dict is passed on to the job adaptor class -- use it to pass any
# state information you need there.
adaptor_state = {"job_service": self,
"job_description": jd,
"job_schema": self.rm.schema,
"reconnect": False
}
# create and return a new job object
return saga.job.Job(_adaptor=self._adaptor,
_adaptor_state=adaptor_state)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_job(self, job_id):
""" Implements saga.adaptors.cpi.job.Service.get_job()
Re-create job instance from a job-id.
"""
# If we already have the job info, we just pass the current info.
if job_id in self.jobs :
return self.jobs[job_id]['obj']
# Try to get some initial information about this job (again)
job_info = self._job_get_info(job_id, reconnect=True)
# this dict is passed on to the job adaptor class -- use it to pass any
# state information you need there.
adaptor_state = {"job_service": self,
# TODO: fill job description
"job_description": saga.job.Description(),
"job_schema": self.rm.schema,
"reconnect": True,
"reconnect_jobid": job_id
}
job_obj = saga.job.Job(_adaptor=self._adaptor,
_adaptor_state=adaptor_state)
# throw it into our job dictionary.
job_info['obj'] = job_obj
self.jobs[job_id] = job_info
return job_obj
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_url(self):
""" implements saga.adaptors.cpi.job.Service.get_url()
"""
return self.rm
# ----------------------------------------------------------------
#
@SYNC_CALL
def list(self):
""" implements saga.adaptors.cpi.job.Service.list()
"""
ids = []
ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s | grep `whoami`" %
self._commands['qstat']['path'])
if ret != 0 and len(out) > 0:
message = "failed to list jobs via 'qstat': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
elif ret != 0 and len(out) == 0:
# qstat | grep `` exits with 1 if the list is empty
pass
else:
for line in out.split("\n"):
# output looks like this:
# 112059.svc.uc.futuregrid testjob oweidner 0 Q batch
# 112061.svc.uc.futuregrid testjob oweidner 0 Q batch
if len(line.split()) > 1:
job_id = "[%s]-[%s]" % (self.rm, line.split()[0].split('.')[0])
ids.append(str(job_id))
return ids
# # ----------------------------------------------------------------
# #
# def container_run (self, jobs) :
# self._logger.debug ("container run: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.run ()
#
#
# # ----------------------------------------------------------------
# #
# def container_wait (self, jobs, mode, timeout) :
# self._logger.debug ("container wait: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.wait ()
#
#
# # ----------------------------------------------------------------
# #
# def container_cancel (self, jobs) :
# self._logger.debug ("container cancel: %s" % str(jobs))
# raise saga.NoSuccess ("Not Implemented");
###############################################################################
#
class PBSProJob (saga.adaptors.cpi.job.Job):
""" implements saga.adaptors.cpi.job.Job
"""
def __init__(self, api, adaptor):
# initialize parent class
_cpi_base = super(PBSProJob, self)
_cpi_base.__init__(api, adaptor)
def _get_impl(self):
return self
@SYNC_CALL
def init_instance(self, job_info):
""" implements saga.adaptors.cpi.job.Job.init_instance()
"""
# init_instance is called for every new saga.job.Job object
# that is created
self.jd = job_info["job_description"]
self.js = job_info["job_service"]
if job_info['reconnect'] is True:
self._id = job_info['reconnect_jobid']
self._started = True
else:
self._id = None
self._started = False
return self.get_api()
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_state(self):
""" implements saga.adaptors.cpi.job.Job.get_state()
"""
if self._started is False:
return saga.job.NEW
return self.js._job_get_state(job_id=self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def wait(self, timeout):
""" implements saga.adaptors.cpi.job.Job.wait()
"""
if self._started is False:
log_error_and_raise("Can't wait for job that hasn't been started",
saga.IncorrectState, self._logger)
else:
self.js._job_wait(job_id=self._id, timeout=timeout)
# ----------------------------------------------------------------
#
@SYNC_CALL
def cancel(self, timeout):
""" implements saga.adaptors.cpi.job.Job.cancel()
"""
if self._started is False:
log_error_and_raise("Can't wait for job that hasn't been started",
saga.IncorrectState, self._logger)
else:
self.js._job_cancel(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def run(self):
""" implements saga.adaptors.cpi.job.Job.run()
"""
self._id = self.js._job_run(self._api())
self._started = True
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_service_url(self):
""" implements saga.adaptors.cpi.job.Job.get_service_url()
"""
return self.js.rm
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_id(self):
""" implements saga.adaptors.cpi.job.Job.get_id()
"""
return self._id
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_exit_code(self):
""" implements saga.adaptors.cpi.job.Job.get_exit_code()
"""
if self._started is False:
return None
else:
return self.js._job_get_exit_code(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_created(self):
""" implements saga.adaptors.cpi.job.Job.get_created()
"""
if self._started is False:
return None
else:
return self.js._job_get_create_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_started(self):
""" implements saga.adaptors.cpi.job.Job.get_started()
"""
if self._started is False:
return None
else:
return self.js._job_get_start_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_finished(self):
""" implements saga.adaptors.cpi.job.Job.get_finished()
"""
if self._started is False:
return None
else:
return self.js._job_get_end_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_execution_hosts(self):
""" implements saga.adaptors.cpi.job.Job.get_execution_hosts()
"""
if self._started is False:
return None
else:
return self.js._job_get_execution_hosts(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_description(self):
""" implements saga.adaptors.cpi.job.Job.get_execution_hosts()
"""
return self.jd
|
mehdisadeghi/saga-python
|
src/saga/adaptors/pbspro/pbsprojob.py
|
Python
|
mit
| 47,635
|
[
"Jaguar"
] |
bbd2e85978cf89664a544ade5d52c6bb0605699455ecbf41fea0f3aa165a144a
|
#!/usr/bin/env python
from __future__ import print_function, division
import IMP
import IMP.core
import IMP.isd
import IMP.isd.gmm_tools
import IMP.algebra
import IMP.test
import numpy as np
from math import cos, sin, pi, sqrt, exp, log
from copy import deepcopy
import itertools
def create_test_points(mu, radii):
testers = [[mu[0], mu[1], mu[2]]]
for i in range(3):
t = mu[:]
t[i] += radii[i] + 1 # kluge to ensure good order at the end
testers.append(t)
t = mu[:]
t[i] -= radii[i]
testers.append(t)
return testers
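# worked example (values checked by hand): create_test_points([0, 0, 0],
# [1, 1, 1]) returns the center followed by a +/- pair per axis, i.e.
# [[0,0,0], [2,0,0], [-1,0,0], [0,2,0], [0,-1,0], [0,0,2], [0,0,-1]]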
def score_gaussian_overlap(p0, p1):
g0 = IMP.core.Gaussian(p0).get_gaussian()
g1 = IMP.core.Gaussian(p1).get_gaussian()
mass0 = IMP.atom.Mass(p0).get_mass()
mass1 = IMP.atom.Mass(p1).get_mass()
c0 = np.reshape(np.array(IMP.algebra.get_covariance(g0)), (3, 3))
c1 = np.reshape(np.array(IMP.algebra.get_covariance(g1)), (3, 3))
u0 = np.array(list(g0.get_center()))
u1 = np.array(list(g1.get_center()))
c = c0 + c1
u = u1 - u0
det = np.linalg.det(c)
inv = np.linalg.inv(c)
score = mass0 * mass1 * 1 / \
sqrt((2.0 * pi) ** 3 * det) * \
exp(-0.5 * np.dot(u.transpose(), np.dot(inv, u)))
return score
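# note: score_gaussian_overlap computes the analytic overlap integral of two
# Gaussians with masses m0, m1, means u0, u1 and covariances C0, C1:
#   m0 * m1 / sqrt((2*pi)^3 * det(C0 + C1))
#     * exp(-0.5 * (u1 - u0)^T (C0 + C1)^-1 (u1 - u0))
# which is exactly the det/inv/exp expression above.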
def gem_score(model_ps, density_ps,slope=0.0):
mm_score = 0.0
md_score = 0.0
dd_score = 0.0
slope_score=0.0
for nd1 in range(len(density_ps)):
for nd2 in range(len(density_ps)):
dd_score += score_gaussian_overlap(density_ps[nd1], density_ps[nd2])
for nm1 in range(len(model_ps)):
for nm2 in range(len(model_ps)):
mm_score += score_gaussian_overlap(model_ps[nm1], model_ps[nm2])
for nd in range(len(density_ps)):
md_score += score_gaussian_overlap(model_ps[nm1], density_ps[nd])
dist = IMP.algebra.get_distance(IMP.core.XYZ(model_ps[nm1]).get_coordinates(),
IMP.core.XYZ(density_ps[nd]).get_coordinates())
slope_score+=dist*slope
cc = 2*md_score/(mm_score+dd_score)
dist = -log(cc) + slope_score
return cc, dist
def create_random_gaussians(m,randstate,num,spherical,rad_scale=1.0):
ret=[]
for n in range(num):
if spherical:
std=[1,1,1]
else:
std=randstate.random_sample(3,) * 5
center=randstate.random_sample(3,) * 5 - [2.5,2.5,2.5]
var=[s**2 for s in std]
rot=IMP.algebra.get_random_rotation_3d()
trans=IMP.algebra.Transformation3D(rot,center)
shape=IMP.algebra.Gaussian3D(IMP.algebra.ReferenceFrame3D(trans),var)
p=IMP.Particle(m)
IMP.core.Gaussian.setup_particle(p,shape)
IMP.atom.Mass.setup_particle(p,1.0/num)
IMP.core.XYZR.setup_particle(p)
IMP.core.XYZR(p).set_radius(max(std)*rad_scale)
ret.append(p)
return ret
def shuffle_particles(ps,t=2.0,r=0.01):
    for p in ps:  # avoid 'np' as a loop variable; it shadows the numpy alias
trans=IMP.algebra.get_random_local_transformation(IMP.algebra.Vector3D(0,0,0),
t,r)
d=IMP.core.RigidBody(p)
IMP.core.transform(d,trans)
def reset_coords(ps,orig_coords):
for p,c in zip(ps,orig_coords):
IMP.core.XYZ(p).set_coordinates(c)
class Tests(IMP.test.TestCase):
def setUp(self):
IMP.test.TestCase.setUp(self)
# setup problem
ndensity=4
nmodel=10
rs=np.random.RandomState()
self.m = IMP.Model()
itrans = IMP.algebra.get_identity_transformation_3d()
self.density_ps=create_random_gaussians(self.m,rs,ndensity,spherical=False)
self.model_ps=create_random_gaussians(self.m,rs,nmodel,spherical=False)
psigma=IMP.Particle(self.m)
si = IMP.isd.Scale.setup_particle(psigma,1.0)
slope=0.0
model_cutoff_dist=1e8
density_cutoff_dist=1e8
update_model=True
self.gem=IMP.isd.GaussianEMRestraint(self.m,self.model_ps,
self.density_ps,psigma,
model_cutoff_dist,density_cutoff_dist,
slope,
update_model,False)
self.sf = IMP.core.RestraintsScoringFunction([self.gem])
self.orig_coords=[IMP.core.XYZ(p).get_coordinates() for p in self.model_ps]
def test_gem_score(self):
"""test accuracy of GMM score"""
for nt in range(10):
shuffle_particles(self.model_ps)
score = self.sf.evaluate(False)
cc = self.gem.get_cross_correlation_coefficient()
pycc, pyscore = gem_score(self.model_ps, self.density_ps)
self.assertAlmostEqual(score, pyscore, delta=0.02)
self.assertAlmostEqual(cc, pycc, delta=0.02)
def test_gem_score_with_slope(self):
"""test accuracy of GMM score using slope"""
reset_coords(self.model_ps,self.orig_coords)
slope=0.1
self.gem.set_slope(slope)
for nt in range(10):
shuffle_particles(self.model_ps)
score = self.sf.evaluate(False)
cc = self.gem.get_cross_correlation_coefficient()
pycc, pyscore = gem_score(self.model_ps, self.density_ps,
slope=slope)
self.assertAlmostEqual(score, pyscore, delta=0.02)
self.assertAlmostEqual(cc, pycc, delta=0.02)
self.gem.set_slope(0.0)
def test_gem_derivatives(self):
"""test accuracy of GMM derivatives"""
reset_coords(self.model_ps,self.orig_coords)
for i in range(10):
shuffle_particles(self.model_ps)
self.gem.evaluate(True)
            for p in self.model_ps:
d = IMP.core.XYZ(p)
#print 'n', IMP.test.xyz_numerical_derivatives(self.m, d, 0.01), 'a', d.get_derivatives()
self.assertXYZDerivativesInTolerance(self.sf, d, tolerance = 1e-2,percentage=10.0)
def test_gem_derivatives_with_slope(self):
"""test accuracy of GMM derivatives"""
self.gem.set_slope(0.1)
reset_coords(self.model_ps,self.orig_coords)
for i in range(10):
shuffle_particles(self.model_ps)
self.gem.evaluate(True)
            for p in self.model_ps:
d = IMP.core.XYZ(p)
#print 'n', IMP.test.xyz_numerical_derivatives(self.m, d, 0.01), 'a', d.get_derivatives()
self.assertXYZDerivativesInTolerance(self.sf, d, tolerance = 1e-2,percentage=10.0)
self.gem.set_slope(0.0)
def test_rasterize(self):
"""Test making a map from a GMM"""
# Suppress warnings (we don't use the objects set up above)
self.sf.set_was_used(True)
self.gem.set_was_used(True)
dmap = IMP.isd.gmm_tools.gmm2map(self.model_ps,1.0,fast=False)
dmap.set_was_used(True)
class LocalTests(IMP.test.TestCase):
def test_local_score(self):
ndensity=10
nmodel=10
rs=np.random.RandomState()
self.m = IMP.Model()
itrans = IMP.algebra.get_identity_transformation_3d()
self.density_ps=create_random_gaussians(self.m,rs,ndensity,spherical=False)
self.model_ps=create_random_gaussians(self.m,rs,nmodel,spherical=False)
psigma=IMP.Particle(self.m)
si = IMP.isd.Scale.setup_particle(psigma,1.0)
slope=0.0
model_cutoff_dist = 1e8
density_cutoff_dist = 0.0
update_model=True
backbone_slope=False
local=True
self.gem=IMP.isd.GaussianEMRestraint(self.m,self.model_ps,
self.density_ps,psigma,
model_cutoff_dist,density_cutoff_dist,
slope,
update_model,backbone_slope,local)
self.sf = IMP.core.RestraintsScoringFunction([self.gem])
self.orig_coords=[IMP.core.XYZ(p).get_coordinates() for p in self.model_ps]
for nt in range(10):
shuffle_particles(self.model_ps,5.0,1.5)
score = self.sf.evaluate(False)
pycc, pyscore = gem_score(self.model_ps, self.density_ps)
print(score,pycc,pyscore)
if __name__ == '__main__':
IMP.test.main()
|
shanot/imp
|
modules/isd/test/medium_test_gaussian_em_restraint.py
|
Python
|
gpl-3.0
| 8,453
|
[
"Gaussian"
] |
1d3eb54bb37acf3c75e455df29f309fa47d6b578cc3631a87890f48c1959d87d
|
import pymol
from pymol import cmd;
import sys
pymol.finish_launching()
import time ; time.sleep(1);
cmd.load('2XPI.pdb')
cmd.save('2XPI.fasta','chain A')
cmd.quit()
|
smoitra87/mrfs
|
data/multicols/PF12569/get_fasta_from_pdb.py
|
Python
|
apache-2.0
| 166
|
[
"PyMOL"
] |
c30483d7493c52cf792e95209370eb0b3df590a174ff34b34f68da8cb0c76fba
|
import nest
import pong
import numpy as np
import pickle
import time
import os
STDP_AMPLITUDE = 5.0 # arb. unit
STDP_TAU = 20.0 # ms
ONLY_CAUSAL = True
EPSC = 90.0
EPSC_MAX = 200.0
EPSC_BG = 200.0
BG_RATE = 0.5
SYN_DICT = {'weight': EPSC}
MEAN_RUNS = 5.0 # reward averaged over this no. of runs
NEURON_DICT = {"tau_m": 10., "V_th": -68.0}
REWARD_OFFSET = 0
class Network:
def get_weights(self, neuron):
conns = nest.GetConnections(self.input_neurons, target=[self.motor_neurons[neuron]])
conn_vals = nest.GetStatus(conns, ["weight"])
conn_vals = np.array(conn_vals)
return conn_vals
def get_weights_flat(self):
conns = nest.GetConnections(self.input_neurons)
weights = nest.GetStatus(conns, "weight")
#print weights
return weights
def set_weights_flat(self, weights):
conns = nest.GetConnections(self.input_neurons)
nest.SetStatus(conns, [{"weight": w} for w in weights])
def set_weights(self, weights, neuron):
#print weights, np.shape(weights)
conns = nest.GetConnections(self.input_neurons, target=[self.motor_neurons[neuron]])
for conn, wgt in zip(conns, weights):
nest.SetStatus([conn], {"weight": float(wgt)})
def get_rates(self):
events = np.array(nest.GetStatus(self.spikedetector, ["n_events"]))
events = [x[0] for x in events]
#print events
return events
def get_spiketrains(self):
"""Extract spikes from spikedetector and map neuron GIDs to neuron number
inside population nrnpop"""
events = np.array(nest.GetStatus(self.spikedetector, ["events"]))
out = [[] for nrn in range(self.no_neurons)]
for neuron, neuron_events in enumerate(events):
for time in neuron_events[0]['times']:
out[neuron].append(time)
return out
def calculate_stdp(self, pre_spikes, post_spikes, only_causal=True):
facilitation = 0
depression = 0
positions = np.searchsorted(pre_spikes, post_spikes)
for spike, position in zip(post_spikes, positions):
if position > 0:
before_spike = pre_spikes[position-1]
facilitation += STDP_AMPLITUDE * np.exp(-(spike-before_spike)/STDP_TAU)
if position < len(pre_spikes):
after_spike = pre_spikes[position]
depression += STDP_AMPLITUDE * np.exp(-(after_spike-spike)/STDP_TAU)
if only_causal:
return facilitation
else:
return facilitation - depression
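    # worked example of calculate_stdp (illustrative values): with
    # STDP_AMPLITUDE = 5.0 and STDP_TAU = 20.0, pre_spikes = [10.0] and
    # post_spikes = [12.0] give facilitation = 5.0 * exp(-2.0 / 20.0) ~= 4.52,
    # and depression = 0 since no pre spike follows the post spike.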
def set_input_spiketrain(self, target_cell, type="uniform"):
'''
Set spike train encoding position of ball along y-axis.
Spike train will be exclusively sent via the input neuron corresponding to
the target cell. type can be either "poisson" or "uniform" and spikes
will be distributed accordingly.
'''
no_spikes = int(np.floor(self.poll_time / 10.0)) # Fixed for now
if type=="uniform":
spacing = self.poll_time / no_spikes
spikes = [np.round(1. + x*spacing,1) for x in range(no_spikes)]
#print spikes
if type=="poisson":
pass # TODO
for input_neuron in range(self.no_neurons): # Reset inputs
nest.SetStatus([self.input_generator[input_neuron]], {'spike_times': []})
nest.SetStatus([self.input_generator[target_cell]],
{'spike_times': spikes})
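        # e.g. with the default poll_time of 200 ms this yields 20 uniformly
        # spaced spikes, [1.0, 11.0, 21.0, ..., 191.0], all delivered through
        # the single input neuron encoding the ball's grid cell.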
def run_simulation(self):
self.weights = []
for neuron in range(self.no_neurons):
self.weights.append(self.get_weights(neuron))
nest.Simulate(self.poll_time)
#potentials = nest.GetStatus(self.voltmeter, "events")[1]["V_m"]
def apply_reward(self, reward):
self.correlation_array = []
for connection in nest.GetConnections(self.input_neurons):
# iterate all connections originating from input neurons
# connection[0]: source, connection[1]: target
input_neuron = connection[0]
motor_neuron = connection[1]
input_gen = nest.GetConnections(self.input_generator, target=[input_neuron])[0][0]
#print input_gen
pre_spikes = np.array(nest.GetStatus([input_gen], "spike_times"))[0]
post_detector = nest.GetConnections([motor_neuron], target=self.spikedetector)[0][1]
#print post_detector
post_events = nest.GetStatus([post_detector], "events")
post_spikes = []
for time in post_events[0]["times"]:
post_spikes.append(time)
#print pre_spikes, post_spikes
correlation = self.calculate_stdp(pre_spikes, post_spikes, only_causal=ONLY_CAUSAL)
self.correlation_array.append(correlation)
old_weight = np.array(nest.GetStatus([connection], "weight"))[0]
new_weight = old_weight + correlation * reward
if new_weight > EPSC_MAX:
new_weight = EPSC_MAX
#if new_weight != 90.0:
# print new_weight
#print correlation, old_weight, new_weight
nest.SetStatus([connection], {"weight": float(new_weight)})
def reset_rng(self):
old_seed = nest.GetStatus([0])[0]['rng_seeds'][0]
nest.SetStatus([0], {'rng_seeds': (old_seed + 1,)})
#nest.SetStatus(self.spikedetector, {"n_events": 0})
def reset_network(self, initial=False):
if not initial:
weights = self.get_weights_flat()
#print weights
old_seed = nest.GetStatus([0])[0]['rng_seeds'][0]
nest.ResetKernel()
nest.SetStatus([0], {'rng_seeds': (old_seed + 1,)})
self.input_neurons = nest.Create("parrot_neuron", self.no_neurons)
self.input_generator = nest.Create("spike_generator", self.no_neurons)
self.motor_neurons = nest.Create("iaf_neuron", self.no_neurons, params=NEURON_DICT)
self.spikedetector = nest.Create("spike_detector", self.no_neurons)
self.voltmeter = nest.Create("voltmeter", self.no_neurons)
self.background_generator = nest.Create("poisson_generator", self.no_neurons)
self.background_neurons = nest.Create("parrot_neuron", self.no_neurons)
nest.SetStatus(self.background_generator, {"rate": BG_RATE})
nest.SetStatus(self.voltmeter, {"withgid": True, "withtime": True})
nest.Connect(self.motor_neurons, self.spikedetector, {'rule':'one_to_one'})
nest.Connect(self.input_neurons, self.motor_neurons, {"rule": 'all_to_all'}, SYN_DICT)
nest.Connect(self.input_generator, self.input_neurons, {'rule':'one_to_one'})
nest.Connect(self.voltmeter, self.motor_neurons, {'rule':'one_to_one'})
nest.Connect(self.background_generator, self.background_neurons, {'rule':'one_to_one'})
nest.Connect(self.background_neurons, self.motor_neurons, { 'rule':'one_to_one'}, {"weight": EPSC_BG})
nest.set_verbosity("M_WARNING")
if not initial:
self.set_weights_flat(weights)
def __init__(self, poll_time=200, no_neurons=32):
self.no_neurons = no_neurons
self.poll_time = poll_time
self.reset_network(initial=True)
self.correlation_array = []
self.weights = [self.get_weights(x) for x in range(self.no_neurons)]
class AIPong:
'''
'''
def poll_network(self):
'''
Returns grid cell network wants to move to.
Find this cell by finding the winning (highest rate) motor neuron.
'''
if self.debug:
print "Running simulation..."
self.network.run_simulation()
rates = self.network.get_rates()
if self.debug:
print "Got rates: ", rates
winning_neuron = int(np.argmax(rates))
self.target_cell = winning_neuron
def adjust_puck_movement(self):
if self.game.right_puck.get_cell()[1] < self.target_cell:
self.game.right_puck.direction = pong.MOVE_UP
if self.game.right_puck.get_cell()[1] == self.target_cell:
self.game.right_puck.direction = pong.DONT_MOVE
if self.game.right_puck.get_cell()[1] > self.target_cell:
self.game.right_puck.direction = pong.MOVE_DOWN
def reward_network_by_win(self):
'''
Reward network based on winning/losing an entire round.
'''
if self.game.result == pong.LEFT_WIN:
self.network.apply_reward(-1)
if self.game.result == pong.RIGHT_WIN:
self.network.apply_reward(+1)
def reward_network_by_move(self):
'''
Reward network based on whether the correct cell was targeted.
'''
index = self.ball_cell
def calc_reward(bare_reward):
self.reward = bare_reward + REWARD_OFFSET
self.mean_reward[index] = self.mean_reward[index] + (self.reward-self.mean_reward[index])/MEAN_RUNS
self.success = self.reward - self.mean_reward[index]
self.network.apply_reward(self.success)
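        # calc_reward keeps a per-cell running average,
        #   mean <- mean + (reward - mean) / MEAN_RUNS,
        # i.e. an exponential moving average with smoothing factor
        # 1 / MEAN_RUNS = 0.2; the weight update is then driven by how much
        # the current reward exceeds that average.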
if self.target_cell == self.ball_cell:
calc_reward(+1)
elif self.target_cell == self.ball_cell+1 or self.target_cell == self.ball_cell-1:
calc_reward(+0.66)
else:
calc_reward(0)
if self.debug:
print "Applying reward=%.3f, mean reward=%.3f, success=%.3f" % (self.reward, self.mean_reward[index], self.success)
def reset_network(self):
nest.ResetKernel()
self.network = Network()
def get_parameters(self):
parameter_dict = {"EPSC": EPSC, "EPSC_MAX": EPSC_MAX, "EPSC_BG": EPSC_BG, \
"STDP_AMPLITUDE": STDP_AMPLITUDE, "STDP_TAU": STDP_TAU, "SYN_DICT": SYN_DICT, \
"NEURON_DICT": NEURON_DICT, "MEAN_RUNS": MEAN_RUNS, "ONLY_CAUSAL": ONLY_CAUSAL,
"REWARD_OFFSET": REWARD_OFFSET, "BG_RATE": BG_RATE}
return parameter_dict
def run_games(self, save=False, save_filename="rewards.pkl", max_runs=np.inf):
run = 0
expdir = str(time.time())
parameters = self.get_parameters()
if save:
os.mkdir(expdir)
file = open("%s/parameters.pkl" % expdir, "w")
pickle.dump(parameters, file)
file.close()
self.correlations = []
self.weight_history = []
self.rewards = []
self.mean_reward = np.array([2./32 for _ in range(self.network.no_neurons)])
for game in range(self.no_games):
self.game = pong.GameOfPong()
self.game.start()
#i = 0
#while i < 100:
#i+=1
while self.game.dead == False and run < max_runs:
if save:
file = open("%s/%s_%d" %(expdir, save_filename, run), "w")
pickle.dump((self.mean_reward, self.network.correlation_array, self.network.weights), file)
file.close()
self.ball_cell = self.game.ball.get_cell()[1]
if self.debug:
print "Run #%d, Ball in cell %d" % (run, self.ball_cell)
self.network.set_input_spiketrain(self.ball_cell)
#self.game.pause = True
self.poll_network()
self.game.pause = False
#print self.network.get_spiketrains()
if self.debug:
print "Network wants to go to cell %d" % self.target_cell
self.adjust_puck_movement()
if self.reward_every_move:
self.reward_network_by_move()
#self.correlations.append(self.network.correlation_array)
#self.weight_history.append(self.network.weights)
#self.rewards.append(self.mean_reward)
self.network.reset_network()
run += 1
if self.debug:
"Game %d ended with %d" % (game, self.game.result)
self.reward_network_by_win()
#exit()
def __init__(self, no_games=100, debug=False, reward_every_move=True):
self.game = pong.GameOfPong()
self.network = Network()
self.debug = debug
self.reward_every_move = reward_every_move
self.no_games = no_games
if __name__ == "__main__":
aipong = AIPong(debug=True)
aipong.run_games(save=True, max_runs=20000, save_filename="data.pkl")
|
yungwundi/pang
|
pang.py
|
Python
|
gpl-3.0
| 12,926
|
[
"NEURON"
] |
f873a04928e8fa6d4e42596f573c060a95105b674271285e2bbfc4f979cfb830
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
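# In symbols, with alpha = 1 (Laplace smoothing) the quantity checked above is
#   P(x_i = 1 | y = c) = (N_ci + 1) / (N_c + 2)
# so feature_log_prob_ equals log(feature_count_ + 1) - log(class_count_ + 2).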
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
|
kashif/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
Python
|
bsd-3-clause
| 17,897
|
[
"Gaussian"
] |
0af47f1a5293872d166557bc760fa67378f4e0f55709b9933207852391757ad2
|
#!/usr/bin/env python
#
# @file GeneralFunctions.py
# @brief class to create general functions
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2018 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
from ...util import strFunctions, global_variables, query
class GeneralFunctions():
"""Class for general functions"""
def __init__(self, language, is_cpp_api, is_list_of, class_object, lv_info=[]):
self.language = language
self.cap_language = language.upper()
self.package = class_object['package']
self.class_name = class_object['name']
self.has_std_base = class_object['has_std_base']
self.base_class = class_object['baseClass']
self.is_cpp_api = is_cpp_api
self.is_list_of = is_list_of
self.is_plugin = False
if 'is_plugin' in class_object:
self.is_plugin = class_object['is_plugin']
self.is_doc_plugin = False
if 'is_doc_plugin' in class_object:
self.is_doc_plugin = class_object['is_doc_plugin']
self.ext_class = ''
if self.is_plugin:
self.ext_class = class_object['sbase']
if is_list_of:
self.child_name = class_object['lo_child']
else:
self.child_name = ''
if is_cpp_api:
self.object_name = self.class_name
self.object_child_name = self.child_name
else:
if is_list_of:
self.object_name = 'ListOf_t'
else:
self.object_name = self.class_name + '_t'
self.object_child_name = self.child_name + '_t'
self.element_name = ''
self.override_name = False
if 'elementName' in class_object and not is_list_of:
self.element_name = class_object['elementName']
if self.element_name == '':
self.override_name = False
else:
self.override_name = not \
strFunctions.compare_no_case(self.element_name,
self.class_name)
if not global_variables.is_package:
self.override_name = True
if is_list_of:
self.element_name = \
strFunctions.lower_list_of_name_no_prefix(class_object['lo_child'])
else:
self.element_name = class_object['elementName']
self.typecode = class_object['typecode']
self.attributes = class_object['class_attributes']
self.sid_refs = class_object['sid_refs']
self.unit_sid_refs = class_object['unit_sid_refs']
self.child_lo_elements = class_object['child_lo_elements']
self.child_elements = class_object['child_elements']
self.has_math = class_object['has_math']
self.has_array = class_object['has_array']
self.overwrites_children = class_object['overwrites_children']
# we do overwrite if we have concrete
if not self.overwrites_children and 'concretes' in class_object:
if len(class_object['concretes']) > 0:
self.overwrites_children = True
self.has_children = class_object['has_children']
self.has_only_math = class_object['has_only_math']
self.num_non_std_children = class_object['num_non_std_children']
self.num_children = class_object['num_children']
self.std_base = class_object['std_base']
self.required = 'false'
if 'is_doc_plugin' in class_object:
if class_object['reqd']:
self.required = 'true'
self.version_attributes = []
if 'num_versions' in class_object and class_object['num_versions'] > 1:
self.has_multiple_versions = True
for i in range(0, class_object['num_versions']):
self.version_attributes.append(
query.get_version_attributes(class_object['attribs'], i))
else:
self.has_multiple_versions = False
self.lv_info = lv_info
self.document = False
if 'document' in class_object:
self.document = class_object['document']
# useful variables
if not self.is_cpp_api and self.is_list_of:
self.struct_name = self.object_child_name
else:
self.struct_name = self.object_name
self.abbrev_parent = strFunctions.abbrev_name(self.object_name)
if self.is_cpp_api is False:
self.true = '@c 1 (true)'
self.false = '@c 0 (false)'
else:
self.true = '@c true'
self.false = '@c false'
# status
if self.is_cpp_api:
if self.is_list_of:
self.status = 'cpp_list'
else:
self.status = 'cpp_not_list'
else:
if self.is_list_of:
self.status = 'c_list'
else:
self.status = 'c_not_list'
########################################################################
# Functions for writing renamesidref
# function to write rename_sid_ref
def write_rename_sidrefs(self):
# only write is not list of and has sidrefs
if not self.status == 'cpp_not_list':
return
elif len(self.sid_refs) == 0 and len(self.unit_sid_refs) == 0\
and not self.has_math:
return
# create comment parts
title_line = '@copydoc doc_renamesidref_common'
params = []
return_lines = []
additional = []
# create the function declaration
function = 'renameSIdRefs'
return_type = 'void'
arguments = ['const std::string& oldid', 'const std::string& newid']
# create the function implementation
code = []
for i in range(0, len(self.sid_refs)):
ref = self.sid_refs[i]
implementation = ['isSet{0}() && {1} == '
'oldid'.format(ref['capAttName'],
ref['memberName']),
'set{0}(newid)'.format(ref['capAttName'])]
code.append(dict({'code_type': 'if', 'code': implementation}))
for i in range(0, len(self.unit_sid_refs)):
ref = self.unit_sid_refs[i]
implementation = ['isSet{0}() && {1} == '
'oldid'.format(ref['capAttName'],
ref['memberName']),
'set{0}(newid)'.format(ref['capAttName'])]
code.append(dict({'code_type': 'if', 'code': implementation}))
if self.has_math:
implementation = ['isSetMath()',
'mMath->renameSIdRefs(oldid, newid)']
code.append(self.create_code_block('if', implementation))
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': False,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
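    # for orientation, the 'if' blocks assembled above correspond to generated
    # C++ along these lines (attribute name "variable" is hypothetical):
    #   if (isSetVariable() && mVariable == oldid)
    #   {
    #     setVariable(newid);
    #   }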
########################################################################
# Functions for writing get element/typecode functionss
# function to write getElement
def write_get_element_name(self):
if not self.is_cpp_api:
return
# create comment parts
if self.override_name:
name = self.element_name
else:
name = strFunctions.lower_first(self.object_name)
title_line = 'Returns the XML element name of this {0} object.'\
            .format(self.object_name)
params = ['For {0}, the XML element name is always @c '
'\"{1}\".'.format(self.object_name, name)]
return_lines = ['@return the name of this element, i.e. @c \"{0}\"'
'.'.format(name)]
additional = []
# create the function declaration
arguments = []
function = 'getElementName'
return_type = 'const std::string&'
# create the function implementation
if self.overwrites_children:
implementation = ['return mElementName']
else:
implementation = ['static const string name = \"{0}\"'.format(name),
'return name']
code = [dict({'code_type': 'line', 'code': implementation})]
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': True,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
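    # as a sketch, for a hypothetical class "FooBar" whose element name is not
    # overridden, the generated C++ body would read:
    #   static const string name = "fooBar";
    #   return name;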
# function to write getTypeCode
def write_get_typecode(self):
if not self.is_cpp_api:
return
# create comment
        lib = global_variables.library_name
if self.cap_language == 'SBML' or self.cap_language == 'SEDML':
lib = 'lib{0}'.format(self.cap_language)
title_line = 'Returns the {0} type code for this {1} object.'\
.format(lib, self.object_name)
params = ['@copydetails doc_what_are_typecodes']
return_lines = ['@return the {0} type code for this '
'object:'.format(self.cap_language)]
additional = []
if global_variables.is_package:
if self.is_list_of:
line = '@{0}constant{2}{1}_LIST_OF, ' \
'{1}TypeCode_t{3}.'.format(self.language, self.cap_language,
'{', '}')
else:
line = '@{0}constant{1}{2}, {3}{4}' \
'TypeCode_t{5}.'.format(self.language, '{', self.typecode,
self.cap_language, self.package, '}')
else:
if self.is_list_of:
line = '@{0}constant{2}{1}_LIST_OF, ' \
'{4}TypeCode_t{3}.'.format(self.language, self.cap_language,
'{', '}', global_variables.prefix)
else:
line = '@{0}constant{1}{2}, {3}' \
'TypeCode_t{4}.'.format(self.language, '{', self.typecode,
global_variables.prefix, '}')
return_lines.append(line)
additional.append('@copydetails doc_warning_typecodes_not_unique')
if not self.is_list_of:
additional.append(' ')
additional.append('@see getElementName()')
if global_variables.is_package:
additional.append('@see getPackageName()')
# create function declaration
function = 'getTypeCode'
arguments = []
return_type = 'int'
# create the function implementation
if self.is_list_of:
implementation = ['return {0}_LIST_OF'.format(self.cap_language)]
else:
implementation = ['return {0}'.format(self.typecode)]
code = [dict({'code_type': 'line', 'code': implementation})]
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': True,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
# function to write getItemTypeCode
def write_get_item_typecode(self):
# only needed for cpp list of class
if not self.status == 'cpp_list':
return
# create comment
title_line = 'Returns the lib{0} type code for the {0} objects ' \
'contained in this {1} object.'.format(self.cap_language,
self.object_name)
params = ['@copydetails doc_what_are_typecodes']
return_lines = ['@return the {0} typecode for the '
'objects contained in this '
'{1}:'.format(self.cap_language, self.object_name)]
additional = []
if global_variables.is_package:
line = '@{0}constant{1}{2}, {3}{4}TypeCode_t{5}.' \
''.format(self.language, '{', self.typecode, self.cap_language,
self.package, '}')
else:
line = '@{0}constant{1}{2}, {3}TypeCode_t{4}.'.format(self.language, '{', self.typecode,
global_variables.prefix, '}')
return_lines.append(line)
additional.append('@copydetails doc_warning_typecodes_not_unique')
additional.append(' ')
additional.append('@see getElementName()')
if global_variables.is_package:
additional.append('@see getPackageName()')
# create function declaration
function = 'getItemTypeCode'
arguments = []
return_type = 'int'
# create the function implementation
implementation = ['return {0}'.format(self.typecode)]
code = [dict({'code_type': 'line', 'code': implementation})]
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': True,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
########################################################################
# Functions for writing checking necessary children status
# function to write hasRequiredAttributes
def write_has_required_attributes(self):
if self.has_std_base and len(self.attributes) == 0:
return
# create comment parts
title_line = 'Predicate returning {0} if all the required ' \
'attributes for this {1} object have been set.'\
.format(self.true, self.object_name)
params = []
if not self.is_cpp_api:
params.append('@param {0} the {1} structure.'
.format(self.abbrev_parent, self.object_name))
return_lines = ['@return {0} to indicate that all the required '
'attributes of this {1} have been set, otherwise '
'{2} is returned.'.format(self.true, self.object_name,
self.false)]
reqd_atts_names = []
additional = []
for i in range(0, len(self.attributes)):
if self.attributes[i]['reqd']:
att_name = self.attributes[i]['xml_name']
reqd_atts_names.append(att_name)
if len(reqd_atts_names) > 0:
additional = [' ', '@note The required attributes for the {0} object'
' are:'.format(self.object_name)]
for reqd_atts_name in reqd_atts_names:
additional.append('@li \"{0}\"'.format(reqd_atts_name))
# create the function declaration
if self.is_cpp_api:
function = 'hasRequiredAttributes'
return_type = 'bool'
else:
function = '{0}_hasRequiredAttributes'.format(self.class_name)
return_type = 'int'
arguments = []
if not self.is_cpp_api:
arguments.append('const {0} * {1}'
.format(self.object_name, self.abbrev_parent))
# create the function implementation
if self.is_cpp_api:
if self.has_std_base:
all_present = 'true'
else:
all_present = '{0}::hasRequired' \
'Attributes()'.format(self.base_class)
code = [dict({'code_type': 'line',
'code': ['bool all'
'Present = {0}'.format(all_present)]})]
if self.has_multiple_versions:
[reqd_atts, reqd_versions] = self.get_multiple_version_info()
if len(reqd_versions) > 0:
implementation = ['unsigned int level = getLevel()', 'unsigned int version = getVersion()',
'unsigned int pkgVersion = getPackageVersion()']
code.append(self.create_code_block('line', implementation))
for att in reqd_atts:
implementation = ['isSet{0}() == false'.format(att), 'allPresent = false']
code.append(dict({'code_type': 'if', 'code': implementation}))
for att in reqd_versions:
lv_needed = []
for i in range(0, len(att['versions'])):
if att['versions'][i]:
lv_needed.append(i)
if len(lv_needed) > 1:
line = ''
for lv in lv_needed:
level = self.lv_info[lv]['core_level']
vers = self.lv_info[lv]['core_version']
pkg = self.lv_info[lv]['pkg_version']
this_line = 'level == {0} && version == {1} && pkgVersion == {2}'.format(level, vers, pkg)
line = line + '({0}) || '.format(this_line)
length = len(line)
line = line[0:length-4]
else:
level = self.lv_info[lv_needed[0]]['core_level']
vers = self.lv_info[lv_needed[0]]['core_version']
pkg = self.lv_info[lv_needed[0]]['pkg_version']
line = 'level == {0} && version == {1} && pkgVersion == {2}'.format(level, vers, pkg)
implementation = ['isSet{0}() == false'.format(att['cap']), 'allPresent = false']
nested_if = dict({'code_type': 'if', 'code': implementation})
code.append(dict({'code_type': 'if', 'code': [line, nested_if]}))
else:
for i in range(0, len(self.attributes)):
att = self.attributes[i]
if att['reqd']:
implementation = ['isSet{0}() == '
'false'.format(att['capAttName']),
'allPresent = false']
code.append(dict({'code_type': 'if',
'code': implementation}))
code.append(dict({'code_type': 'line',
'code': ['return allPresent']}))
else:
line = ['return ({0} != NULL) ? static_cast<int>({0}->'
'hasRequiredAttributes()) : 0'.format(self.abbrev_parent)]
code = [dict({'code_type': 'line', 'code': line})]
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': True,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
def get_multiple_version_info(self):
num_versions = len(self.version_attributes)
reqd_atts = []
required_attributes = []
for attribute in self.attributes:
name = attribute['name']
reqd_version = []
for i in range(0, num_versions):
reqd_version.append(self.get_reqd_in_version(i, name))
if True in reqd_version:
if False in reqd_version:
# sometimes required sometimes not
required_attributes.append(dict({'name': name, 'versions': reqd_version, 'cap': attribute['capAttName']}))
else:
# always required
reqd_atts.append(attribute['capAttName'])
return [reqd_atts, required_attributes]
def get_reqd_in_version(self, i, name):
match = False
j = 0
while not match and j < len(self.version_attributes[i]):
att = self.version_attributes[i][j]
if att['name'] == name:
match = True
break
j = j + 1
if not match:
return False
else:
return self.version_attributes[i][j]['reqd']
# function to write hasRequiredElements
def write_has_required_elements(self):
if not self.has_children:
return
has_reqd_children = False
# if this is not a derived class and has no required elements, don't write the function
if not self.has_std_base:
has_reqd_children = True
if not has_reqd_children:
for att in self.child_elements:
if att['reqd']:
has_reqd_children = True
for att in self.child_lo_elements:
if att['reqd']:
has_reqd_children = True
if not has_reqd_children:
return
# create comment parts
title_line = 'Predicate returning {0} if all the required ' \
'elements for this {1} object have been set.'\
.format(self.true, self.object_name)
params = []
if not self.is_cpp_api:
params.append('@param {0} the {1} structure.'
.format(self.abbrev_parent, self.object_name))
return_lines = ['@return {0} to indicate that all the required '
'elements of this {1} have been set, otherwise '
'{2} is returned.'.format(self.true, self.object_name,
self.false)]
additional = [' ', '@note The required elements for the {0} object'
' are:'.format(self.object_name)]
for i in range(0, len(self.child_elements)):
if self.child_elements[i]['reqd']:
additional.append('@li \"{0}\"'
.format(self.child_elements[i]['name']))
for i in range(0, len(self.child_lo_elements)):
if self.child_lo_elements[i]['reqd']:
additional.append('@li \"{0}\"'
.format(self.child_lo_elements[i]['name']))
# create the function declaration
if self.is_cpp_api:
function = 'hasRequiredElements'
return_type = 'bool'
else:
function = '{0}_hasRequiredElements'.format(self.class_name)
return_type = 'int'
arguments = []
if not self.is_cpp_api:
arguments.append('const {0} * {1}'
.format(self.object_name, self.abbrev_parent))
# create the function implementation
if self.is_cpp_api:
if self.has_std_base:
all_present = 'true'
else:
all_present = '{0}::hasRequired' \
'Elements()'.format(self.base_class)
code = [dict({'code_type': 'line',
'code': ['bool allPresent '
'= {0}'.format(all_present)]})]
for i in range(0, len(self.child_elements)):
att = self.child_elements[i]
if att['reqd']:
implementation = ['isSet{0}() == '
'false'.format(att['capAttName']),
'allPresent = false']
code.append(dict({'code_type': 'if',
'code': implementation}))
for i in range(0, len(self.child_lo_elements)):
att = self.child_lo_elements[i]
if att['reqd']:
name = strFunctions.upper_first(att['pluralName'])
implementation = ['getNum{0}() == '
'0'.format(name),
'allPresent = false']
code.append(dict({'code_type': 'if',
'code': implementation}))
code.append(dict({'code_type': 'line',
'code': ['return allPresent']}))
else:
line = ['return ({0} != NULL) ? static_cast<int>({0}->'
'hasRequiredElements()) : 0'.format(self.abbrev_parent)]
code = [dict({'code_type': 'line', 'code': line})]
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': True,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
########################################################################
# Functions for writing general functions: writeElements, accept,
# setDocument, write (if we have an array)
def has_child_elements(self):
if self.child_elements and len(self.child_elements) > 0:
return True
elif self.child_lo_elements and len(self.child_lo_elements) > 0:
return True
else:
return False
# function to write writeElements
def write_write_elements(self):
if not self.status == 'cpp_not_list':
if not (self.status == 'cpp_list' and len(self.child_elements) > 0):
return
elif self.is_doc_plugin and not self.has_child_elements():
return
# create comment parts
title_line = 'Write any contained elements'
params = []
return_lines = []
additional = []
# create the function declaration
function = 'writeElements'
return_type = 'void'
if global_variables.is_package:
arguments = ['XMLOutputStream& stream']
else:
arguments = ['LIBSBML_CPP_NAMESPACE_QUALIFIER XMLOutputStream& stream']
# create the function implementation
base = self.base_class
if not self.is_plugin:
code = [dict({'code_type': 'line',
'code': ['{0}::writeElements(stream)'.format(base)]})]
else:
code = []
for i in range(0, len(self.child_elements)):
att = self.child_elements[i]
if att['element'] == 'ASTNode':
if global_variables.is_package:
line = ['writeMathML(getMath(), stream, get{0}'
'Namespaces())'.format(global_variables.prefix)]
else:
line = ['writeMathML(getMath(), stream, NULL)']
elif att['element'] == 'XMLNode':
line = ['stream.startElement(\"{0}\")'.format(att['name']),
'stream << *{0}'.format(att['memberName']),
'stream.endElement(\"{0}\")'.format(att['name'])]
else:
line = ['{0}->write(stream)'.format(att['memberName'])]
implementation = ['isSet{0}() == true'.format(att['capAttName'])]
implementation += line
code.append(dict({'code_type': 'if',
'code': implementation}))
for i in range(0, len(self.child_lo_elements)):
att = self.child_lo_elements[i]
if self.is_plugin:
name = att['pluralName'][6:]
else:
# hack for spatial csg elements
if self.package == 'Spatial' and \
att['pluralName'].startswith('csg'):
name = 'CSG' + att['pluralName'][3:]
else:
if 'used_child_name' in att:
name = strFunctions.upper_first(strFunctions.plural(att['used_child_name']))
elif 'xml_name' in att and att['xml_name'] != att['name']:
name = strFunctions.upper_first(strFunctions.plural(att['xml_name']))
else:
name = strFunctions.remove_prefix(strFunctions.upper_first(att['pluralName']))
# fix for SBGN output, but this may need sorting out
# name = strFunctions.plural(att['capAttName'])
if att['type'] == 'inline_lo_element':
implementation = ['unsigned int i = 0; i < getNum{0}(); i++'.format(name),
'get{0}(i)->write(stream)'.format(strFunctions.singular(name))]
code.append(dict({'code_type': 'for',
'code': implementation}))
else:
qualifier = '.'
if 'recursive_child' in att and att['recursive_child']:
qualifier = '->'
implementation = ['getNum{0}() > '
'0'.format(name),
'{0}{1}write(stream)'.format(att['memberName'], qualifier)]
code.append(dict({'code_type': 'if',
'code': implementation}))
if not self.is_plugin and global_variables.is_package:
code.append(dict({'code_type': 'line',
'code': ['{0}::writeExtension'
'Elements'
'(stream)'.format(self.std_base)]}))
# look and see if we have a vector attribute which would need
# to be written here
for attrib in self.attributes:
if 'isVector' in attrib and attrib['isVector']:
code.append(self.write_write_vector(attrib))
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': True,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
def write_write_vector(self, attrib):
implementation = ['std::vector<{0}>::const_iterator it = {1}.begin(); '
'it != {1}.end(); ++it'.format(attrib['element'], attrib['memberName']),
'stream.startElement(\"{0}\")'.format(attrib['name']),
'stream.setAutoIndent(false)',
'stream << \" \" << *it << \" \"',
'stream.endElement(\"{0}\")'.format(attrib['name']),
'stream.setAutoIndent(true)']
nested_for = self.create_code_block('for', implementation)
implementation = ['has{0}()'.format(strFunctions.plural(attrib['capAttName'])),
nested_for]
code = self.create_code_block('if', implementation)
return code
# function to write accept
def write_accept(self):
if not self.status == 'cpp_not_list':
return
# create comment parts
title_line = 'Accepts the given ' \
'{0}Visitor'.format(global_variables.prefix)
params = []
return_lines = []
additional = []
# create the function declaration
function = 'accept'
return_type = 'bool'
arguments = ['{0}Visitor& v'.format(global_variables.prefix)]
# create the function implementation
simple = False
# cover cases where a doc plugin is used (no children but not simple)
# or there are children but they are non std based children (simple)
if self.has_children:
if self.num_children == self.num_non_std_children:
simple = True
else:
if not self.is_plugin:
simple = True
if not global_variables.is_package:
implementation = ['return false']
code = [dict({'code_type': 'line', 'code': implementation})]
elif simple:
implementation = ['return v.visit(*this)']
code = [dict({'code_type': 'line', 'code': implementation})]
else:
if not self.is_plugin:
code = [dict({'code_type': 'line',
'code': ['v.visit(*this)']})]
else:
obj = strFunctions.abbrev_name(self.ext_class)
implementation = ['const {0}* {1} = static_cast<const {0}*>'
'(this->getParent{2}Object()'
')'.format(self.ext_class, obj,
self.cap_language),
'v.visit(*{0})'.format(obj),
'v.leave(*{0})'.format(obj)]
code = [self.create_code_block('line', implementation)]
for i in range(0, len(self.child_elements)):
elem = self.child_elements[i]
implementation = ['{0} != NULL'.format(elem['memberName']),
'{0}->accept(v)'.format(elem['memberName'])]
code.append(dict({'code_type': 'if',
'code': implementation}))
for i in range(0, len(self.child_lo_elements)):
att = self.child_lo_elements[i]
qualifier = '.'
if 'recursive_child' in att and att['recursive_child']:
qualifier = '->'
implementation = ['{0}{1}accept(v)'.format(att['memberName'], qualifier)]
code.append(dict({'code_type': 'line',
'code': implementation}))
if not self.is_plugin:
code.append(dict({'code_type': 'line',
'code': ['v.leave(*this)', 'return true']}))
else:
code.append(self.create_code_block('line', ['return true']))
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': True,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
# function to write setDocument
def write_set_document(self):
if not self.status == 'cpp_not_list':
return
elif self.is_doc_plugin and not self.has_child_elements():
return
# create comment parts
title_line = 'Sets the parent ' \
'{0}'.format(global_variables.document_class)
params = []
return_lines = []
additional = []
# create the function declaration
function = 'set{0}'.format(global_variables.document_class)
return_type = 'void'
arguments = ['{0}* d'.format(global_variables.document_class)]
# create the function implementation
if self.base_class:
line = '{0}::set{1}(d)'.format(self.base_class,
global_variables.document_class)
implementation = [line]
code = [dict({'code_type': 'line', 'code': implementation})]
else:
code = []
if self.has_children and not self.has_only_math:
for i in range(0, len(self.child_elements)):
att = self.child_elements[i]
if 'is_ml' in att and att['is_ml']:
continue
else:
implementation = ['{0} != NULL'.format(att['memberName']),
'{0}->{1}'
'(d)'.format(att['memberName'], function)]
code.append(self.create_code_block('if', implementation))
for i in range(0, len(self.child_lo_elements)):
att = self.child_lo_elements[i]
symbol = '.'
if 'recursive_child' in att and att['recursive_child']:
symbol = '->'
implementation = ['{0}{2}{1}'
'(d)'.format(att['memberName'], function, symbol)]
code.append(dict({'code_type': 'line',
'code': implementation}))
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': False,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
# function to write 'write' (needed when the class has an array)
def write_write(self):
if not self.has_array:
return
elif not self.status == 'cpp_not_list':
return
# create comment parts
title_line = 'used to write arrays'
params = []
return_lines = []
additional = []
# create the function declaration
function = 'write'
return_type = 'void'
if global_variables.is_package:
arguments = ['XMLOutputStream& stream']
else:
arguments = ['LIBSBML_CPP_NAMESPACE_QUALIFIER XMLOutputStream& stream']
# create the function implementation
# find the array attribute
name = ''
member = ''
array_type = ''
for attrib in self.attributes:
if attrib['isArray']:
name = attrib['capAttName']
member = attrib['memberName']
array_type = attrib['element']
if array_type == 'int':
array_type = 'long'
code = [self.create_code_block('line',
['stream.startElement(getElementName(), '
'getPrefix())',
'writeAttributes(stream)'])]
nested_for = self.create_code_block(
'for', ['int i = 0; i < m{0}Length; ++i'.format(name),
'stream << ({0}){1}[i] << \" \"'
''.format(array_type, member)])
implementation = ['isSet{0}()'.format(name), nested_for]
code.append(self.create_code_block('if', implementation))
code.append(self.create_code_block(
'line', ['stream.endElement(getElementName(), getPrefix())']))
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': True,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
# function to write updateSBMLNamespace
def write_update_ns(self):
if not self.status == 'cpp_not_list':
return
elif not self.has_child_elements():
return
# create comment parts
title_line = 'Updates the namespaces when setLevelVersion is used'
params = []
return_lines = []
additional = []
# create the function declaration
function = 'updateSBMLNamespace'
return_type = 'void'
arguments = ['const std::string& package', 'unsigned int level', 'unsigned int version']
# create the function implementation
if self.base_class:
line = '{0}::updateSBMLNamespace(package, level, version)'.format(self.base_class)
implementation = [line]
code = [dict({'code_type': 'line', 'code': implementation})]
else:
code = []
if self.has_children and not self.has_only_math:
for i in range(0, len(self.child_elements)):
att = self.child_elements[i]
if 'is_ml' in att and att['is_ml']:
continue
else:
implementation = ['{0} != NULL'.format(att['memberName']),
'{0}->{1}'
'(package, level, version)'.format(att['memberName'], function)]
code.append(self.create_code_block('if', implementation))
for i in range(0, len(self.child_lo_elements)):
att = self.child_lo_elements[i]
qualifier = '.'
if 'recursive_child' in att and att['recursive_child']:
qualifier = '->'
implementation = ['{0}{2}{1}'
'(package, level, version)'.format(att['memberName'], function, qualifier)]
code.append(dict({'code_type': 'line',
'code': implementation}))
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': False,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
########################################################################
# Functions for dealing with packages: enablePackage, connectToChild
# function to write enablePackageInternal
def write_enable_package(self):
if not self.status == 'cpp_not_list':
return
elif self.is_doc_plugin and not self.has_child_elements():
return
# create comment parts
title_line = 'Enables/disables the given package with this element'
params = []
return_lines = []
additional = []
# create the function declaration
function = 'enablePackageInternal'
return_type = 'void'
arguments = ['const std::string& pkgURI',
'const std::string& pkgPrefix', 'bool flag']
# create the function implementation
code = []
if not self.is_plugin and self.base_class:
implementation = ['{0}::enablePackageInternal(pkgURI, pkgPrefix, '
'flag)'.format(self.base_class)]
code = [dict({'code_type': 'line', 'code': implementation})]
if self.has_children and not self.has_only_math:
for i in range(0, len(self.child_elements)):
att = self.child_elements[i]
if 'is_ml' in att and att['is_ml']:
continue
else:
implementation = ['isSet{0}()'.format(att['capAttName']),
'{0}->enablePackageInternal'
'(pkgURI, pkgPrefix, '
'flag)'.format(att['memberName'])]
code.append(dict({'code_type': 'if',
'code': implementation}))
for i in range(0, len(self.child_lo_elements)):
att = self.child_lo_elements[i]
qualifier = '.'
if 'recursive_child' in att and att['recursive_child']:
qualifier = '->'
implementation = ['{0}{1}enablePackageInternal'
'(pkgURI, pkgPrefix, '
'flag)'.format(att['memberName'], qualifier)]
code.append(dict({'code_type': 'line',
'code': implementation}))
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': False,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
# function to write connectToChild
def write_connect_to_child(self):
if not self.is_cpp_api:
return
elif not self.has_children:
return
# create comment parts
title_line = 'Connects to child elements'
params = []
return_lines = []
additional = []
# create the function declaration
function = 'connectToChild'
return_type = 'void'
arguments = []
# create the function implementation
if not self.is_plugin:
implementation = ['{0}::connectToChild()'.format(self.base_class)]
code = [dict({'code_type': 'line', 'code': implementation})]
for i in range(0, len(self.child_elements)):
att = self.child_elements[i]
if 'is_ml' in att and att['is_ml']:
continue
else:
implementation = ['{0} != NULL'.format(att['memberName']),
'{0}->connectToParent'
'(this)'.format(att['memberName'])]
code.append(self.create_code_block('if', implementation))
for i in range(0, len(self.child_lo_elements)):
att = self.child_lo_elements[i]
symbol = '.'
if 'recursive_child' in att and att['recursive_child']:
symbol = '->'
implementation = ['{0}{1}connectToParent'
'(this)'.format(att['memberName'], symbol)]
code.append(dict({'code_type': 'line',
'code': implementation}))
else:
code = [self.create_code_block('line',
['connectToParent(getParent'
'{0}Object()'
')'.format(self.cap_language)])]
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': False,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
# function to write connectToParent
def write_connect_to_parent(self):
if not self.is_cpp_api:
return
elif not self.has_children:
return
# create comment parts
title_line = 'Connects to parent element'
params = []
return_lines = []
additional = []
# create the function declaration
function = 'connectToParent'
return_type = 'void'
if self.is_doc_plugin:
arguments = ['{0}* base'.format(global_variables.baseClass)]
else:
arguments = ['{0}* base'.format(self.std_base)]
# create the function implementation
implementation = ['{0}::connectToParent(base)'.format(self.base_class)]
code = [dict({'code_type': 'line', 'code': implementation})]
for i in range(0, len(self.child_elements)):
att = self.child_elements[i]
if 'is_ml' in att and att['is_ml']:
continue
else:
implementation = ['{0} != NULL'.format(att['memberName']),
'{0}->connectToParent'
'(base)'.format(att['memberName'])]
code.append(self.create_code_block('if', implementation))
for i in range(0, len(self.child_lo_elements)):
att = self.child_lo_elements[i]
symbol = '.'
if 'recursive_child' in att and att['recursive_child']:
symbol = '->'
implementation = ['{0}{1}connectToParent'
'(base)'.format(att['memberName'], symbol)]
code.append(dict({'code_type': 'line',
'code': implementation}))
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': False,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
########################################################################
# Functions for when an element has a different XML name
# function to write setElementName
def write_set_element_name(self):
if not self.is_cpp_api:
return
if not self.overwrites_children:
return
# create comment parts
title_line = 'Sets the XML name of this {0} object.'\
.format(self.object_name,)
params = []
return_lines = []
additional = []
# create the function declaration
arguments = ['const std::string& name']
function = 'setElementName'
return_type = 'void'
# create the function implementation
implementation = ['mElementName = name']
code = [dict({'code_type': 'line', 'code': implementation})]
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': False,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
########################################################################
# Functions for document plugin
# function to write isCompFlatteningImplemented
def write_is_comp_flat(self):
if not self.is_doc_plugin:
return
# create comment parts
title_line = 'Predicate indicating whether \'comp\' flattening has ' \
'been implemented for the {0} package.' \
''.format(self.package)
params = []
return_lines = []
additional = []
# create the function declaration
arguments = []
function = 'isCompFlatteningImplemented'
return_type = 'bool'
# create the function implementation
code = [dict({'code_type': 'line', 'code': ['return false']})]
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': True,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
# function to write checkConsistency
def write_check_consistency(self):
if not self.is_doc_plugin:
return
# create comment parts
title_line = 'Calls check consistency for any relevant ' \
'{0} validators.'.format(self.package)
params = []
return_lines = []
additional = []
# create the function declaration
arguments = []
function = 'checkConsistency'
return_type = 'unsigned int'
# create the function implementation
implementation = ['unsigned int nerrors = 0',
'unsigned int total_errors = 0']
code = [self.create_code_block('line', implementation)]
implementation = ['{0}* doc = static_cast<{0}*>(this->'
'getParent{1}'
'Object())'.format(global_variables.document_class,
self.cap_language),
'{0}ErrorLog* log = doc->getError'
'Log()'.format(self.cap_language)]
code.append(self.create_code_block('line', implementation))
implementation = ['unsigned char applicableValidators = '
'doc->getApplicableValidators()',
'bool id = ((applicableValidators & 0x01) ==0x01)',
'bool core = ((applicableValidators & 0x02) ==0x02)']
code.append(self.create_code_block('line', implementation))
implementation = ['{0}IdentifierConsistencyValidator '
'id_validator'.format(self.package),
'{0}ConsistencyValidator '
'core_validator'.format(self.package)]
code.append(self.create_code_block('line', implementation))
implementation = self.get_validator_block('id')
code.append(self.create_code_block('if', implementation))
implementation = self.get_validator_block('core')
code.append(self.create_code_block('if', implementation))
code.append(self.create_code_block('line', ['return total_errors']))
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': False,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
# function to write readAttributes
# note: this is not the standard readAttributes function; it is
# specific to the document plugin
def write_read_attributes(self):
if not self.is_doc_plugin:
return
# set up the error names to be used
error = '{0}AttributeRequiredMustBeBoolean'.format(self.package)
req_error = '{0}AttributeRequiredMissing'.format(self.package)
value_error = '{0}AttributeRequiredMustHaveValue'.format(self.package)
# create comment parts
title_line = 'Reads the {0} attributes in the top-level ' \
'element.'.format(self.package)
params = []
return_lines = []
additional = []
# create the function declaration
if global_variables.is_package:
arguments = ['const XMLAttributes& attributes',
'const ExpectedAttributes& expectedAttributes']
else:
arguments = ['const LIBSBML_CPP_NAMESPACE_QUALIFIER XMLAttributes& attributes',
'const LIBSBML_CPP_NAMESPACE_QUALIFIER ExpectedAttributes& expectedAttributes']
function = 'readAttributes'
return_type = 'void'
# create the function implementation
implementation = ['get{0}() != NULL && get{0}()->'
'getLevel() < '
'3'.format(global_variables.document_class),
'return']
code = [dict({'code_type': 'if', 'code': implementation})]
if global_variables.is_package:
triple = 'XMLTriple'
else:
triple = 'LIBSBML_CPP_NAMESPACE_QUALIFIER XMLTriple'
implementation = ['{0}ErrorLog* log = getErrorLog'
'()'.format(self.cap_language),
'unsigned int numErrs = log->getNumErrors()',
'{0} tripleReqd(\"required\", mURI, '
'getPrefix())'.format(triple),
'bool assigned = attributes.readInto(tripleReqd, '
'mRequired)']
code.append(self.create_code_block('line', implementation))
implementation = ['log->getNumErrors() == numErrs + 1 && '
'log->contains(XMLAttributeTypeMismatch)',
'log->remove(XMLAttributeTypeMismatch)',
'log->logPackageError(\"{0}\", {1}, '
'getPackageVersion(), getLevel(), '
'getVersion(), "", getLine(), getColumn())'
''.format(self.package.lower(), error),
'else',
'log->logPackageError(\"{0}\", {1}, '
'getPackageVersion(), getLevel(), '
'getVersion(), "", getLine(), getColumn())'
''.format(self.package.lower(), req_error)
]
nested_if = self.create_code_block('if_else', implementation)
implementation = ['mRequired != {0}'.format(self.required),
'log->logPackageError(\"{0}\", {1}, '
'getPackageVersion(), getLevel(), '
'getVersion(), "", getLine(), getColumn())'
''.format(self.package.lower(), value_error)
]
second_nested_if = self.create_code_block('if', implementation)
implementation = ['assigned == false', nested_if,
'else', 'mIsSetRequired = true', second_nested_if]
code.append(self.create_code_block('if_else', implementation))
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': False,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
########################################################################
# HELPER FUNCTIONS
def get_validator_block(self, valid_id):
bail_if = self.create_code_block('if',
['log->getNumFailsWithSeverity(LIB{0}'
'_SEV_ERROR) > '
'0'.format(self.cap_language),
'return total_errors'])
errors_if = self.create_code_block('if',
['nerrors > 0',
'log->add({0}_validator.get'
'Failures())'.format(valid_id),
bail_if])
code_block = ['{0}'.format(valid_id),
'{0}_validator.init()'.format(valid_id),
'nerrors = {0}_validator.validate(*doc)'.format(valid_id),
'total_errors += nerrors', errors_if]
return code_block
@staticmethod
def create_code_block(code_type, lines):
code = dict({'code_type': code_type, 'code': lines})
return code
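# Illustrative sketch (not part of deviser itself): every writer method above
# returns its C++ implementation as plain dicts of the shape produced by
# create_code_block(), i.e. {'code_type': ..., 'code': [...]}, where an entry
# in 'code' may itself be another block dict (as in the nested 'if' inside
# write_has_required_attributes). The isSetFoo/getFoo names are made up.
if __name__ == '__main__':
    nested_for = {'code_type': 'for',
                  'code': ['unsigned int i = 0; i < getNumFoos(); i++',
                           'getFoo(i)->write(stream)']}
    block = {'code_type': 'if',
             'code': ['isSetFoo() == true', nested_for]}
    print(block['code_type'])        # if
    print(block['code'][1]['code'])  # lines of the nested for-block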
|
sbmlteam/deviser
|
deviser/code_files/cpp_functions/GeneralFunctions.py
|
Python
|
lgpl-2.1
| 63,638
|
[
"VisIt"
] |
fc80137bb23a0003afbb99f30a05580119a26d428ed3f64eb980b002b7ac87d7
|
from __future__ import absolute_import
from __future__ import division
from typing import Any, Dict, List, Set, Tuple, Optional, Sequence, Callable, Union
from django.db import connection
from django.db.models.query import QuerySet
from django.template import RequestContext, loader
from django.core import urlresolvers
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from jinja2 import Markup as mark_safe
from zerver.decorator import has_request_variables, REQ, zulip_internal
from zerver.models import get_realm, UserActivity, UserActivityInterval, Realm
from zerver.lib.timestamp import timestamp_to_datetime
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import time
import re
import pytz
from six.moves import filter
from six.moves import map
from six.moves import range
from six.moves import zip
eastern_tz = pytz.timezone('US/Eastern')
from zproject.jinja2 import render_to_response
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
# type: (connection.cursor) -> List[Dict[str, Any]]
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
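# Minimal sketch of dictfetchall() in action, using a hypothetical stand-in
# cursor rather than a live Django connection (FakeCursor and its rows are
# invented for illustration):
if __name__ == '__main__':
    class FakeCursor(object):
        description = [('domain',), ('cnt',)]
        def fetchall(self):
            return [('example.com', 3), ('zulip.com', 7)]
    print(dictfetchall(FakeCursor()))
    # [{'domain': 'example.com', 'cnt': 3}, {'domain': 'zulip.com', 'cnt': 7}]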
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
query = '''
select
r.domain,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.domain,
age
order by
r.domain,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['domain']][row['age']] = row['cnt']
result = {}
for domain in counts:
raw_cnts = [counts[domain].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[domain] = dict(cnts=cnts)
return result
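# Sketch of the min/max highlighting rule in format_count() above: a realm's
# lowest daily count over the 8-day window is styled 'bad', its highest
# 'good', and everything else 'neutral' (the counts here are invented):
if __name__ == '__main__':
    raw_cnts = [0, 4, 9, 4]
    lo, hi = min(raw_cnts), max(raw_cnts)
    labels = ['bad' if c == lo else 'good' if c == hi else 'neutral'
              for c in raw_cnts]
    print(list(zip(raw_cnts, labels)))
    # [(0, 'bad'), (4, 'neutral'), (9, 'good'), (4, 'neutral')]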
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
query = '''
SELECT
realm.domain,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, domain ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['domain']]['cnts']
except KeyError:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
domain = row['domain']
minutes = realm_minutes.get(domain, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except ZeroDivisionError:
pass
# formatting
for row in rows:
row['domain'] = realm_activity_link(row['domain'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
domain='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__domain'
).order_by(
'user_profile__realm__domain',
'user_profile__email'
)
by_domain = lambda row: row.user_profile.realm.domain
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for domain, realm_intervals in itertools.groupby(all_intervals, by_domain):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (domain,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[domain] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
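# Sketch of the interval clipping performed above: each stored activity
# interval is intersected with the 24-hour reporting window via
# max(window_start, start) and min(window_end, end), so only the overlap
# contributes to the reported duration. The datetimes are hypothetical:
if __name__ == '__main__':
    window_start = datetime(2016, 1, 2, 0, 0)
    window_end = window_start + timedelta(hours=24)
    interval_start = datetime(2016, 1, 1, 23, 0)  # began before the window
    interval_end = datetime(2016, 1, 2, 1, 30)
    clipped = min(window_end, interval_end) - max(window_start, interval_start)
    print(clipped)  # 1:30:00 -- only the 90 minutes inside the window count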
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.domain = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.domain = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Domain':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.domain,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by domain, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by domain, up.id, client.name
''' % (mobile_type,)
cols = [
'Domain',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.domain,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by domain, client.name
having max(last_visit) > now() - interval '2 week'
order by domain, client.name
'''
cols = [
'Domain',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by domain'
query = '''
select
realm.domain,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by domain, client_name
having max(last_visit) > now() - interval '2 week'
order by domain, client_name
'''
cols = [
'Domain',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.domain,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, domain
having max(last_visit) > now() - interval '2 week'
order by client_name, domain
'''
cols = [
'Client',
'Domain',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title, is_home=True),
request=request
)
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__domain=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
#: `Any` used above should be `Union[int, datetime]`.
#: However, the current version of `Union` does not work inside nested functions.
#: We could use something like:
#: `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`
#: but that long `Union` would then have to be carried through all the inner functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
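# Sketch of the accumulation rule in update() above: per action, counts sum
# and last_visit keeps the most recent value. Rec and its ISO-date strings
# are invented stand-ins for UserActivity records (ISO strings compare
# chronologically, so max() behaves the same way here):
if __name__ == '__main__':
    from collections import namedtuple
    Rec = namedtuple('Rec', ['count', 'last_visit'])
    summary = {}  # type: Dict[str, Dict[str, Any]]
    for rec in [Rec(2, '2016-01-01'), Rec(5, '2016-01-03')]:
        if 'use' not in summary:
            summary['use'] = dict(count=rec.count, last_visit=rec.last_visit)
        else:
            summary['use']['count'] += rec.count
            summary['use']['last_visit'] = max(summary['use']['last_visit'],
                                               rec.last_visit)
    print(summary)  # {'use': {'count': 7, 'last_visit': '2016-01-03'}}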
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
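# Sketch of the conversion above with a made-up UTC timestamp; pytz,
# datetime and eastern_tz are all available at module level:
if __name__ == '__main__':
    utc_dt = pytz.utc.localize(datetime(2016, 6, 1, 16, 30))
    print(utc_dt.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M'))
    # 2016-06-01 12:30  (US/Eastern observes UTC-4 in June)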
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm=realm))
realm_link = '<a href="%s">%s</a>' % (url, realm)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[str]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
# type: (Optional[datetime]) -> bool
age = datetime.now(val.tzinfo) - val  # type: ignore # datetime.now tzinfo bug.
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Sequence[str]]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android'
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = get_realm(realm).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm)
data += [(page_title, content)]
fix_name = lambda realm: realm.replace('.', '_')
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (fix_name(realm),)
title = realm
return render_to_response(
'analytics/activity.html',
dict(data=data, realm_link=realm_link, title=title),
request=request
)
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title),
request=request
)
|
Vallher/zulip
|
analytics/views.py
|
Python
|
apache-2.0
| 28,082
|
[
"VisIt"
] |
c4dbacdcda4fa57e8af82afe72694f9e5ed231a659dceedd27eaef89362a2ea2
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Jul 24, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 24, 2012"
import unittest
import os
import json
import numpy as np
from pymatgen import Lattice, Structure
from pymatgen.transformations.standard_transformations import \
OxidationStateDecorationTransformation, SubstitutionTransformation, \
OrderDisorderedStructureTransformation
from pymatgen.transformations.advanced_transformations import \
SuperTransformation, EnumerateStructureTransformation, \
MultipleSubstitutionTransformation, ChargeBalanceTransformation, \
SubstitutionPredictorTransformation, MagOrderingTransformation
from monty.os.path import which
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.energy_models import IsingModel
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
def get_table():
"""
Loads a lightweight lambda table for use in unit tests to reduce
initialization time, and make unit tests insensitive to changes in the
default lambda table.
"""
data_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', 'struct_predictor')
json_file = os.path.join(data_dir, 'test_lambda.json')
with open(json_file) as f:
lambda_table = json.load(f)
return lambda_table
enumlib_present = which('multienum.x') and which('makestr.x')
class SuperTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
tl = [SubstitutionTransformation({"Li+": "Na+"}),
SubstitutionTransformation({"Li+": "K+"})]
t = SuperTransformation(tl)
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([.5, .5, .5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "Li+", "Li+",
"O2-", "O2-"], coords)
s = t.apply_transformation(struct, return_ranked_list=True)
for s_and_t in s:
self.assertEqual(s_and_t['transformation']
.apply_transformation(struct),
s_and_t['structure'])
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
def test_apply_transformation_mult(self):
#Test returning multiple structures from each transformation.
disord = Structure(np.eye(3) * 4.209, [{"Cs+": 0.5, "K+": 0.5}, "Cl-"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
disord.make_supercell([2, 2, 1])
tl = [EnumerateStructureTransformation(),
OrderDisorderedStructureTransformation()]
t = SuperTransformation(tl, nstructures_per_trans=10)
self.assertEqual(len(t.apply_transformation(disord,
return_ranked_list=20)), 8)
t = SuperTransformation(tl)
self.assertEqual(len(t.apply_transformation(disord,
return_ranked_list=20)), 2)
class MultipleSubstitutionTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
sub_dict = {1: ["Na", "K"]}
t = MultipleSubstitutionTransformation("Li+", 0.5, sub_dict, None)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
self.assertEqual(len(t.apply_transformation(struct,
return_ranked_list=True)),
2)
class ChargeBalanceTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = ChargeBalanceTransformation('Li+')
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([.5, .5, .5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "Li+", "Li+",
"O2-", "O2-"], coords)
s = t.apply_transformation(struct)
self.assertAlmostEqual(s.charge, 0, 5)
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class EnumerateStructureTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
enum_trans = EnumerateStructureTransformation(refine_structure=True)
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),
check_for_POTCAR=False)
struct = p.structure
expected_ans = [1, 3, 1]
for i, frac in enumerate([0.25, 0.5, 0.75]):
trans = SubstitutionTransformation({'Fe': {'Fe': frac}})
s = trans.apply_transformation(struct)
oxitrans = OxidationStateDecorationTransformation(
{'Li': 1, 'Fe': 2, 'P': 5, 'O': -2})
s = oxitrans.apply_transformation(s)
alls = enum_trans.apply_transformation(s, 100)
self.assertEqual(len(alls), expected_ans[i])
self.assertIsInstance(trans.apply_transformation(s), Structure)
for s in alls:
self.assertIn("energy", s)
#make sure it works for non-oxidation state decorated structure
trans = SubstitutionTransformation({'Fe': {'Fe': 0.5}})
s = trans.apply_transformation(struct)
alls = enum_trans.apply_transformation(s, 100)
self.assertEqual(len(alls), 3)
self.assertIsInstance(trans.apply_transformation(s), Structure)
for s in alls:
self.assertNotIn("energy", s)
def test_to_from_dict(self):
trans = EnumerateStructureTransformation()
d = trans.as_dict()
trans = EnumerateStructureTransformation.from_dict(d)
self.assertEqual(trans.symm_prec, 0.1)
class SubstitutionPredictorTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = SubstitutionPredictorTransformation(threshold=1e-3, alpha=-5,
lambda_table=get_table())
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ['O2-', 'Li1+', 'Li1+'], coords)
outputs = t.apply_transformation(struct, return_ranked_list=True)
self.assertEqual(len(outputs), 4, 'incorrect number of structures')
def test_as_dict(self):
t = SubstitutionPredictorTransformation(threshold=2, alpha=-2,
lambda_table=get_table())
d = t.as_dict()
t = SubstitutionPredictorTransformation.from_dict(d)
self.assertEqual(t._threshold, 2,
'incorrect threshold passed through dict')
self.assertEqual(t._substitutor.p.alpha, -2,
'incorrect alpha passed through dict')
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class MagOrderingTransformationTest(PymatgenTest):
def test_apply_transformation(self):
trans = MagOrderingTransformation({"Fe": 5})
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),
check_for_POTCAR=False)
s = p.structure
alls = trans.apply_transformation(s, 10)
self.assertEqual(len(alls), 3)
f = SpacegroupAnalyzer(alls[0]["structure"], 0.1)
self.assertEqual(f.get_spacegroup_number(), 31)
model = IsingModel(5, 5)
trans = MagOrderingTransformation({"Fe": 5},
energy_model=model)
alls2 = trans.apply_transformation(s, 10)
#Ising model with +J penalizes similar neighbor magmom.
self.assertNotEqual(alls[0]["structure"], alls2[0]["structure"])
self.assertEqual(alls[0]["structure"], alls2[2]["structure"])
s = self.get_structure('Li2O')
#Li2O doesn't have magnetism of course, but this is to test the
# enumeration.
trans = MagOrderingTransformation({"Li+": 1}, max_cell_size=3)
alls = trans.apply_transformation(s, 100)
self.assertEqual(len(alls), 10)
def test_ferrimagnetic(self):
trans = MagOrderingTransformation({"Fe": 5}, 0.75, max_cell_size=1)
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),
check_for_POTCAR=False)
s = p.structure
alls = trans.apply_transformation(s, 10)
self.assertEqual(len(alls), 2)
def test_to_from_dict(self):
trans = MagOrderingTransformation({"Fe": 5}, 0.75)
d = trans.as_dict()
#Check json encodability
s = json.dumps(d)
trans = MagOrderingTransformation.from_dict(d)
self.assertEqual(trans.mag_species_spin, {"Fe": 5})
from pymatgen.analysis.energy_models import SymmetryModel
self.assertIsInstance(trans.emodel, SymmetryModel)
def test_zero_spin_case(self):
#ensure that zero spin case maintains sites and formula
s = self.get_structure('Li2O')
trans = MagOrderingTransformation({"Li+": 0.0}, 0.5)
alls = trans.apply_transformation(s)
#Ensure s does not have a spin property
self.assertFalse('spin' in s.sites[0].specie._properties)
#ensure sites are assigned a spin property in alls
self.assertTrue('spin' in alls.sites[0].specie._properties)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
rousseab/pymatgen
|
pymatgen/transformations/tests/test_advanced_transformations.py
|
Python
|
mit
| 11,086
|
[
"VASP",
"pymatgen"
] |
17a93f81547fa6a127e74922941e420e65f960d4e3235b3723649af8b4c4539a
|
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
from numpy import random
import cv2
def make_gaussians(cluster_n, img_size):
points = []
ref_distrs = []
for _i in xrange(cluster_n):
mean = (0.1 + 0.8*random.rand(2)) * img_size
a = (random.rand(2, 2)-0.5)*img_size*0.1
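        # a.T @ a is positive semi-definite; the scaled identity term below
        # keeps the covariance matrix well-conditioned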
cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
n = 100 + random.randint(900)
pts = random.multivariate_normal(mean, cov, n)
points.append( pts )
ref_distrs.append( (mean, cov) )
points = np.float32( np.vstack(points) )
return points, ref_distrs
def draw_gaussain(img, mean, cov, color):
x, y = np.int32(mean)
w, u, _vt = cv2.SVDecomp(cov)
ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
s1, s2 = np.sqrt(w)*3.0
    cv2.ellipse(img, (x, y), (int(s1), int(s2)), ang, 0, 360, color, 1, cv2.LINE_AA)
if __name__ == '__main__':
cluster_n = 5
img_size = 512
print('press any key to update distributions, ESC - exit\n')
while True:
print('sampling distributions...')
points, ref_distrs = make_gaussians(cluster_n, img_size)
print('EM (opencv) ...')
em = cv2.ml.EM_create()
em.setClustersNumber(cluster_n)
em.setCovarianceMatrixType(cv2.ml.EM_COV_MAT_GENERIC)
em.trainEM(points)
means = em.getMeans()
covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232
found_distrs = zip(means, covs)
print('ready!\n')
img = np.zeros((img_size, img_size, 3), np.uint8)
for x, y in np.int32(points):
cv2.circle(img, (x, y), 1, (255, 255, 255), -1)
for m, cov in ref_distrs:
draw_gaussain(img, m, cov, (0, 255, 0))
for m, cov in found_distrs:
draw_gaussain(img, m, cov, (0, 0, 255))
cv2.imshow('gaussian mixture', img)
ch = cv2.waitKey(0)
if ch == 27:
break
cv2.destroyAllWindows()
|
zzjkf2009/Midterm_Astar
|
opencv/samples/python/gaussian_mix.py
|
Python
|
mit
| 2,081
|
[
"Gaussian"
] |
1cbc640509a6f03ac8c82113bb1145932c7e9467551bc9a0f758ad6f90be3619
|
""" DIRAC FileCatalog component representing a simple directory tree
"""
__RCSID__ = "$Id: $"
import os
import types
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryTreeBase import DirectoryTreeBase
class DirectorySimpleTree( DirectoryTreeBase ):
""" Class managing Directory Tree as a simple self-linked structure with full
directory path stored in each node
"""
def __init__( self, database = None ):
DirectoryTreeBase.__init__(self,database)
self.treeTable = 'FC_DirectoryTree'
def findDir( self, path ):
req = "SELECT DirID from FC_DirectoryTree WHERE DirName='%s'" % path
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK('')
return S_OK( result['Value'][0][0] )
def removeDir( self, path ):
""" Remove directory
"""
result = self.findDir(path)
if not result['OK']:
return result
if not result['Value']:
return S_OK()
dirID = result['Value']
req = "DELETE FROM FC_DirectoryTree WHERE DirID=%d" % dirID
result = self.db._update(req)
return result
def makeDir( self, path ):
result = self.findDir(path)
if not result['OK']:
return result
dirID = result['Value']
if dirID:
return S_OK(dirID)
names = ['DirName']
values = [path]
result = self.db._insert( 'FC_DirectoryTree', names, values )
if not result['OK']:
return result
return S_OK(result['lastRowId'])
def existsDir( self, path ):
""" Check the existence of a directory at the specified path
"""
result = self.findDir(path)
if not result['OK']:
return result
if not result['Value']:
return S_OK({"Exists":False})
else:
return S_OK({"Exists":True,"DirID":result['Value']})
def getParent( self, path ):
""" Get the parent ID of the given directory
"""
parent_dir = os.path.dirname(path)
if parent_dir == "/":
return S_OK(0)
return self.findDir(parent_dir)
def getParentID( self, dirID ):
""" Get the ID of the parent of a directory specified by ID
"""
if dirID == 0:
return S_ERROR( 'Root directory ID given' )
req = "SELECT Parent FROM FC_DirectoryTree WHERE DirID=%d" % dirID
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('No parent found')
return S_OK( result['Value'][0][0] )
def getDirectoryPath( self, dirID ):
""" Get directory name by directory ID
"""
req = "SELECT DirName FROM FC_DirectoryTree WHERE DirID=%d" % int(dirID)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory with id %d not found' % int(dirID) )
return S_OK(result['Value'][0][0])
def getDirectoryName( self, dirID ):
""" Get directory name by directory ID
"""
result = self.getDirectoryPath( dirID )
if not result['OK']:
return result
return S_OK( os.path.basename( result['Value'] ) )
def getPathIDs( self, path ):
""" Get IDs of all the directories in the parent hierarchy
"""
elements = path.split('/')
pelements = []
dPath = ''
for el in elements[1:]:
dPath += '/'+el
pelements.append(dPath)
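    # e.g. path '/a/b/c' yields pelements ['/a', '/a/b', '/a/b/c']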
pathString = [ "'"+p+"'" for p in pelements ]
req = "SELECT DirID FROM FC_DirectoryTree WHERE DirName in (%s) ORDER BY DirID" % pathString
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory %s not found' % path)
return S_OK([ x[0] for x in result['Value'] ])
def getChildren( self, path ):
""" Get child directory IDs for the given directory
"""
if type(path) in types.StringTypes:
result = self.findDir(path)
if not result['OK']:
return result
dirID = result['Value']
else:
dirID = path
req = "SELECD DirID FROM FC_DirectoryTree WHERE Parent=%d" % dirID
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_OK([])
return S_OK( [ x[0] for x in result['Value'] ] )
|
andresailer/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/DirectorySimpleTree.py
|
Python
|
gpl-3.0
| 4,383
|
[
"DIRAC"
] |
a7446a7c135c0e37d02815dfa964f2f85d97823c9ed7c0be7bdeed6b9a9ff910
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import collections
import functools
import logging
import threading
import time
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super().add_result(future)
self.event.set()
def add_exception(self, future):
super().add_exception(future)
self.event.set()
def add_cancelled(self, future):
super().add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super().__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super().add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super().add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super().add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
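        # sorting by id() gives every thread the same global acquisition
        # order, preventing deadlock when waiters acquire overlapping sets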
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
f._waiters.remove(waiter)
DoneAndNotDoneFutures = collections.namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
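# A minimal usage sketch for wait() and as_completed(); ThreadPoolExecutor is
# assumed to be provided by a sibling module of this package:
#
#     from concurrent.futures import ThreadPoolExecutor
#
#     with ThreadPoolExecutor(max_workers=4) as pool:
#         fs = [pool.submit(pow, 2, n) for n in range(8)]
#         done, not_done = wait(fs, timeout=5, return_when=FIRST_COMPLETED)
#         for f in as_completed(fs, timeout=10):
#             print(f.result())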
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, timeout=None):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
        It is safe to call this method several times; however, no other
        methods can be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
|
cnsoft/kbengine-cocos2dx
|
kbe/src/lib/python/Lib/concurrent/futures/_base.py
|
Python
|
lgpl-3.0
| 19,316
|
[
"Brian"
] |
3db5a8d7353bf624187a623f78e60f5dc947410e31c8bc82a22023570052776a
|
# Orca
#
# Copyright 2006-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides support for a flat review find."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import copy
import re
import debug
import flat_review
import orca_state
from orca_i18n import _ # for gettext support
class SearchQuery:
"""Represents a search that the user wants to perform."""
def __init__(self):
"""Creates a new SearchQuery. A searchQuery has the following
properties:
searchString - the string to find
searchBackwards - if true, search upward for matches
caseSensitive - if true, case counts
matchEntireWord - if true, only match on the entire string
startAtTop - if true, begin the search from the top of
the window, rather than at the current
location
windowWrap - if true, when the top/bottom edge of the
window is reached wrap to the bottom/top
and continue searching
"""
self.searchString = ""
self.searchBackwards = False
self.caseSensitive = False
self.matchEntireWord = False
self.windowWrap = False
self.startAtTop = False
self.debugLevel = debug.LEVEL_FINEST
def debugContext(self, context, string):
"""Prints out the context and the string to find to debug.out"""
debug.println(self.debugLevel, \
"------------------------------------------------------------")
debug.println(self.debugLevel, \
"findQuery: %s line=%d zone=%d word=%d char=%d" \
% (string, context.lineIndex, context.zoneIndex, \
context.wordIndex, context.charIndex))
debug.println(self.debugLevel, \
"Number of lines: %d" % len(context.lines))
debug.println(self.debugLevel, \
"Number of zones in current line: %d" % \
len(context.lines[context.lineIndex].zones))
debug.println(self.debugLevel, \
"Number of words in current zone: %d" % \
len(context.lines[context.lineIndex].zones[context.zoneIndex].words))
debug.println(self.debugLevel, \
"==========================================================\n\n")
def dumpContext(self, context):
"""Debug utility which prints out the context."""
print "DUMP"
for i in range(0, len(context.lines)):
print " Line %d" % i
for j in range(0, len(context.lines[i].zones)):
print " Zone: %d" % j
for k in range(0, len(context.lines[i].zones[j].words)):
print " Word %d = `%s` len(word): %d" % \
(k, context.lines[i].zones[j].words[k].string, \
len(context.lines[i].zones[j].words[k].string))
def findQuery(self, context, justEnteredFlatReview):
"""Performs a search on the string specified in searchQuery.
Arguments:
- context: The context from active script
- justEnteredFlatReview: If true, we began the search in focus
tracking mode.
Returns:
- The context of the match, if found
"""
# Get the starting context so that we can restore it at the end.
#
originalLineIndex = context.lineIndex
originalZoneIndex = context.zoneIndex
originalWordIndex = context.wordIndex
originalCharIndex = context.charIndex
debug.println(self.debugLevel, \
"findQuery: original context line=%d zone=%d word=%d char=%d" \
% (originalLineIndex, originalZoneIndex, \
originalWordIndex, originalCharIndex))
# self.dumpContext(context)
flags = re.LOCALE
if not self.caseSensitive:
flags = flags | re.IGNORECASE
if self.matchEntireWord:
regexp = "\\b" + self.searchString + "\\b"
else:
regexp = self.searchString
pattern = re.compile(regexp, flags)
debug.println(self.debugLevel, \
"findQuery: startAtTop: %d regexp: `%s`" \
% (self.startAtTop, regexp))
if self.startAtTop:
context.goBegin(flat_review.Context.WINDOW)
self.debugContext(context, "go begin")
location = None
found = False
wrappedYet = False
doneWithLine = False
while not found:
# Check the current line for the string.
#
[currentLine, x, y, width, height] = \
context.getCurrent(flat_review.Context.LINE)
debug.println(self.debugLevel, \
"findQuery: current line=`%s` x=%d y=%d width=%d height=%d" \
% (currentLine, x, y, width, height))
if re.search(pattern, currentLine) and not doneWithLine:
# It's on this line. Check the current zone for the string.
#
while not found:
[currentZone, x, y, width, height] = \
context.getCurrent(flat_review.Context.ZONE)
debug.println(self.debugLevel, \
"findQuery: current zone=`%s` x=%d y=%d " % \
(currentZone, x, y))
debug.println(self.debugLevel, \
"width=%d height=%d" % (width, height))
if re.search(pattern, currentZone):
# It's in this zone at least once.
#
theZone = context.lines[context.lineIndex] \
.zones[context.zoneIndex]
startedInThisZone = \
(originalLineIndex == context.lineIndex) and \
(originalZoneIndex == context.zoneIndex)
try:
theZone.accessible.queryText()
except:
pass
else:
# Make a list of the character offsets for the
# matches in this zone.
#
allMatches = re.finditer(pattern, currentZone)
offsets = []
for m in allMatches:
offsets.append(m.start(0))
if self.searchBackwards:
offsets.reverse()
i = 0
while not found and (i < len(offsets)):
[nextInstance, offset] = \
theZone.getWordAtOffset(offsets[i])
if nextInstance:
offsetDiff = \
nextInstance.index - context.wordIndex
if self.searchBackwards \
and (offsetDiff < 0) \
or (not self.searchBackwards \
and offsetDiff > 0):
context.wordIndex = nextInstance.index
context.charIndex = 0
found = True
elif not offsetDiff and \
(not startedInThisZone or \
justEnteredFlatReview):
# We landed on a match by happenstance.
# This can occur when the nextInstance
# is the first thing we come across.
#
found = True
else:
i += 1
else:
break
if not found:
# Locate the next zone to try again.
#
if self.searchBackwards:
moved = context.goPrevious( \
flat_review.Context.ZONE, \
flat_review.Context.WRAP_LINE)
self.debugContext(context, "[1] go previous")
context.goEnd(flat_review.Context.ZONE)
self.debugContext(context, "[1] go end")
else:
moved = context.goNext( \
flat_review.Context.ZONE, \
flat_review.Context.WRAP_LINE)
self.debugContext(context, "[1] go next")
if not moved:
doneWithLine = True
break
else:
# Locate the next line to try again.
#
if self.searchBackwards:
moved = context.goPrevious(flat_review.Context.LINE, \
flat_review.Context.WRAP_LINE)
self.debugContext(context, "[2] go previous")
else:
moved = context.goNext(flat_review.Context.LINE, \
flat_review.Context.WRAP_LINE)
self.debugContext(context, "[2] go next")
if moved:
if self.searchBackwards:
moved = context.goEnd(flat_review.Context.LINE)
self.debugContext(context, "[2] go end")
else:
# Then we're at the screen's edge.
#
if self.windowWrap and not wrappedYet:
script = orca_state.activeScript
doneWithLine = False
wrappedYet = True
if self.searchBackwards:
# Translators: the Orca "Find" dialog
# allows a user to search for text in a
# window and then move focus to that text.
# For example, they may want to find the
# "OK" button. This message indicates
# that a find operation in the reverse
# direction is wrapping from the top of
# the window down to the bottom.
#
script.presentMessage(_("Wrapping to Bottom"))
moved = context.goPrevious( \
flat_review.Context.LINE, \
flat_review.Context.WRAP_ALL)
self.debugContext(context, "[3] go previous")
else:
# Translators: the Orca "Find" dialog
# allows a user to search for text in a
# window and then move focus to that text.
# For example, they may want to find the
# "OK" button. This message indicates
# that a find operation in the forward
# direction is wrapping from the bottom of
# the window up to the top.
#
script.presentMessage(_("Wrapping to Top"))
moved = context.goNext( \
flat_review.Context.LINE, \
flat_review.Context.WRAP_ALL)
self.debugContext(context, "[3] go next")
if not moved:
debug.println(self.debugLevel, \
"findQuery: cannot wrap")
break
else:
break
if found:
location = copy.copy(context)
self.debugContext(context, "before setting original")
context.setCurrent(originalLineIndex, originalZoneIndex, \
originalWordIndex, originalCharIndex)
self.debugContext(context, "after setting original")
if location:
debug.println(self.debugLevel, \
"findQuery: returning line=%d zone=%d word=%d char=%d" \
% (location.lineIndex, location.zoneIndex, \
location.wordIndex, location.charIndex))
return location
def getLastQuery():
"""Grabs the last search query performed from orca_state.
Returns:
- A copy of the last search query, if it exists
"""
lastQuery = copy.copy(orca_state.searchQuery)
return lastQuery
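# A minimal usage sketch (hypothetical values; the flat_review.Context is
# normally supplied by the active script):
#
#     query = SearchQuery()
#     query.searchString = "OK"
#     query.matchEntireWord = True
#     location = query.findQuery(context, justEnteredFlatReview=False)
#     if location:
#         pass  # move flat review focus to the match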
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/find.py
|
Python
|
gpl-3.0
| 14,205
|
[
"ORCA"
] |
43a5e938d423a4cd5eb3454bbda069325c5bdf887fa2669f834dd128e0704fd6
|
"""
=========================================
Density Estimation for a Gaussian mixture
=========================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0.0, -0.7], [3.5, 0.7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GaussianMixture(n_components=2, covariance_type="full")
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
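# score_samples returns the log-likelihood; negate it so the contour levels
# below (logspace from 1 to 1000) are positive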
Z = -clf.score_samples(XX)
Z = Z.reshape(X.shape)
CS = plt.contour(
X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0), levels=np.logspace(0, 3, 10)
)
CB = plt.colorbar(CS, shrink=0.8, extend="both")
plt.scatter(X_train[:, 0], X_train[:, 1], 0.8)
plt.title("Negative log-likelihood predicted by a GMM")
plt.axis("tight")
plt.show()
|
manhhomienbienthuy/scikit-learn
|
examples/mixture/plot_gmm_pdf.py
|
Python
|
bsd-3-clause
| 1,518
|
[
"Gaussian"
] |
0c634ac657be44919216b8f244834615417a6256885539802c1f1f3994c0b43a
|
"""quodsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin, auth
from django.views import defaults as default_views
from django.views.generic import TemplateView
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
urlpatterns = static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + [
# url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
# url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
url(settings.ADMIN_URL, include(admin.site.urls)), # default=r'^admin/'
    url('^auth/', include('django.contrib.auth.urls')),  # Removed namespace="users" because it caused more issues
url(r'^cms/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'', include(wagtail_urls)),
]
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns = [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
] + urlpatterns
|
ouh-churchill/quod
|
config/urls.py
|
Python
|
mit
| 2,234
|
[
"VisIt"
] |
1b6508fdfcd60c56d78e3bdcd91046ecb903fcc5213cc29cb0f402bc837da179
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# This test checks netCDF reader. It uses the COARDS convention.
renWin = vtk.vtkRenderWindow()
renWin.SetSize(400,400)
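# The window is split into four viewports, one per reader output type:
# image (bottom left), rectilinear (bottom right), structured (top left),
# and unstructured (top right).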
#############################################################################
# Case 1: Image type.
# Open the file.
reader_image = vtk.vtkNetCDFCFReader()
reader_image.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/tos_O1_2001-2002.nc")
reader_image.SetOutputTypeToImage()
# Set the arrays we want to load.
reader_image.UpdateMetaData()
reader_image.SetVariableArrayStatus("tos",1)
reader_image.SphericalCoordinatesOff()
aa_image = vtk.vtkAssignAttribute()
aa_image.SetInputConnection(reader_image.GetOutputPort())
aa_image.Assign("tos","SCALARS","POINT_DATA")
thresh_image = vtk.vtkThreshold()
thresh_image.SetInputConnection(aa_image.GetOutputPort())
thresh_image.ThresholdByLower(10000)
surface_image = vtk.vtkDataSetSurfaceFilter()
surface_image.SetInputConnection(thresh_image.GetOutputPort())
mapper_image = vtk.vtkPolyDataMapper()
mapper_image.SetInputConnection(surface_image.GetOutputPort())
mapper_image.SetScalarRange(270,310)
actor_image = vtk.vtkActor()
actor_image.SetMapper(mapper_image)
ren_image = vtk.vtkRenderer()
ren_image.AddActor(actor_image)
ren_image.SetViewport(0.0,0.0,0.5,0.5)
renWin.AddRenderer(ren_image)
#############################################################################
# Case 2: Rectilinear type.
# Open the file.
reader_rect = vtk.vtkNetCDFCFReader()
reader_rect.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/tos_O1_2001-2002.nc")
reader_rect.SetOutputTypeToRectilinear()
# Set the arrays we want to load.
reader_rect.UpdateMetaData()
reader_rect.SetVariableArrayStatus("tos",1)
reader_rect.SphericalCoordinatesOff()
aa_rect = vtk.vtkAssignAttribute()
aa_rect.SetInputConnection(reader_rect.GetOutputPort())
aa_rect.Assign("tos","SCALARS","POINT_DATA")
thresh_rect = vtk.vtkThreshold()
thresh_rect.SetInputConnection(aa_rect.GetOutputPort())
thresh_rect.ThresholdByLower(10000)
surface_rect = vtk.vtkDataSetSurfaceFilter()
surface_rect.SetInputConnection(thresh_rect.GetOutputPort())
mapper_rect = vtk.vtkPolyDataMapper()
mapper_rect.SetInputConnection(surface_rect.GetOutputPort())
mapper_rect.SetScalarRange(270,310)
actor_rect = vtk.vtkActor()
actor_rect.SetMapper(mapper_rect)
ren_rect = vtk.vtkRenderer()
ren_rect.AddActor(actor_rect)
ren_rect.SetViewport(0.5,0.0,1.0,0.5)
renWin.AddRenderer(ren_rect)
#############################################################################
# Case 3: Structured type.
# Open the file.
reader_struct = vtk.vtkNetCDFCFReader()
reader_struct.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/tos_O1_2001-2002.nc")
reader_struct.SetOutputTypeToStructured()
# Set the arrays we want to load.
reader_struct.UpdateMetaData()
reader_struct.SetVariableArrayStatus("tos",1)
reader_struct.SphericalCoordinatesOff()
aa_struct = vtk.vtkAssignAttribute()
aa_struct.SetInputConnection(reader_struct.GetOutputPort())
aa_struct.Assign("tos","SCALARS","POINT_DATA")
thresh_struct = vtk.vtkThreshold()
thresh_struct.SetInputConnection(aa_struct.GetOutputPort())
thresh_struct.ThresholdByLower(10000)
surface_struct = vtk.vtkDataSetSurfaceFilter()
surface_struct.SetInputConnection(thresh_struct.GetOutputPort())
mapper_struct = vtk.vtkPolyDataMapper()
mapper_struct.SetInputConnection(surface_struct.GetOutputPort())
mapper_struct.SetScalarRange(270,310)
actor_struct = vtk.vtkActor()
actor_struct.SetMapper(mapper_struct)
ren_struct = vtk.vtkRenderer()
ren_struct.AddActor(actor_struct)
ren_struct.SetViewport(0.0,0.5,0.5,1.0)
renWin.AddRenderer(ren_struct)
#############################################################################
# Case 4: Unstructured type.
# Open the file.
reader_auto = vtk.vtkNetCDFCFReader()
reader_auto.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/tos_O1_2001-2002.nc")
reader_auto.SetOutputTypeToUnstructured()
# Set the arrays we want to load.
reader_auto.UpdateMetaData()
reader_auto.SetVariableArrayStatus("tos",1)
reader_auto.SphericalCoordinatesOff()
aa_auto = vtk.vtkAssignAttribute()
aa_auto.SetInputConnection(reader_auto.GetOutputPort())
aa_auto.Assign("tos","SCALARS","POINT_DATA")
thresh_auto = vtk.vtkThreshold()
thresh_auto.SetInputConnection(aa_auto.GetOutputPort())
thresh_auto.ThresholdByLower(10000)
surface_auto = vtk.vtkDataSetSurfaceFilter()
surface_auto.SetInputConnection(thresh_auto.GetOutputPort())
mapper_auto = vtk.vtkPolyDataMapper()
mapper_auto.SetInputConnection(surface_auto.GetOutputPort())
mapper_auto.SetScalarRange(270,310)
actor_auto = vtk.vtkActor()
actor_auto.SetMapper(mapper_auto)
ren_auto = vtk.vtkRenderer()
ren_auto.AddActor(actor_auto)
ren_auto.SetViewport(0.5,0.5,1.0,1.0)
renWin.AddRenderer(ren_auto)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renWin.Render()
# # Setup a lookup table.
# vtkLookupTable lut
# lut SetTableRange 270 310
# lut SetHueRange 0.66 0.0
# lut SetRampToLinear
# # Make pretty colors
# vtkImageMapToColors map
# map SetInputConnection [asinine GetOutputPort]
# map SetLookupTable lut
# map SetOutputFormatToRGB
# # vtkImageViewer viewer
# # viewer SetInputConnection [map GetOutputPort]
# # viewer SetColorWindow 256
# # viewer SetColorLevel 127.5
# # viewer Render
# vtkImageViewer2 viewer
# viewer SetInputConnection [map GetOutputPort]
# viewer Render
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/IO/NetCDF/Testing/Python/NetCDFCFSetOutputType.py
|
Python
|
gpl-3.0
| 5,422
|
[
"NetCDF",
"VTK"
] |
aaf52edba26445a60d271fec5c0ee6410248c4ed23342365963026bd4a338a1d
|
import os
from tornado.web import StaticFileHandler, HTTPError
from DIRAC import rootPath
class StaticHandler(StaticFileHandler):
def initialize(self, pathList, default_filename=None):
# pathList: ['/opt/dirac/pro/WebAppExt/WebApp/static', ...]
self.pathList = [os.path.abspath(path) + os.path.sep for path in pathList]
self.default_filename = default_filename
self.root = rootPath
def parse_url_path(self, url_path):
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
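        # try each configured static root in order; the first existing file wins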
for possiblePath in self.pathList:
possiblePath = os.path.join(possiblePath, url_path)
if self.default_filename and os.path.isdir(possiblePath):
possiblePath = os.path.join(possiblePath, self.default_filename)
if os.path.isfile(possiblePath):
return possiblePath
raise HTTPError(404)
|
DIRACGrid/WebAppDIRAC
|
src/WebAppDIRAC/Core/StaticHandler.py
|
Python
|
gpl-3.0
| 920
|
[
"DIRAC"
] |
271e3d0790185f3e2c66e083ad1f7020cff02b23608d7fcd6c2ecf4aa484ea0a
|
import pickle
import signal_processing as sig_proc
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy import stats
import copy
#signal-to-noise ratio evolution across recordings, per neuron and globally
dir_name = '../data/r448/r448_131022_rH/'
img_ext = '.eps'
save_img = True
show = True
trials = [2, 5, 6, 7]
#signal filtering parameter
low_cut = 3e2
high_cut = 3e3
sp = sig_proc.Signal_processing(save_img, show, img_ext)
global_snr = []
dir_name = '../data/r415/'
base_name = 'r415_'
record_name = ['130926', '131008', '131009', '131011', '131016', '131017', '131018', '131021', '131023', '131025',
'131030', '131101', '131118', '131129']
print('### spikes load ###')
with open(dir_name + 'data_processed', 'rb') as my_file:
record_data = pickle.load(my_file)
# record_data[trial] = {'spikes_values': all_chan_spikes_values,
# 'spikes_time': all_chan_spikes_times,
# 'spikes_classes': all_chan_spikes_classes,
# 'clusters': all_chan_clusters,
# 'length_signal': signal.shape[1],
# 'fs': fs }
for record in record_name:
signal = sp.load_m(dir_name + 'r415_'+record+'.mat', 'd')
fs = float(sp.load_m(dir_name + 'fech.mat', 'sampFreq'))
signal_noise_ratio_r415 = []
fsignal = sp.signal_mc_filtering(signal, low_cut, high_cut, fs)
for chan in range(len(record_data[record]['clusters'])):
sig_mean = np.array(fsignal[chan]).mean()
sig_std = np.array(fsignal[chan]).std()
min_sig = sig_mean-2*sig_std
max_sig = sig_mean+2*sig_std
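        # noise band = mean +/- 2*std of the filtered channel; the SNR below
        # is the mean spike peak-to-peak amplitude divided by this band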
for cluster in record_data[record]['clusters'][chan]:
if np.array(cluster.spikes_values).shape[0]>0:
max_spike = np.array(cluster.spikes_values).max(1).mean()
min_spike = np.array(cluster.spikes_values).min(1).mean()
signal_noise_ratio_r415.append((max_spike-min_spike)/(max_sig-min_sig))
else:
signal_noise_ratio_r415.append(0)
global_snr.append(copy.copy(signal_noise_ratio_r415))
plt.figure()
plt.boxplot(global_snr)
plt.plot(np.array(global_snr).mean(1))
if save_img:
plt.savefig('box_plot_snr_r415'+img_ext, bbox_inches='tight')
if show:
plt.show()
else:
plt.close()
|
scauglog/brain_record_toolbox
|
script_r415_snr_evolution.py
|
Python
|
mit
| 2,300
|
[
"NEURON"
] |
d526a6b1963a84529346cd5208dab000ebde9ec10c07e668d2f78a1e11980cf8
|
"""
Simulation-generated data can provide an external criterion to validate
clustering methods. This module contains a set of command-line tools for
performing simulations, clustering their output, and producing analaysis
reports.
"""
import random
import os
import sys
import string
import operator
import logging
import json
from collections import OrderedDict
from itertools import izip, cycle
from lsh_hdc import Shingler, HASH_FUNC_TABLE
from lsh_hdc.cluster import MinHashCluster as Cluster
from lsh_hdc.utils import random_string, get_df_subset
from pymaptools.iter import intersperse, isiterable
from pymaptools.io import GzipFileType, read_json_lines, ndjson2col, \
PathArgumentParser, write_json_line
from lsh_hdc.monte_carlo import utils
from pymaptools.sample import discrete_sample
from pymaptools.benchmark import PMTimer
ALPHABET = string.letters + string.digits
def gauss_uint(mu, sigma):
"""Draw a positive integer from Gaussian distribution
:param mu: mean
:param sigma: std. dev
:return: positive integer drawn from Gaussian distribution
:rtype: int
"""
return abs(int(random.gauss(mu, sigma)))
def gauss_uint_threshold(threshold=1, **kwargs):
result = -1
while result < threshold:
result = gauss_uint(**kwargs)
return result
class MarkovChainGenerator(object):
def __init__(self, alphabet=ALPHABET):
self.alphabet = alphabet
self.chain = MarkovChainGenerator.get_markov_chain(alphabet)
def generate(self, start, length):
"""Generate a sequence according to a Markov chain"""
for _ in xrange(length):
prob_dist = self.chain[start]
start = discrete_sample(prob_dist)
yield start
def generate_str(self, start, length):
"""Generate a string according to a Markov chain"""
return ''.join(self.generate(start, length))
@staticmethod
def get_markov_chain(alphabet):
"""
:param alphabet: letters to use
:type alphabet: str
:return: transition probabilities
:rtype: dict
"""
l = len(alphabet)
markov_chain = dict()
second = operator.itemgetter(1)
for from_letter in alphabet:
slice_points = sorted([0] + [random.random() for _ in xrange(l - 1)] + [1])
transition_probabilities = \
[slice_points[i + 1] - slice_points[i] for i in xrange(l)]
letter_probs = sorted(izip(alphabet, transition_probabilities),
key=second, reverse=True)
markov_chain[from_letter] = OrderedDict(letter_probs)
return markov_chain
class MarkovChainMutator(object):
delimiter = '-'
def __init__(self, p_err=0.1, alphabet=ALPHABET):
self.alphabet = alphabet
self.chain = MarkovChainMutator.get_markov_chain(alphabet + self.delimiter, p_err=p_err)
@staticmethod
def get_markov_chain(alphabet, p_err=0.2):
"""
:param p_err: probability of an error
:type p_err: float
:param alphabet: letters to use
:type alphabet: str
:return: transition probabilities
:rtype: dict
"""
markov_chain = dict()
alpha_set = set(alphabet)
l = len(alpha_set)
for from_letter in alpha_set:
slice_points = sorted([0] + [random.uniform(0, p_err) for _ in xrange(l - 2)]) + [p_err]
transition_prob = \
[slice_points[idx + 1] - slice_points[idx] for idx in xrange(l - 1)] + [1.0 - p_err]
markov_chain[from_letter] = \
dict(izip(list(alpha_set - {from_letter}) + [from_letter], transition_prob))
return markov_chain
def mutate(self, seq):
"""
:param seq: sequence
:type seq: str
:returns: mutated sequence
:rtype: str
"""
delimiter = self.delimiter
doc_list = list(intersperse(delimiter, seq)) + [delimiter]
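        # interspersing the delimiter creates mutation sites between letters,
        # e.g. 'abc' -> ['a', '-', 'b', '-', 'c', '-'], so a single site
        # change can substitute, insert, or delete a character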
mutation_site = random.randint(0, len(doc_list) - 1)
from_letter = doc_list[mutation_site]
prob_dist = self.chain[from_letter]
to_letter = discrete_sample(prob_dist)
doc_list[mutation_site] = to_letter
return ''.join(el for el in doc_list if el != delimiter)
def perform_simulation(args):
doc_len_mean = args.doc_len_mean
doc_len_sigma = args.doc_len_sigma
c_size_mean = args.c_size_mean
c_size_sigma = args.c_size_sigma
doc_len_min = args.doc_len_min
pos_count = 0
mcg = MarkovChainGenerator()
mcm = MarkovChainMutator(p_err=args.p_err)
data = []
stats = dict()
# pick first letter at random
start = random_string(length=1, alphabet=mcg.alphabet)
positive_ratio = args.pos_ratio
cluster_size = args.cluster_size
simulation_size = args.sim_size
if cluster_size is None:
# generate some cluster sizes until we approximately reach pos_ratio
current_pos = 0
expected_pos = positive_ratio * simulation_size
cluster_sizes = []
num_clusters = 0
while current_pos < expected_pos:
cluster_size = gauss_uint_threshold(
threshold=2, mu=c_size_mean, sigma=c_size_sigma)
cluster_sizes.append(cluster_size)
current_pos += cluster_size
num_clusters += 1
logging.info("Creating %d variable-length clusters", num_clusters)
else:
# calculate from simulation size
stats['cluster_size'] = cluster_size
num_clusters = int(simulation_size * positive_ratio / float(cluster_size))
cluster_sizes = [cluster_size] * num_clusters
logging.info("Creating %d clusters of size %d", num_clusters, cluster_size)
stats['num_clusters'] = num_clusters
for c_id, cluster_size in enumerate(cluster_sizes):
doc_length = gauss_uint_threshold(
threshold=doc_len_min, mu=doc_len_mean, sigma=doc_len_sigma)
master = mcg.generate_str(start, doc_length)
if len(master) > 0:
start = master[-1]
for doc_id in xrange(cluster_size):
data.append(("{}:{}".format(c_id + 1, doc_id), mcm.mutate(master)))
pos_count += 1
stats['num_positives'] = pos_count
num_negatives = max(0, simulation_size - pos_count)
for neg_idx in xrange(num_negatives):
doc_length = gauss_uint_threshold(
threshold=doc_len_min, mu=doc_len_mean, sigma=doc_len_sigma)
master = mcg.generate_str(start, doc_length)
if len(master) > 0:
start = master[-1]
data.append(("{}".format(neg_idx), master))
logging.info("Positives: %d, Negatives: %d", pos_count, num_negatives)
stats['num_negatives'] = num_negatives
random.shuffle(data)
return data, stats
def get_clusters(args, data):
cluster = Cluster(width=args.width,
bandwidth=args.bandwidth,
lsh_scheme=args.lsh_scheme,
kmin=args.kmin,
hashfun=args.hashfun)
shingler = Shingler(
span=args.shingle_span,
skip=args.shingle_skip,
kmin=args.shingle_kmin,
unique=bool(args.shingle_uniq)
)
content_dict = dict()
for label, text in data:
content_dict[label] = text
shingles = shingler.get_shingles(text)
cluster.add_item(shingles, label)
return cluster.get_clusters()
def load_simulation(args):
def iter_simulation(sim_iter):
for line in sim_iter:
label, text = line.split(" ")
yield (label, text.strip())
iterator = args.input
namespace = json.loads(iterator.next())
return namespace, iter_simulation(iterator)
def load_clustering(args):
def iter_clustering(clust_iter):
for line in clust_iter:
yield json.loads(line)
iterator = args.input
namespace = json.loads(iterator.next())
return namespace, iter_clustering(iterator)
def class_is_positive(point):
return ':' in point
def cluster_is_positive(cluster):
return len(cluster) > 1
def point_to_class_label(point_idx, point, neg_label=None):
"""Return class label given a point
"""
if class_is_positive(point):
label, _ = point.split(':')
label = int(label)
elif neg_label is None:
label = -point_idx
else:
label = neg_label
return label
def cluster_to_cluster_label(cluster_idx, cluster, neg_label=None):
"""Return cluster label given a cluster
"""
if cluster_is_positive(cluster):
label = cluster_idx
elif neg_label is None:
label = -cluster_idx
else:
label = neg_label
return label
def clusters_to_labels(cluster_iter, double_negs=False, join_negs=True):
"""
:param double_negs: whether to exclude double negatives
:param join_negs: if set to true, both negative classes and negative
clusters are labeled with zero
Default behavior:
>>> clusters = [["5:6", "8", "5:1", "5:3", "7"], ["76"], ["69"]]
>>> clusters_to_labels(clusters, double_negs=False, join_negs=True)
([5, 0, 5, 5, 0], [1, 1, 1, 1, 1])
Other behaviors:
>>> clusters_to_labels(clusters, double_negs=True, join_negs=True)
([5, 0, 5, 5, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0])
>>> clusters_to_labels(clusters, double_negs=True, join_negs=False)
([5, -2, 5, 5, -5, -6, -7], [1, 1, 1, 1, 1, -2, -3])
>>> clusters_to_labels(clusters, double_negs=False, join_negs=False)
([5, -2, 5, 5, -5], [1, 1, 1, 1, 1])
"""
labels_true = []
labels_pred = []
neg_label = 0 if join_negs else None
point_idx = 1
for cluster_idx, cluster in enumerate(cluster_iter, start=1):
cluster_label = cluster_to_cluster_label(cluster_idx, cluster, neg_label=neg_label)
for point in cluster:
# Both negative classes and negative clusters are labeled with
# either a zero or a negative cluster index.
class_label = point_to_class_label(point_idx, point, neg_label=neg_label)
if double_negs or (class_label > 0 or cluster_label > 0):
labels_true.append(class_label)
labels_pred.append(cluster_label)
point_idx += 1
return labels_true, labels_pred
def do_simulation(args):
if args.seed is not None:
random.seed(args.seed)
data, stats = perform_simulation(args)
namespace = utils.serialize_args(args)
namespace.update(stats)
output = args.output
write_json_line(output, namespace)
for i, seq in data:
output.write("%s %s\n" % (i, seq))
LEGEND_METRIC_KWARGS = {
'time_wall': dict(loc='upper left'),
'time_cpu': dict(loc='upper left'),
}
def append_scores(cm, pairs, metrics):
for metric in metrics:
try:
scores = cm.get_score(metric)
except AttributeError:
logging.warn("Method %s not defined", metric)
continue
else:
if isiterable(scores):
for idx, score in enumerate(scores):
pairs.append(("%s-%d" % (metric, idx), score))
else:
pairs.append((metric, scores))
def add_incidence_metrics(args, clusters, pairs):
"""Add metrics based on incidence matrix of classes and clusters
"""
args_metrics = args.metrics
if set(utils.INCIDENCE_METRICS) & set(args_metrics):
from lsh_hdc.metrics import ClusteringMetrics
labels = clusters_to_labels(
clusters,
double_negs=bool(args.double_negs),
join_negs=bool(args.join_negs)
)
cm = ClusteringMetrics.from_labels(*labels)
pairwise_metrics = set(utils.PAIRWISE_METRICS) & set(args_metrics)
append_scores(cm, pairs, pairwise_metrics)
contingency_metrics = set(utils.CONTINGENCY_METRICS) & set(args_metrics)
append_scores(cm, pairs, contingency_metrics)
def add_ranking_metrics(args, clusters, pairs):
"""Add metrics based on ROC and Lift curves
"""
args_metrics = args.metrics
if set(utils.ROC_METRICS) & set(args_metrics):
from lsh_hdc.ranking import RocCurve
rc = RocCurve.from_clusters(clusters, is_class_pos=class_is_positive)
if 'roc_auc' in args_metrics:
pairs.append(('roc_auc', rc.auc_score()))
if 'roc_max_info' in args_metrics:
pairs.append(('roc_max_info', rc.max_informedness()))
if set(utils.LIFT_METRICS) & set(args_metrics):
from lsh_hdc.ranking import aul_score_from_clusters as aul_score
clusters_2xc = ([class_is_positive(point) for point in cluster]
for cluster in clusters)
if 'aul_score' in args_metrics:
pairs.append(('aul_score', aul_score(clusters_2xc)))
def perform_clustering(args, data):
with PMTimer() as timer:
clusters = get_clusters(args, data)
return clusters, timer.to_dict()
def perform_analysis(args, clusters):
clusters = list(clusters)
pairs = []
add_ranking_metrics(args, clusters, pairs)
add_incidence_metrics(args, clusters, pairs)
return dict(pairs)
def do_cluster(args):
namespace = {}
sim_namespace, simulation = load_simulation(args)
namespace.update(sim_namespace)
clustering_results, clustering_stats = perform_clustering(args, simulation)
clustering_namespace = utils.serialize_args(args)
namespace.update(clustering_namespace)
namespace.update(clustering_stats)
write_json_line(args.output, namespace)
for cluster in clustering_results:
write_json_line(args.output, cluster)
def do_analyze(args):
namespace = {}
clustering_namespace, clustering = load_clustering(args)
namespace.update(clustering_namespace)
analysis_stats = perform_analysis(args, clustering)
namespace.update(analysis_stats)
write_json_line(args.output, namespace)
def create_plots(args, df, metrics):
import matplotlib.pyplot as plt
from palettable import colorbrewer
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('small')
groups = df.groupby([args.group_by])
palette_size = min(max(len(groups), 3), 9)
for metric in metrics:
if metric in df:
colors = cycle(colorbrewer.get_map('Set1', 'qualitative', palette_size).mpl_colors)
fig, ax = plt.subplots()
for color, (label, dfel) in izip(colors, groups):
try:
dfel.plot(
ax=ax, label=label, x=args.x_axis, linewidth='1.3',
y=metric, kind="scatter", logx=True, title=args.fig_title,
facecolors='none', edgecolors=color)
except Exception:
logging.exception("Exception caught plotting %s:%s", metric, label)
fig_filename = "fig_%s.%s" % (metric, args.fig_format)
fig_path = os.path.join(args.output, fig_filename)
ax.legend(prop=fontP, **LEGEND_METRIC_KWARGS.get(metric, {'loc': 'lower right'}))
fig.savefig(fig_path)
plt.close(fig)
def do_mapper(args):
if args.seed is not None:
random.seed(args.seed)
namespace = utils.serialize_args(args)
simulation, simulation_stats = perform_simulation(args)
namespace.update(simulation_stats)
clustering, clustering_stats = perform_clustering(args, simulation)
namespace.update(clustering_stats)
analysis_stats = perform_analysis(args, clustering)
namespace.update(analysis_stats)
args.output.write("%s\n" % json.dumps(namespace))
def do_reducer(args):
import pandas as pd
obj = ndjson2col(read_json_lines(args.input))
df = pd.DataFrame.from_dict(obj)
subset = get_df_subset(
df, [args.group_by, args.x_axis, args.trial] + args.metrics)
csv_path = os.path.join(args.output, "summary.csv")
logging.info("Writing brief summary to %s", csv_path)
subset.to_csv(csv_path)
create_plots(args, subset, args.metrics)
def add_simul_args(p_simul):
p_simul.add_argument(
'--seed', type=int, default=None,
help='Random number generator seed for reproducibility')
p_simul.add_argument(
'--sim_size', type=int, default=1000,
help='Simulation size (when number of clusters is not given)')
p_simul.add_argument(
'--cluster_size', type=int, default=None,
help='cluster size (overrides cluster mean and sigma)')
p_simul.add_argument(
'--c_size_mean', type=float, default=4,
help='Mean of cluster size')
p_simul.add_argument(
'--c_size_sigma', type=float, default=10,
help='Std. dev. of cluster size')
p_simul.add_argument(
'--pos_ratio', type=float, default=0.1,
help='ratio of positives to all')
p_simul.add_argument(
'--p_err', type=float, default=0.05,
help='Probability of error at any location in sequence')
p_simul.add_argument(
'--doc_len_min', type=int, default=3,
help='Minimum sequence length')
p_simul.add_argument(
'--doc_len_mean', type=float, default=8,
help='Mean of sequence length')
p_simul.add_argument(
'--doc_len_sigma', type=float, default=10,
help='Std. dev. of sequence length')
def add_clust_args(p_clust):
p_clust.add_argument(
'--hashfun', type=str, default='builtin',
choices=HASH_FUNC_TABLE.keys(),
help='Hash function to use')
p_clust.add_argument(
'--shingle_span', type=int, default=4,
help='shingle length (in tokens)')
p_clust.add_argument(
'--shingle_skip', type=int, default=0,
help='words to skip')
p_clust.add_argument(
'--shingle_uniq', type=int, default=1,
help='whether to deduplicate shingles')
p_clust.add_argument(
'--shingle_kmin', type=int, default=0,
help='minimum expected shingles')
p_clust.add_argument(
'--width', type=int, default=3,
help='length of minhash feature vectors')
p_clust.add_argument(
'--bandwidth', type=int, default=3,
help='rows per band')
p_clust.add_argument(
'--kmin', type=int, default=3,
help='number of minhashes to sample')
p_clust.add_argument(
'--lsh_scheme', type=str, default="a0",
help='LSH binning scheme')
def add_analy_args(parser):
parser.add_argument(
'--group_by', type=str, default='hashfun',
help='Field to group by')
parser.add_argument(
'--x_axis', type=str, default='cluster_size',
help='Which column to plot as X axis')
parser.add_argument(
'--trial', type=str, default='seed',
help='Which column to average')
parser.add_argument(
'--double_negs', type=int, default=0,
help='exclude points that are negatives in source and clustering')
parser.add_argument(
'--join_negs', type=int, default=1,
help='label negative classes and clusters with the same label')
parser.add_argument(
'--metrics', type=str, nargs='*',
default=('roc_auc', 'matthews_corr', 'time_cpu'),
help='Which metrics to calculate')
def parse_args(args=None):
parser = PathArgumentParser(
description="Simulate data and/or run analysis")
parser.add_argument(
'--logging', type=str, default='WARN', help="Logging level",
choices=[key for key in logging._levelNames.keys() if isinstance(key, str)])
subparsers = parser.add_subparsers()
p_simul = subparsers.add_parser('simulate', help='generate simulation')
add_simul_args(p_simul)
p_simul.add_argument(
'--output', type=GzipFileType('w'), default=sys.stdout, help='File output')
p_simul.set_defaults(func=do_simulation)
p_clust = subparsers.add_parser('cluster', help='run clustering')
p_clust.add_argument(
'--input', type=GzipFileType('r'), default=sys.stdin, help='File input')
add_clust_args(p_clust)
p_clust.add_argument(
'--output', type=GzipFileType('w'), default=sys.stdout, help='File output')
p_clust.set_defaults(func=do_cluster)
p_analy = subparsers.add_parser('analyze', help='run analysis')
p_analy.add_argument(
'--input', type=GzipFileType('r'), default=sys.stdin, help='File input')
add_analy_args(p_analy)
p_analy.add_argument(
'--output', type=GzipFileType('w'), default=sys.stdout, help='File output')
p_analy.set_defaults(func=do_analyze)
p_mapper = subparsers.add_parser(
'mapper', help='Perform multiple steps')
add_simul_args(p_mapper)
add_clust_args(p_mapper)
add_analy_args(p_mapper)
p_mapper.add_argument(
'--output', type=GzipFileType('w'), default=sys.stdout, help='File output')
p_mapper.set_defaults(func=do_mapper)
p_reducer = subparsers.add_parser('reducer', help='summarize analysis results')
add_analy_args(p_reducer)
p_reducer.add_argument(
'--input', type=GzipFileType('r'), default=sys.stdin, help='File input')
p_reducer.add_argument(
'--fig_title', type=str, default=None, help='Title (for figures generated)')
p_reducer.add_argument(
'--fig_format', type=str, default='svg', help='Figure format')
p_reducer.add_argument(
'--output', type=str, metavar='DIR', help='Output directory')
p_reducer.set_defaults(func=do_reducer)
namespace = parser.parse_args(args)
return namespace
def run(args):
logging.basicConfig(level=getattr(logging, args.logging))
args.func(args)
if __name__ == '__main__':
run(parse_args())
|
escherba/lsh-hdc
|
lsh_hdc/monte_carlo/strings.py
|
Python
|
bsd-3-clause
| 21,732
|
[
"Gaussian"
] |
05659c5bffb44b07b2650497bf72d2a02025296e71ae8a3232b4c2a7a8a5f91e
|
__author__ = 'adeb'
import numpy as np
import theano
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv, conv3d2d
from theano.tensor.shared_randomstreams import RandomStreams
from spynet.utils.utilities import share, get_h5file_data
from spynet.models.max_pool_3d import max_pool_3d
class LayerBlock():
"""
Abstract class that represents a function from an input space to an output space.
It is the building block of a Layer object.
"""
name = None
def __init__(self):
self.params = []
def forward(self, x, batch_size, run_time):
"""Return the output of the layer block
Args:
x (theano.tensor.TensorType): input of the layer block
batch_size (int): size of the batch of data being processed by the network
run_time (boolean): equals true when the function is used at runtime and false when it is used during
training. This is useful for dropout.
Returns:
(theano.tensor.TensorType): output of the layer block
"""
raise NotImplementedError
def save_parameters(self, h5file, name):
"""
Save all parameters of the layer block to an HDF5 file.
"""
pass
def load_parameters(self, h5file, name):
"""
Load all parameters of the layer block from an HDF5 file.
"""
pass
def update_params(self):
pass
def __str__(self):
msg = "[{}] \n".format(self.name)
return msg
class LayerBlockIdentity(LayerBlock):
"""
Identity function
"""
name = "Identity Layer block"
def __init__(self):
LayerBlock.__init__(self)
def forward(self, x, batch_size, run_time):
return x
class LayerBlockNoise(LayerBlock):
"""
Noise layer block that adds a random signal on the fly
"""
def __init__(self):
LayerBlock.__init__(self)
numpy_rng = np.random.RandomState(123)
self.theano_rng = RandomStreams(numpy_rng.randint(2**30))
class LayerBlockNoiseDropoutBernoulli(LayerBlockNoise):
"""
Noise block layer that adds bernoulli noise on the fly
"""
name = "Bernoulli Layer block"
def __init__(self, bernoulli_p):
LayerBlockNoise.__init__(self)
self.bernoulli_p = bernoulli_p
def forward(self, x, batch_size, run_time):
if run_time:
return x * self.bernoulli_p
else:
return x * self.theano_rng.binomial(size=x.shape, n=1, p=self.bernoulli_p, dtype=theano.config.floatX)
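# Note: this is classic (non-inverted) dropout -- during training each unit is
# kept with probability bernoulli_p, and at run time activations are rescaled
# by the same bernoulli_p so expected magnitudes match between the two modes.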
class LayerBlockGaussianNoise(LayerBlockNoise):
"""
Noise block layer that adds gaussian noise on the fly
"""
name = "Gaussian noise Layer block"
def __init__(self):
LayerBlockNoise.__init__(self)
def forward(self, x, batch_size, run_time):
return x + self.theano_rng.normal(size=x.shape, avg=0, std=0.2, dtype=theano.config.floatX)
class LayerBlockMultiplication(LayerBlock):
"""
Block that multiplies the input elementwise by a vector of the same size
"""
name = "Multiplication Layer block"
def __init__(self, vec):
LayerBlock.__init__(self)
self.vec = share(vec)
def forward(self, x, batch_size, run_time):
return x * self.vec
class LayerBlockNormalization(LayerBlock):
"""
Block that normalizes the input so it sums to one
"""
name = "Normalization Layer block"
def __init__(self):
LayerBlock.__init__(self)
def forward(self, x, batch_size, run_time):
return x / theano.tensor.sum(x)
class LayerBlockOfNeurons(LayerBlock):
"""
Abstract class defining a group of neurons.
Attributes:
name (string): Name of the layer block (used for printing or writing)
w (theano shared numpy array): Weights of the layer block
b (theano shared numpy array): Biases of the layer block
params (list): [w,b]
neuron_type (NeuronType object): defines the type of the neurons of the layer block
"""
def __init__(self, neuron_type):
LayerBlock.__init__(self)
self.w = None
self.b = None
self.neuron_type = neuron_type
def init_parameters(self, w_shape, b_shape):
w_bound = self.compute_bound_parameters_virtual()
# initialize weights with random weights
self.w = share(np.asarray(
np.random.uniform(low=-w_bound, high=w_bound, size=w_shape),
dtype=theano.config.floatX), "w")
# the bias is a 1D tensor -- one bias per output feature map
b_values = 0.1 + np.zeros(b_shape, dtype=theano.config.floatX) # Slightly positive for RELU units
self.b = share(b_values, "b")
self.update_params()
def compute_bound_parameters_virtual(self):
raise NotImplementedError
def save_parameters(self, h5file, name):
h5file.create_dataset(name + "/w", data=self.w.get_value(), dtype='f')
h5file.create_dataset(name + "/b", data=self.b.get_value(), dtype='f')
def load_parameters(self, h5file, name):
self.w.set_value(get_h5file_data(h5file, name + "/w"), borrow=True)
self.b.set_value(get_h5file_data(h5file, name + "/b"), borrow=True)
def update_params(self):
self.params = [self.w, self.b]
def __str__(self):
msg = "[{}] with [{}] \n".format(self.name, self.neuron_type)
msg += self.print_virtual()
n_parameters = 0
for p in self.params:
n_parameters += p.get_value().size
msg += "Number of parameters: {} \n".format(n_parameters)
return msg
def print_virtual(self):
return ""
class LayerBlockFullyConnected(LayerBlockOfNeurons):
"""
Layer block in which each input is connected to all the block neurons
"""
name = "Fully connected layer block"
def __init__(self, neuron_type, n_in, n_out):
LayerBlockOfNeurons.__init__(self, neuron_type)
self.n_in = n_in
self.n_out = n_out
self.init_parameters((self.n_in, self.n_out), (self.n_out,))
def compute_bound_parameters_virtual(self):
return np.sqrt(6. / (self.n_in + self.n_out))
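# Note: this is the Glorot/Bengio uniform initialization bound; e.g. for
# n_in=784 and n_out=100, weights are drawn from U(-sqrt(6/884), +sqrt(6/884)),
# i.e. roughly U(-0.082, +0.082).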
def set_w(self, new_w):
self.w.set_value(new_w, borrow=True)
self.n_in, self.n_out = new_w.shape
def forward(self, x, batch_size, run_time):
return self.neuron_type.activation_function(theano.tensor.dot(x, self.w) + self.b)
def print_virtual(self):
return "Number of inputs: {} \nNumber of outputs: {}\n".format(self.n_in, self.n_out)
class LayerBlockConv2DAbstract(LayerBlockOfNeurons):
"""
Abstract class defining common components of LayerConv2D and LayerConvPool2D
"""
def __init__(self, neuron_type, in_shape, flt_shape):
"""
Args:
in_shape (tuple or list of length 3):
(num input feature maps, image height, image width)
flt_shape (tuple or list of length 4):
(number of filters, num input feature maps, filter height, filter width)
"""
LayerBlockOfNeurons.__init__(self, neuron_type)
self.in_shape = in_shape
self.filter_shape = flt_shape
if in_shape[0] != flt_shape[1]:
raise Exception("The number of feature maps is not consistent")
self.init_parameters(flt_shape, (flt_shape[0],))
def forward(self, x, batch_size, run_time):
img_batch_shape = (batch_size,) + self.in_shape
x = x.reshape(img_batch_shape)
# Convolve input feature maps with filters
conv_out = conv.conv2d(input=x,
filters=self.w,
image_shape=img_batch_shape,
filter_shape=self.filter_shape)
return self.forward_virtual(conv_out)
def forward_virtual(self, conv_out):
raise NotImplementedError
def print_virtual(self):
return "Image shape: {}\nFilter shape: {}\n".format(self.in_shape, self.filter_shape)
class LayerBlockConv2D(LayerBlockConv2DAbstract):
"""
2D convolutional layer block
"""
name = "2D convolutional layer block"
def __init__(self, neuron_type, in_shape, flt_shape):
LayerBlockConv2DAbstract.__init__(self, neuron_type, in_shape, flt_shape)
def compute_bound_parameters_virtual(self):
fan_in = np.prod(self.filter_shape[1:])
fan_out = self.filter_shape[0] * np.prod(self.filter_shape[2:])
return np.sqrt(6. / (fan_in + fan_out))
def forward_virtual(self, conv_out):
return self.neuron_type.activation_function(conv_out + self.b.dimshuffle('x', 0, 'x', 'x')).flatten(2)
class LayerBlockConvPool2D(LayerBlockConv2DAbstract):
"""
2D convolutional layer + pooling layer. The reason for not having a separate pooling layer is that the combination
of the two layer blocks can be optimized.
"""
name = "2D convolutional + pooling layer"
def __init__(self, neuron_type, in_shape, flt_shape, poolsize=(2, 2)):
self.poolsize = poolsize
LayerBlockConv2DAbstract.__init__(self, neuron_type, in_shape, flt_shape)
def compute_bound_parameters_virtual(self):
fan_in = np.prod(self.filter_shape[1:])
fan_out = (self.filter_shape[0] * np.prod(self.filter_shape[2:]) / np.prod(self.poolsize))
return np.sqrt(6. / (fan_in + fan_out))
def forward_virtual(self, conv_out):
# Downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(input=conv_out,
ds=self.poolsize,
ignore_border=True)
return self.neuron_type.activation_function(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')).flatten(2)
def print_virtual(self):
return LayerBlockConv2DAbstract.print_virtual(self) + "Pool size: {}\n".format(self.poolsize)
class LayerBlockConvPool3D(LayerBlockOfNeurons):
"""
3D convolutional layer block + pooling layer block
"""
name = "3D convolutional + pooling layer block"
def __init__(self, neuron_type, in_channels, in_shape, flt_channels, flt_shape, poolsize):
"""
Args:
in_channels (int): number of input channels
in_shape (tuple of length 3): shape of the input (in_width, in_height, in_depth)
flt_channels (int): number of filters (output feature maps)
flt_shape (tuple of length 3): shape of the filters (flt_width, flt_height, flt_depth)
poolsize (tuple of length 3): window of the pooling operation
"""
LayerBlockOfNeurons.__init__(self, neuron_type)
in_width, in_height, in_depth = self.in_shape = in_shape
flt_width, flt_height, flt_depth = self.flt_shape = flt_shape
self.in_channels = in_channels
self.flt_channels = flt_channels
self.image_shape = (in_depth, in_channels, in_height, in_width)
self.filter_shape = (flt_channels, flt_depth, in_channels, flt_height, flt_width)
self.poolsize = poolsize
self.init_parameters(self.filter_shape, (self.filter_shape[0],))
def compute_bound_parameters_virtual(self):
fan_in = np.prod(self.in_shape)
fan_out = self.flt_channels * np.prod(self.flt_shape) / np.prod(self.poolsize)
return np.sqrt(6. / (fan_in + fan_out))
def forward(self, x, batch_size, run_time):
img_batch_shape = (batch_size,) + self.image_shape
x = x.reshape(img_batch_shape)
# Convolve input feature maps with filters
conv_out = conv3d2d.conv3d(signals=x,
filters=self.w,
signals_shape=img_batch_shape,
filters_shape=self.filter_shape,
border_mode='valid')
perm = [0, 2, 1, 3, 4] # Permutation is needed due to the pooling function prototype
pooled_out = max_pool_3d(conv_out.dimshuffle(perm), self.poolsize, ignore_border=True)
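# Note (assuming conv3d2d's (batch, depth, channels, height, width) layout):
# swapping axes 1 and 2 puts channels ahead of depth so max_pool_3d pools over
# the trailing (depth, height, width) axes; reapplying the same permutation
# (it is its own inverse) restores the original layout before the bias is added.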
return self.neuron_type.activation_function(pooled_out.dimshuffle(perm)
+ self.b.dimshuffle('x', 'x', 0, 'x', 'x')).flatten(2)
def print_virtual(self):
return "Image shape: {} \n Filter shape: {} \n Pool size: {} \n".format(
self.image_shape, self.filter_shape, self.poolsize)
|
adbrebs/spynet
|
models/layer_block.py
|
Python
|
bsd-2-clause
| 12,434
|
[
"Gaussian"
] |
987cdf7bc69c7a19ba22dd6507c434f84d8f5c68da5249bd219a0d3faf2d06da
|
########################################################################
# This example illustrates how a function can be used to control a reaction
# rate. This kind of calculation is appropriate when we need to link
# different kinds of physical processes with chemical reactions, for
# example, membrane curvature with molecule accumulation. The use of
# functions to modify reaction rates should be avoided in purely chemical
# systems since they obscure the underlying chemistry, and do not map
# cleanly to stochastic calculations.
#
# In this example we simply have a molecule C that controls the forward
# rate of a reaction that converts A to B. C is a function of location
# on the cylinder, and is fixed. In more elaborate computations we could
# have a function of multiple molecules, some of which could be changing and
# others could be buffered.
#
# Copyright (C) Upinder S. Bhalla NCBS 2018
# Released under the terms of the GNU Public License V3.
########################################################################
import numpy as np
import moose
import rdesigneur as rd
plot_ = False
def makeFuncRate():
model = moose.Neutral( '/library' )
model = moose.Neutral( '/library/chem' )
compt = moose.CubeMesh( '/library/chem/compt' )
compt.volume = 1e-15
A = moose.Pool( '/library/chem/compt/A' )
B = moose.Pool( '/library/chem/compt/B' )
C = moose.Pool( '/library/chem/compt/C' )
reac = moose.Reac( '/library/chem/compt/reac' )
func = moose.Function( '/library/chem/compt/reac/func' )
func.x.num = 1
func.expr = "(x0/1e8)^2"
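# Note: x0 is the molecule count of C delivered through the 'nOut' message
# connected below; 1e8 is a scaling constant (presumably chosen so Kf stays of
# order one at these copy numbers), so Kf grows quadratically with C.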
moose.connect( C, 'nOut', func.x[0], 'input' )
moose.connect( func, 'valueOut', reac, 'setNumKf' )
moose.connect( reac, 'sub', A, 'reac' )
moose.connect( reac, 'prd', B, 'reac' )
A.concInit = 1
B.concInit = 0
C.concInit = 0
reac.Kb = 1
def test():
makeFuncRate()
rdes = rd.rdesigneur(
turnOffElec = True,
#This subdivides the 50-micron cylinder into 2 micron voxels
diffusionLength = 2e-6,
cellProto = [['somaProto', 'soma', 5e-6, 50e-6]],
chemProto = [['chem', 'chem']],
chemDistrib = [['chem', 'soma', 'install', '1' ]],
plotList = [['soma', '1', 'dend/A', 'conc', 'A conc', 'wave'],
['soma', '1', 'dend/C', 'conc', 'C conc', 'wave']],
)
rdes.buildModel()
ts = moose.wildcardFind('/##[TYPE=Table2]')
C = moose.element( '/model/chem/dend/C' )
C.vec.concInit = [ 1+np.sin(x/5.0) for x in range( len(C.vec) ) ]
moose.reinit()
moose.start(10)
if plot_:
rdes.display()
ts = moose.wildcardFind( '/##[TYPE=Table2]')
mat = []
assert len(ts) == 50, len(ts)
for t in ts:
print(t)
if 'plot1' in t.path:
mat.append(t.vector)
mat = np.matrix(mat)
exMean, exStd = 1.1619681711817156, 0.6944155817587526
assert np.isclose( np.mean(mat), exMean), (np.mean(mat), exMean)
assert np.isclose(np.std(mat), exStd), np.std(mat)
assert( np.isclose(np.mean(mat, axis=0), exMean).all() )
assert( np.isclose(np.std(mat, axis=0), exStd).all() )
assert( np.isclose(0.0, np.std(mat, axis=1)).all())
def main():
test()
if __name__ == '__main__':
main()
|
dilawar/moose-core
|
tests/core/test_function_controls_reac_rate.py
|
Python
|
gpl-3.0
| 3,293
|
[
"MOOSE"
] |
b9306124af3f27c2ad5babc84d4ddf37706d5698759bed95beffbc7b17f18ae5
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Truhlar) of non-hydrogen-transfer barrier height reactions.
| Geometries and Reaction energies from Truhlar and coworkers at site http://t1.chem.umn.edu/misc/database_group/database_therm_bh/non_H.htm.
- **cp** ``'off'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
"""
import re
import qcdb
# <<< NHTBH Database Module >>>
dbse = 'NHTBH'
isOS = 'true'
# <<< Database Members >>>
HRXN = range(1, 39)
HRXN_SM = [3, 4, 31, 32]
HRXN_LG = [36]
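# Note: the 'small' and 'large' subsets named in the module docstring map to
# HRXN_SM and HRXN_LG above. Reactions come in forward/reverse pairs (e.g. 1
# and 2) that reference the same transition-state reagent.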
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV['%s-%s' % (dbse, 1)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'N2O' ),
'%s-%s-reagent' % (dbse, 'N2OHts') ]
RXNM['%s-%s' % (dbse, 1)] = dict(zip(ACTV['%s-%s' % (dbse, 1)], [-1, -1, +1]))
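# Note: a reaction energy is assembled as
#   E(rxn) = sum_i RXNM[rxn][reagent_i] * E(reagent_i),
# so reaction 1 above evaluates to E(N2OHts) - E(H) - E(N2O), the forward
# barrier height of H + N2O -> OH + N2.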
ACTV['%s-%s' % (dbse, 2)] = ['%s-%s-reagent' % (dbse, 'OH' ),
'%s-%s-reagent' % (dbse, 'N2' ),
'%s-%s-reagent' % (dbse, 'N2OHts') ]
RXNM['%s-%s' % (dbse, 2)] = dict(zip(ACTV['%s-%s' % (dbse, 2)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 3)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'HF' ),
'%s-%s-reagent' % (dbse, 'HFHts') ]
RXNM['%s-%s' % (dbse, 3)] = dict(zip(ACTV['%s-%s' % (dbse, 3)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 4)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'HF' ),
'%s-%s-reagent' % (dbse, 'HFHts') ]
RXNM['%s-%s' % (dbse, 4)] = dict(zip(ACTV['%s-%s' % (dbse, 4)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 5)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'HCl' ),
'%s-%s-reagent' % (dbse, 'HClHts') ]
RXNM['%s-%s' % (dbse, 5)] = dict(zip(ACTV['%s-%s' % (dbse, 5)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 6)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'HCl' ),
'%s-%s-reagent' % (dbse, 'HClHts') ]
RXNM['%s-%s' % (dbse, 6)] = dict(zip(ACTV['%s-%s' % (dbse, 6)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 7)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'CH3F' ),
'%s-%s-reagent' % (dbse, 'HFCH3ts') ]
RXNM['%s-%s' % (dbse, 7)] = dict(zip(ACTV['%s-%s' % (dbse, 7)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 8)] = ['%s-%s-reagent' % (dbse, 'HF' ),
'%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'HFCH3ts') ]
RXNM['%s-%s' % (dbse, 8)] = dict(zip(ACTV['%s-%s' % (dbse, 8)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 9)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'F2' ),
'%s-%s-reagent' % (dbse, 'HF2ts') ]
RXNM['%s-%s' % (dbse, 9)] = dict(zip(ACTV['%s-%s' % (dbse, 9)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 10)] = ['%s-%s-reagent' % (dbse, 'HF' ),
'%s-%s-reagent' % (dbse, 'F' ),
'%s-%s-reagent' % (dbse, 'HF2ts') ]
RXNM['%s-%s' % (dbse, 10)] = dict(zip(ACTV['%s-%s' % (dbse, 10)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 11)] = ['%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'ClF' ),
'%s-%s-reagent' % (dbse, 'CH3FClts') ]
RXNM['%s-%s' % (dbse, 11)] = dict(zip(ACTV['%s-%s' % (dbse, 11)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 12)] = ['%s-%s-reagent' % (dbse, 'CH3F' ),
'%s-%s-reagent' % (dbse, 'Cl' ),
'%s-%s-reagent' % (dbse, 'CH3FClts') ]
RXNM['%s-%s' % (dbse, 12)] = dict(zip(ACTV['%s-%s' % (dbse, 12)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 13)] = ['%s-%s-reagent' % (dbse, 'F_anion'),
'%s-%s-reagent' % (dbse, 'CH3F' ),
'%s-%s-reagent' % (dbse, 'FCH3Fts') ]
RXNM['%s-%s' % (dbse, 13)] = dict(zip(ACTV['%s-%s' % (dbse, 13)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 14)] = ['%s-%s-reagent' % (dbse, 'F_anion'),
'%s-%s-reagent' % (dbse, 'CH3F' ),
'%s-%s-reagent' % (dbse, 'FCH3Fts') ]
RXNM['%s-%s' % (dbse, 14)] = dict(zip(ACTV['%s-%s' % (dbse, 14)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 15)] = ['%s-%s-reagent' % (dbse, 'FCH3Fcomp'),
'%s-%s-reagent' % (dbse, 'FCH3Fts' ) ]
RXNM['%s-%s' % (dbse, 15)] = dict(zip(ACTV['%s-%s' % (dbse, 15)], [-1, +1]))
ACTV['%s-%s' % (dbse, 16)] = ['%s-%s-reagent' % (dbse, 'FCH3Fcomp'),
'%s-%s-reagent' % (dbse, 'FCH3Fts' ) ]
RXNM['%s-%s' % (dbse, 16)] = dict(zip(ACTV['%s-%s' % (dbse, 16)], [-1, +1]))
ACTV['%s-%s' % (dbse, 17)] = ['%s-%s-reagent' % (dbse, 'Cl_anion' ),
'%s-%s-reagent' % (dbse, 'CH3Cl' ),
'%s-%s-reagent' % (dbse, 'ClCH3Clts') ]
RXNM['%s-%s' % (dbse, 17)] = dict(zip(ACTV['%s-%s' % (dbse, 17)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 18)] = ['%s-%s-reagent' % (dbse, 'Cl_anion' ),
'%s-%s-reagent' % (dbse, 'CH3Cl' ),
'%s-%s-reagent' % (dbse, 'ClCH3Clts') ]
RXNM['%s-%s' % (dbse, 18)] = dict(zip(ACTV['%s-%s' % (dbse, 18)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 19)] = ['%s-%s-reagent' % (dbse, 'ClCH3Clcomp'),
'%s-%s-reagent' % (dbse, 'ClCH3Clts' ) ]
RXNM['%s-%s' % (dbse, 19)] = dict(zip(ACTV['%s-%s' % (dbse, 19)], [-1, +1]))
ACTV['%s-%s' % (dbse, 20)] = ['%s-%s-reagent' % (dbse, 'ClCH3Clcomp'),
'%s-%s-reagent' % (dbse, 'ClCH3Clts' ) ]
RXNM['%s-%s' % (dbse, 20)] = dict(zip(ACTV['%s-%s' % (dbse, 20)], [-1, +1]))
ACTV['%s-%s' % (dbse, 21)] = ['%s-%s-reagent' % (dbse, 'F_anion' ),
'%s-%s-reagent' % (dbse, 'CH3Cl' ),
'%s-%s-reagent' % (dbse, 'FCH3Clts') ]
RXNM['%s-%s' % (dbse, 21)] = dict(zip(ACTV['%s-%s' % (dbse, 21)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 22)] = ['%s-%s-reagent' % (dbse, 'CH3F'),
'%s-%s-reagent' % (dbse, 'Cl_anion'),
'%s-%s-reagent' % (dbse, 'FCH3Clts') ]
RXNM['%s-%s' % (dbse, 22)] = dict(zip(ACTV['%s-%s' % (dbse, 22)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 23)] = ['%s-%s-reagent' % (dbse, 'FCH3Clcomp1'),
'%s-%s-reagent' % (dbse, 'FCH3Clts' ) ]
RXNM['%s-%s' % (dbse, 23)] = dict(zip(ACTV['%s-%s' % (dbse, 23)], [-1, +1]))
ACTV['%s-%s' % (dbse, 24)] = ['%s-%s-reagent' % (dbse, 'FCH3Clcomp2'),
'%s-%s-reagent' % (dbse, 'FCH3Clts' ) ]
RXNM['%s-%s' % (dbse, 24)] = dict(zip(ACTV['%s-%s' % (dbse, 24)], [-1, +1]))
ACTV['%s-%s' % (dbse, 25)] = ['%s-%s-reagent' % (dbse, 'OH_anion'),
'%s-%s-reagent' % (dbse, 'CH3F' ),
'%s-%s-reagent' % (dbse, 'HOCH3Fts') ]
RXNM['%s-%s' % (dbse, 25)] = dict(zip(ACTV['%s-%s' % (dbse, 25)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 26)] = ['%s-%s-reagent' % (dbse, 'CH3OH' ),
'%s-%s-reagent' % (dbse, 'F_anion' ),
'%s-%s-reagent' % (dbse, 'HOCH3Fts') ]
RXNM['%s-%s' % (dbse, 26)] = dict(zip(ACTV['%s-%s' % (dbse, 26)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 27)] = ['%s-%s-reagent' % (dbse, 'HOCH3Fcomp2'),
'%s-%s-reagent' % (dbse, 'HOCH3Fts' ) ]
RXNM['%s-%s' % (dbse, 27)] = dict(zip(ACTV['%s-%s' % (dbse, 27)], [-1, +1]))
ACTV['%s-%s' % (dbse, 28)] = ['%s-%s-reagent' % (dbse, 'HOCH3Fcomp1'),
'%s-%s-reagent' % (dbse, 'HOCH3Fts' ) ]
RXNM['%s-%s' % (dbse, 28)] = dict(zip(ACTV['%s-%s' % (dbse, 28)], [-1, +1]))
ACTV['%s-%s' % (dbse, 29)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'N2' ),
'%s-%s-reagent' % (dbse, 'HN2ts') ]
RXNM['%s-%s' % (dbse, 29)] = dict(zip(ACTV['%s-%s' % (dbse, 29)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 30)] = ['%s-%s-reagent' % (dbse, 'HN2' ),
'%s-%s-reagent' % (dbse, 'HN2ts') ]
RXNM['%s-%s' % (dbse, 30)] = dict(zip(ACTV['%s-%s' % (dbse, 30)], [-1, +1]))
ACTV['%s-%s' % (dbse, 31)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'CO' ),
'%s-%s-reagent' % (dbse, 'HCOts') ]
RXNM['%s-%s' % (dbse, 31)] = dict(zip(ACTV['%s-%s' % (dbse, 31)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 32)] = ['%s-%s-reagent' % (dbse, 'HCO' ),
'%s-%s-reagent' % (dbse, 'HCOts') ]
RXNM['%s-%s' % (dbse, 32)] = dict(zip(ACTV['%s-%s' % (dbse, 32)], [-1, +1]))
ACTV['%s-%s' % (dbse, 33)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'C2H4' ),
'%s-%s-reagent' % (dbse, 'C2H5ts') ]
RXNM['%s-%s' % (dbse, 33)] = dict(zip(ACTV['%s-%s' % (dbse, 33)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 34)] = ['%s-%s-reagent' % (dbse, 'C2H5' ),
'%s-%s-reagent' % (dbse, 'C2H5ts') ]
RXNM['%s-%s' % (dbse, 34)] = dict(zip(ACTV['%s-%s' % (dbse, 34)], [-1, +1]))
ACTV['%s-%s' % (dbse, 35)] = ['%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'C2H4' ),
'%s-%s-reagent' % (dbse, 'C3H7ts') ]
RXNM['%s-%s' % (dbse, 35)] = dict(zip(ACTV['%s-%s' % (dbse, 35)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 36)] = ['%s-%s-reagent' % (dbse, 'C3H7' ),
'%s-%s-reagent' % (dbse, 'C3H7ts') ]
RXNM['%s-%s' % (dbse, 36)] = dict(zip(ACTV['%s-%s' % (dbse, 36)], [-1, +1]))
ACTV['%s-%s' % (dbse, 37)] = ['%s-%s-reagent' % (dbse, 'HCN' ),
'%s-%s-reagent' % (dbse, 'HCNts') ]
RXNM['%s-%s' % (dbse, 37)] = dict(zip(ACTV['%s-%s' % (dbse, 37)], [-1, +1]))
ACTV['%s-%s' % (dbse, 38)] = ['%s-%s-reagent' % (dbse, 'HNC' ),
'%s-%s-reagent' % (dbse, 'HCNts') ]
RXNM['%s-%s' % (dbse, 38)] = dict(zip(ACTV['%s-%s' % (dbse, 38)], [-1, +1]))
# <<< Reference Values >>>
BIND = {}
BIND['%s-%s' % (dbse, 1)] = 18.14
BIND['%s-%s' % (dbse, 2)] = 83.22
BIND['%s-%s' % (dbse, 3)] = 42.18
BIND['%s-%s' % (dbse, 4)] = 42.18
BIND['%s-%s' % (dbse, 5)] = 18.00
BIND['%s-%s' % (dbse, 6)] = 18.00
BIND['%s-%s' % (dbse, 7)] = 30.38
BIND['%s-%s' % (dbse, 8)] = 57.02
BIND['%s-%s' % (dbse, 9)] = 2.27
BIND['%s-%s' % (dbse, 10)] = 106.18
BIND['%s-%s' % (dbse, 11)] = 7.43
BIND['%s-%s' % (dbse, 12)] = 60.17
BIND['%s-%s' % (dbse, 13)] = -0.34
BIND['%s-%s' % (dbse, 14)] = -0.34
BIND['%s-%s' % (dbse, 15)] = 13.38
BIND['%s-%s' % (dbse, 16)] = 13.38
BIND['%s-%s' % (dbse, 17)] = 3.10
BIND['%s-%s' % (dbse, 18)] = 3.10
BIND['%s-%s' % (dbse, 19)] = 13.61
BIND['%s-%s' % (dbse, 20)] = 13.61
BIND['%s-%s' % (dbse, 21)] = -12.54
BIND['%s-%s' % (dbse, 22)] = 20.11
BIND['%s-%s' % (dbse, 23)] = 2.89
BIND['%s-%s' % (dbse, 24)] = 29.62
BIND['%s-%s' % (dbse, 25)] = -2.78
BIND['%s-%s' % (dbse, 26)] = 17.33
BIND['%s-%s' % (dbse, 27)] = 10.96
BIND['%s-%s' % (dbse, 28)] = 47.20
BIND['%s-%s' % (dbse, 29)] = 14.69
BIND['%s-%s' % (dbse, 30)] = 10.72
BIND['%s-%s' % (dbse, 31)] = 3.17
BIND['%s-%s' % (dbse, 32)] = 22.68
BIND['%s-%s' % (dbse, 33)] = 1.72
BIND['%s-%s' % (dbse, 34)] = 41.75
BIND['%s-%s' % (dbse, 35)] = 6.85
BIND['%s-%s' % (dbse, 36)] = 32.97
BIND['%s-%s' % (dbse, 37)] = 48.16
BIND['%s-%s' % (dbse, 38)] = 33.11
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = '{ H + N2O <-- [HN2O] } --> OH + N2'
TAGL['%s-%s' % (dbse, 2)] = 'H + N2O <-- { [HN2O] --> OH + N2 }'
TAGL['%s-%s' % (dbse, 3)] = '{ H + FH <-- [HFH] } --> HF + H'
TAGL['%s-%s' % (dbse, 4)] = 'H + FH <-- { [HFH] --> HF + H }'
TAGL['%s-%s' % (dbse, 5)] = '{ H + ClH <-- [HClH] } --> HCl + H'
TAGL['%s-%s' % (dbse, 6)] = 'H + ClH <-- { [HClH] --> HCl + H }'
TAGL['%s-%s' % (dbse, 7)] = '{ H + FCH3 <-- [HFCH3] } --> HF + CH3'
TAGL['%s-%s' % (dbse, 8)] = 'H + FCH3 <-- { [HFCH3] --> HF + CH3 }'
TAGL['%s-%s' % (dbse, 9)] = '{ H + F2 <-- [HF2] } --> HF + F'
TAGL['%s-%s' % (dbse, 10)] = 'H + F2 <-- { [HF2] --> HF + F }'
TAGL['%s-%s' % (dbse, 11)] = '{ CH3 + FCl <-- [CH3FCl] } --> CH3F + Cl'
TAGL['%s-%s' % (dbse, 12)] = 'CH3 + FCl <-- { [CH3FCl] --> CH3F + Cl }'
TAGL['%s-%s' % (dbse, 13)] = '{ F- + CH3F <-- [FCH3F-] } --> FCH3 + F-'
TAGL['%s-%s' % (dbse, 14)] = 'F- + CH3F <-- { [FCH3F-] --> FCH3 + F- }'
TAGL['%s-%s' % (dbse, 15)] = '{ F- ... CH3F <-- [FCH3F-] } --> FCH3 ... F-'
TAGL['%s-%s' % (dbse, 16)] = 'F- ... CH3F <-- { [FCH3F-] --> FCH3 ... F- }'
TAGL['%s-%s' % (dbse, 17)] = '{ Cl- + CH3Cl <-- [ClCH3Cl-] } --> ClCH3 + Cl-'
TAGL['%s-%s' % (dbse, 18)] = 'Cl- + CH3Cl <-- { [ClCH3Cl-] --> ClCH3 + Cl- }'
TAGL['%s-%s' % (dbse, 19)] = '{ Cl- ... CH3Cl <-- [ClCH3Cl-] } --> ClCH3 ... Cl-'
TAGL['%s-%s' % (dbse, 20)] = 'Cl- ... CH3Cl <-- { [ClCH3Cl-] --> ClCH3 ... Cl- }'
TAGL['%s-%s' % (dbse, 21)] = '{ F- + CH3Cl <-- [FCH3Cl-] } --> FCH3 + Cl-'
TAGL['%s-%s' % (dbse, 22)] = 'F- + CH3Cl <-- { [FCH3Cl-] --> FCH3 + Cl- }'
TAGL['%s-%s' % (dbse, 23)] = '{ F- ... CH3Cl <-- [FCH3Cl-] } --> FCH3 ... Cl-'
TAGL['%s-%s' % (dbse, 24)] = 'F- ... CH3Cl <-- { [FCH3Cl-] --> FCH3 ... Cl- }'
TAGL['%s-%s' % (dbse, 25)] = '{ OH- + CH3F <-- [OHCH3F-] } --> HOCH3 + F-'
TAGL['%s-%s' % (dbse, 26)] = 'OH- + CH3F <-- { [OHCH3F-] --> HOCH3 + F- }'
TAGL['%s-%s' % (dbse, 27)] = '{ OH- ... CH3F <-- [OHCH3F-] } --> HOCH3 ... F-'
TAGL['%s-%s' % (dbse, 28)] = 'OH- ... CH3F <-- { [OHCH3F-] --> HOCH3 ... F- }'
TAGL['%s-%s' % (dbse, 29)] = '{ H + N2 <-- [HN2] } --> HN2'
TAGL['%s-%s' % (dbse, 30)] = 'H + N2 <-- { [HN2] --> HN2 }'
TAGL['%s-%s' % (dbse, 31)] = '{ H + CO <-- [HCO] } --> HCO'
TAGL['%s-%s' % (dbse, 32)] = 'H + CO <-- { [HCO] --> HCO }'
TAGL['%s-%s' % (dbse, 33)] = '{ H + C2H4 <-- [HC2H4] } --> CH3CH2'
TAGL['%s-%s' % (dbse, 34)] = 'H + C2H4 <-- { [HC2H4] --> CH3CH2 }'
TAGL['%s-%s' % (dbse, 35)] = '{ CH3 + C2H4 <-- [CH3C2H4] } --> CH3CH2CH2'
TAGL['%s-%s' % (dbse, 36)] = 'CH3 + C2H4 <-- { [CH3C2H4] --> CH3CH2CH2 }'
TAGL['%s-%s' % (dbse, 37)] = '{ HCN <-- [HCN] } --> HNC'
TAGL['%s-%s' % (dbse, 38)] = 'HCN <-- { [HCN] --> HNC }'
TAGL['%s-%s-reagent' % (dbse, 'C2H4' )] = 'Ethene'
TAGL['%s-%s-reagent' % (dbse, 'C2H5ts' )] = 'Transition State of H + C2H4 <--> CH3CH2'
TAGL['%s-%s-reagent' % (dbse, 'C2H5' )] = 'C2H5'
TAGL['%s-%s-reagent' % (dbse, 'C3H7ts' )] = 'Transition State of CH3 + C2H4 <--> CH3CH2CH2'
TAGL['%s-%s-reagent' % (dbse, 'C3H7' )] = 'C3H7'
TAGL['%s-%s-reagent' % (dbse, 'CH3Cl' )] = 'CH3Cl'
TAGL['%s-%s-reagent' % (dbse, 'CH3FClts' )] = 'Transition State of CH3 + FCl <--> CH3F + Cl'
TAGL['%s-%s-reagent' % (dbse, 'CH3F' )] = 'CH3F'
TAGL['%s-%s-reagent' % (dbse, 'CH3OH' )] = 'Methanol'
TAGL['%s-%s-reagent' % (dbse, 'CH3' )] = 'CH3'
TAGL['%s-%s-reagent' % (dbse, 'ClCH3Clcomp')] = 'Complex of Cl- + CH3Cl'
TAGL['%s-%s-reagent' % (dbse, 'ClCH3Clts' )] = 'Transition State of Cl- + CH3Cl <--> ClCH3 + Cl-'
TAGL['%s-%s-reagent' % (dbse, 'ClF' )] = 'ClF'
TAGL['%s-%s-reagent' % (dbse, 'Cl_anion' )] = 'Chloride Anion'
TAGL['%s-%s-reagent' % (dbse, 'Cl' )] = 'Chlorine Atom'
TAGL['%s-%s-reagent' % (dbse, 'CO' )] = 'Carbon Monoxide'
TAGL['%s-%s-reagent' % (dbse, 'F2' )] = 'Fluorine Molecule'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Clcomp1')] = 'Complex of F- + CH3Cl'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Clcomp2')] = 'Complex of FCH3 + Cl-'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Clts' )] = 'Transition State of F- + CH3Cl <--> FCH3 + Cl-'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Fcomp' )] = 'Complex of F- + CH3F'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Fts' )] = 'Transition State of F- + CH3F <--> FCH3 + F-'
TAGL['%s-%s-reagent' % (dbse, 'F_anion' )] = 'Fluoride Anion'
TAGL['%s-%s-reagent' % (dbse, 'F' )] = 'Fluorine Atom'
TAGL['%s-%s-reagent' % (dbse, 'HClHts' )] = 'Transition State of H + ClH <--> HCl + H'
TAGL['%s-%s-reagent' % (dbse, 'HCl' )] = 'Hydrogen Chloride'
TAGL['%s-%s-reagent' % (dbse, 'HCNts' )] = 'Transition State of HCN <--> HNC'
TAGL['%s-%s-reagent' % (dbse, 'HCN' )] = 'Hydrogen Cyanide'
TAGL['%s-%s-reagent' % (dbse, 'HCOts' )] = 'Transition State of H + CO <--> HCO'
TAGL['%s-%s-reagent' % (dbse, 'HCO' )] = 'HCO'
TAGL['%s-%s-reagent' % (dbse, 'HF2ts' )] = 'Transition State of H + F2 <--> HF + F'
TAGL['%s-%s-reagent' % (dbse, 'HFCH3ts' )] = 'Transition State of H + FCH3 <--> HF + CH3'
TAGL['%s-%s-reagent' % (dbse, 'HFHts' )] = 'Transition State of H + FH <--> HF + H'
TAGL['%s-%s-reagent' % (dbse, 'HF' )] = 'Hydrogen Fluoride'
TAGL['%s-%s-reagent' % (dbse, 'HN2ts' )] = 'Transition State of H + N2 <--> HN2'
TAGL['%s-%s-reagent' % (dbse, 'HN2' )] = 'HN2'
TAGL['%s-%s-reagent' % (dbse, 'HNC' )] = 'HNC'
TAGL['%s-%s-reagent' % (dbse, 'HOCH3Fcomp1')] = 'Complex of HOCH3 + F-'
TAGL['%s-%s-reagent' % (dbse, 'HOCH3Fcomp2')] = 'Complex of OH- + CH3F'
TAGL['%s-%s-reagent' % (dbse, 'HOCH3Fts' )] = 'Transition State of OH- + CH3F <--> HOCH3 + F-'
TAGL['%s-%s-reagent' % (dbse, 'H' )] = 'Hydrogen Atom'
TAGL['%s-%s-reagent' % (dbse, 'N2OHts' )] = 'Transition State of H + N2O <--> OH + N2'
TAGL['%s-%s-reagent' % (dbse, 'N2O' )] = 'N2O'
TAGL['%s-%s-reagent' % (dbse, 'N2' )] = 'Nitrogen Molecule'
TAGL['%s-%s-reagent' % (dbse, 'OH_anion' )] = 'Hydroxide Anion'
TAGL['%s-%s-reagent' % (dbse, 'OH' )] = 'OH'
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-reagent' % (dbse, 'C2H4')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.66559300
C 0.00000000 -0.00000000 -0.66559300
H 0.00000000 0.92149500 1.23166800
H 0.00000000 -0.92149500 1.23166800
H 0.00000000 0.92149500 -1.23166800
H 0.00000000 -0.92149500 -1.23166800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C2H5ts')] = qcdb.Molecule("""
0 2
C -0.56787700 0.00005100 -0.21895800
C 0.75113900 -0.00003600 0.04193200
H -1.49388400 -0.00048800 1.53176500
H -1.10169100 0.92065100 -0.40862600
H -1.10202200 -0.92023400 -0.40911000
H 1.29912800 -0.92234400 0.17376300
H 1.29889900 0.92232500 0.17436300
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C2H5')] = qcdb.Molecule("""
0 2
C -0.25871900 -0.81682900 0.00000000
C -0.25098700 0.67419100 0.00000000
H 0.75883000 -1.22593900 0.00000000
H -0.75883000 -1.21386600 0.88341900
H -0.75883000 -1.21386600 -0.88341900
H -0.17002100 1.22593900 -0.92432000
H -0.17002100 1.22593900 0.92432000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C3H7ts')] = qcdb.Molecule("""
0 2
C -0.47213200 0.64593300 -0.00004300
C -1.38261700 -0.36388500 -0.00000200
H -0.23204400 1.16457500 -0.91726400
H -0.23234200 1.16475900 0.91716900
H -1.72712800 -0.80981000 0.92251900
H -1.72693600 -0.81013100 -0.92243500
C 1.61201500 -0.24218900 0.00003500
H 2.19518200 0.66867100 -0.00126900
H 1.58942300 -0.80961900 -0.91863200
H 1.59024500 -0.80759800 0.91996900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C3H7')] = qcdb.Molecule("""
0 2
C 1.20844000 -0.28718900 0.00005700
C -0.06535900 0.57613200 -0.00005700
C -1.31478700 -0.23951800 -0.00001100
H 1.24136900 -0.92839500 0.88123400
H 1.24139400 -0.92858600 -0.88098000
H 2.10187100 0.33872700 0.00000000
H -0.04821800 1.22685100 -0.87708900
H -0.04827200 1.22703700 0.87683400
H -1.72914600 -0.61577100 0.92443500
H -1.72876300 -0.61641500 -0.92436900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3Cl')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -1.12588600
Cl 0.00000000 0.00000000 0.65683000
H 0.00000000 1.02799300 -1.47026400
H 0.89026800 -0.51399700 -1.47026400
H -0.89026800 -0.51399700 -1.47026400
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3FClts')] = qcdb.Molecule("""
0 2
Cl 1.45474900 -0.00123700 -0.00004000
F -0.32358700 0.00463100 0.00012400
C -2.38741800 -0.00214700 -0.00007300
H -2.49508600 -0.85536100 -0.64940400
H -2.49731300 -0.13867300 1.06313900
H -2.50153700 0.98626900 -0.41373400
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3F')] = qcdb.Molecule("""
0 1
C -0.63207400 0.00000100 -0.00000000
F 0.74911700 0.00000200 -0.00000200
H -0.98318200 -0.33848900 0.97262500
H -0.98322200 1.01155300 -0.19317200
H -0.98320300 -0.67308400 -0.77943700
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3OH')] = qcdb.Molecule("""
0 1
C -0.04642300 0.66306900 0.00000000
O -0.04642300 -0.75506300 0.00000000
H -1.08695600 0.97593800 0.00000000
H 0.86059200 -1.05703900 0.00000000
H 0.43814500 1.07159400 0.88953900
H 0.43814500 1.07159400 -0.88953900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3')] = qcdb.Molecule("""
0 2
C 0.00000000 0.00000000 0.00000000
H 1.07731727 0.00000000 0.00000000
H -0.53865863 0.93298412 0.00000000
H -0.53865863 -0.93298412 -0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'ClCH3Clcomp')] = qcdb.Molecule("""
-1 1
Cl 0.00000000 0.00000000 -2.38473500
C 0.00000000 0.00000000 -0.56633100
H 0.00000000 1.02506600 -0.22437900
H -0.88773400 -0.51253300 -0.22437900
H 0.88773400 -0.51253300 -0.22437900
Cl 0.00000000 0.00000000 2.62421300
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'ClCH3Clts')] = qcdb.Molecule("""
-1 1
Cl 2.32258100 -0.00013200 0.00014000
C -0.00008500 0.00049100 -0.00050900
H 0.00007700 -0.74429000 -0.76760500
H -0.00032000 -0.29144300 1.02802100
H 0.00008100 1.03721800 -0.26195900
Cl -2.32254200 -0.00012900 0.00013000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'ClF')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
Cl 1.63033021 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'Cl_anion')] = qcdb.Molecule("""
-1 1
Cl 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'Cl')] = qcdb.Molecule("""
0 2
Cl 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CO')] = qcdb.Molecule("""
0 1
O 0.00000000 0.00000000 0.00000000
C 1.12960815 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'F2')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
F 1.39520410 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Clcomp1')] = qcdb.Molecule("""
-1 1
Cl 0.00000000 0.00000000 1.62313800
C 0.00000000 0.00000000 -0.22735800
H 0.00000000 1.02632100 -0.55514100
H 0.88882000 -0.51316000 -0.55514100
H -0.88882000 -0.51316000 -0.55514100
F 0.00000000 0.00000000 -2.72930800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Clcomp2')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 -2.64853900
C 0.00000000 0.00000000 -1.24017000
H 0.00000000 1.02471900 -0.88640600
H -0.88743200 -0.51235900 -0.88640600
H 0.88743200 -0.51235900 -0.88640600
Cl 0.00000000 0.00000000 1.99629900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Clts')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 -2.53792900
C 0.00000000 0.00000000 -0.48837200
H 0.00000000 1.06208700 -0.61497200
H -0.91979500 -0.53104400 -0.61497200
H 0.91979500 -0.53104400 -0.61497200
Cl 0.00000000 0.00000000 1.62450100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Fcomp')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 -1.84762600
C 0.00000000 0.00000000 -0.42187300
H 0.00000000 1.02358100 -0.07384300
H -0.88644700 -0.51179100 -0.07384300
H 0.88644700 -0.51179100 -0.07384300
F 0.00000000 0.00000000 2.15348900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Fts')] = qcdb.Molecule("""
-1 1
F 0.00309800 -0.01889200 -0.01545600
C -0.00014900 -0.00014000 1.80785700
H 1.06944900 0.00170800 1.80976100
H -0.53660700 0.92513300 1.79693500
H -0.53260100 -0.92778300 1.81705800
F -0.00319100 0.01997400 3.63184500
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'F_anion')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'F')] = qcdb.Molecule("""
0 2
F 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HClHts')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 1.48580000
Cl 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 -1.48580000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCl')] = qcdb.Molecule("""
0 1
Cl 0.00000000 0.00000000 0.00000000
H 1.27444789 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCNts')] = qcdb.Molecule("""
0 1
C 0.08031900 0.62025800 0.00000000
N 0.08031900 -0.56809500 0.00000000
H -1.04414800 0.25512100 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCN')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -0.50036500
N 0.00000000 0.00000000 0.65264000
H 0.00000000 0.00000000 -1.56629100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCOts')] = qcdb.Molecule("""
0 2
H -1.52086400 1.38882900 0.00000000
C 0.10863300 0.54932900 0.00000000
O 0.10863300 -0.58560100 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCO')] = qcdb.Molecule("""
0 2
H -0.00905700 0.00000000 -0.00708600
C -0.00703500 0.00000000 1.10967800
O 0.95604000 0.00000000 1.78565600
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HF2ts')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 -2.23127300
F 0.00000000 0.00000000 -0.61621800
F 0.00000000 0.00000000 0.86413800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HFCH3ts')] = qcdb.Molecule("""
0 2
H -0.03976400 0.00000000 0.04410600
F -0.04932100 0.00000000 1.28255400
C -0.06154400 0.00000000 2.95115700
H 0.99049700 0.00000000 3.19427500
H -0.59007000 0.91235500 3.18348100
H -0.59007000 -0.91235500 3.18348100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HFHts')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 1.13721700
F 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 -1.13721700
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HF')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
H 0.91538107 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HN2ts')] = qcdb.Molecule("""
0 2
N 0.00000000 0.00000000 0.00000000
N 1.12281100 0.00000000 0.00000000
H 1.78433286 1.26844651 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HN2')] = qcdb.Molecule("""
0 2
N 0.00000000 0.00000000 0.00000000
N 1.17820000 0.00000000 0.00000000
H 1.64496947 0.93663681 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HNC')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -0.73724800
N 0.00000000 0.00000000 0.43208900
H 0.00000000 0.00000000 1.42696000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HOCH3Fcomp1')] = qcdb.Molecule("""
-1 1
C -1.29799700 -0.38951800 -0.00003400
O -0.47722300 0.72802100 0.00005400
H -2.35192200 -0.08023200 -0.00863900
H -1.14085300 -1.03582100 -0.87810100
H -1.15317800 -1.02751300 0.88635900
H 0.51058000 0.37116000 0.00024300
F 1.74901600 -0.19051700 -0.00001000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HOCH3Fcomp2')] = qcdb.Molecule("""
-1 1
F 0.00037100 -2.46834000 0.02139000
C -0.27664200 -1.07441800 -0.00269000
H 0.64929000 -0.51650000 -0.00901600
H -0.84198900 -0.84711900 -0.89707500
H -0.85102800 -0.82658900 0.88141700
O -0.30171300 1.58252400 -0.20654400
H -0.60511200 2.49243400 -0.16430500
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HOCH3Fts')] = qcdb.Molecule("""
-1 1
F 0.02253600 -0.00745300 0.00552900
C -0.01842000 0.00503700 1.76492500
H 1.04805000 0.00524000 1.85414600
H -0.54781900 0.93470700 1.79222400
H -0.54895500 -0.92343300 1.80576200
O 0.00126500 0.01920000 3.75059900
H -0.92676300 0.03161500 3.99758100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'H')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'N2OHts')] = qcdb.Molecule("""
0 2
H -0.30328600 -1.93071200 0.00000000
O -0.86100600 -0.62152600 0.00000000
N 0.00000000 0.25702700 0.00000000
N 1.02733300 0.72910400 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'N2O')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 0.00000000
N 1.12056262 0.00000000 0.00000000
O 2.30761092 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'N2')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 0.00000000
N 1.09710935 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OH_anion')] = qcdb.Molecule("""
-1 1
O 0.00000000 0.00000000 0.00000000
H 0.96204317 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OH')] = qcdb.Molecule("""
0 2
O 0.00000000 0.00000000 0.00000000
H 0.96889819 0.00000000 0.00000000
units angstrom
""")
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-H-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-N2O-reagent' ] = 60.94607766
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-N2OHts-reagent' ] = 65.68644495
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-OH-reagent' ] = 4.36931115
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-N2-reagent' ] = 23.63454766
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HF-reagent' ] = 5.20285489
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HFHts-reagent' ] = 8.60854029
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HCl-reagent' ] = 7.05875275
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HClHts-reagent' ] = 12.28739648
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CH3F-reagent' ] = 37.42304655
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HFCH3ts-reagent' ] = 38.79779200
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CH3-reagent' ] = 9.69236444
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-F2-reagent' ] = 30.72192369
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HF2ts-reagent' ] = 33.44223409
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-F-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-ClF-reagent' ] = 49.66117442
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CH3FClts-reagent' ] = 95.59999471
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-Cl-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-F_anion-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-FCH3Fts-reagent' ] = 66.36618410
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-FCH3Fcomp-reagent' ] = 64.36230187
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-Cl_anion-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CH3Cl-reagent' ] = 51.37857642
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-ClCH3Clts-reagent' ] = 110.27962403
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-ClCH3Clcomp-reagent' ] = 107.04230687
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-FCH3Clts-reagent' ] = 86.10066616
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-FCH3Clcomp1-reagent' ] = 86.07639241
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-FCH3Clcomp2-reagent' ] = 79.90981772
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-OH_anion-reagent' ] = 4.40044460
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HOCH3Fts-reagent' ] = 69.00558005
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CH3OH-reagent' ] = 40.39337431
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HOCH3Fcomp2-reagent' ] = 67.43072234
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HOCH3Fcomp1-reagent' ] = 73.17394204
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HN2ts-reagent' ] = 27.37488066
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HN2-reagent' ] = 27.50439999
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-CO-reagent' ] = 22.48612142
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HCOts-reagent' ] = 25.76648888
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HCO-reagent' ] = 26.50985233
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-C2H4-reagent' ] = 33.42351838
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-C2H5ts-reagent' ] = 36.85248528
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-C2H5-reagent' ] = 36.97781691
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-C3H7ts-reagent' ] = 70.26842595
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-C3H7-reagent' ] = 75.86161869
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HCN-reagent' ] = 23.92417344
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HCNts-reagent' ] = 24.04634812
DATA['NUCLEAR REPULSION ENERGY']['NHTBH-HNC-reagent' ] = 24.19729155
|
kratman/psi4public
|
psi4/share/psi4/databases/NHTBH.py
|
Python
|
gpl-2.0
| 36,640
|
[
"Psi4"
] |
d4717652ae1abc10ce23028b07844ba44d6158fbca124f46f9d9b0264ba43b0a
|
import FluidChannel as fc
import numpy as np
#overall channel dimensions
aLx_p = 1.0
aLy_p = 1.0
aLz_p = 5.0
aNdivs = 21
#sphere position
a_x = 0.5
a_y = 0.5
a_z = 2.0
r = 0.2
#create the obstruction object
myObst = fc.SphereObstruction(r, a_x, a_y, a_z)
#create the fluid channel object
myChan = fc.FluidChannel(Lx_p = aLx_p, Ly_p = aLy_p, Lz_p = aLz_p,
N_divs = aNdivs, obst = myObst)
# write the mat file
myChan.write_mat_file('demo1')
# write vtk of boundary conditions so you can visualize them
myChan.write_bc_vtk()
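# Illustrative variation (a sketch using the same FluidChannel/SphereObstruction
# API as above; the values are arbitrary): a finer lattice and a larger sphere,
# written to a second set of output files.
# myObst2 = fc.SphereObstruction(0.3, a_x, a_y, a_z)
# myChan2 = fc.FluidChannel(Lx_p = aLx_p, Ly_p = aLy_p, Lz_p = aLz_p,
#                           N_divs = 41, obst = myObst2)
# myChan2.write_mat_file('demo2')
# myChan2.write_bc_vtk()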
|
stu314159/pyNFC
|
geom_demo1.py
|
Python
|
mit
| 551
|
[
"VTK"
] |
2bb7663280ca8ad09ad4a661546457d8a909c90a3c5e8ebed48fd91e06bac3b0
|
"""
Implements several extended-ensemble Monte Carlo sampling algorithms.
Here is a short example which shows how to sample from a PDF using the replica
exchange with non-equilibrium switches (RENS) method. It draws 5000 samples from
a 1D normal distribution using the RENS algorithm working on three Markov chains
being generated by the HMC algorithm:
>>> import numpy
>>> from numpy import sqrt
>>> from csb.io.plots import Chart
>>> from csb.statistics.pdf import Normal
>>> from csb.statistics.samplers import State
>>> from csb.statistics.samplers.mc.multichain import ThermostattedMDRENSSwapParameterInfo
>>> from csb.statistics.samplers.mc.multichain import ThermostattedMDRENS, AlternatingAdjacentSwapScheme
>>> from csb.statistics.samplers.mc.singlechain import HMCSampler
>>> # Pick some initial state for the different Markov chains:
>>> initial_state = State(numpy.array([1.]))
>>> # Set standard deviations:
>>> std_devs = [1./sqrt(5), 1. / sqrt(3), 1.]
>>> # Set HMC timesteps and trajectory length:
>>> hmc_timesteps = [0.6, 0.7, 0.6]
>>> hmc_trajectory_length = 20
>>> hmc_gradients = [lambda q, t: 1 / (std_dev ** 2) * q for std_dev in std_devs]
>>> # Set parameters for the thermostatted RENS algorithm:
>>> rens_trajectory_length = 30
>>> rens_timesteps = [0.3, 0.5]
>>> # Set interpolation gradients as a function of the work parameter l:
>>> rens_gradients = [lambda q, l, i=i: (l / (std_devs[i + 1] ** 2) + (1 - l) / (std_devs[i] ** 2)) * q
for i in range(len(std_devs)-1)]
>>> # Initialize HMC samplers:
>>> samplers = [HMCSampler(Normal(sigma=std_devs[i]), initial_state, hmc_gradients[i], hmc_timesteps[i],
hmc_trajectory_length) for i in range(len(std_devs))]
>>> # Create swap parameter objects:
>>> params = [ThermostattedMDRENSSwapParameterInfo(samplers[0], samplers[1], rens_timesteps[0],
rens_trajectory_length, rens_gradients[0]),
ThermostattedMDRENSSwapParameterInfo(samplers[1], samplers[2], rens_timesteps[1],
rens_trajectory_length, rens_gradients[1])]
>>> # Initialize thermostatted RENS algorithm:
>>> algorithm = ThermostattedMDRENS(samplers, params)
>>> # Initialize swapping scheme:
>>> swapper = AlternatingAdjacentSwapScheme(algorithm)
>>> # Initialize empty list which will store the samples:
>>> states = []
>>> for i in range(5000):
if i % 5 == 0:
swapper.swap_all()
states.append(algorithm.sample())
>>> # Print acceptance rates:
>>> print('HMC acceptance rates:', [s.acceptance_rate for s in samplers])
>>> print('swap acceptance rates:', algorithm.acceptance_rates)
>>> # Create and plot histogram for first sampler and numpy.random.normal reference:
>>> chart = Chart()
>>> rawstates = [state[0].position[0] for state in states]
>>> chart.plot.hist([numpy.random.normal(size=5000, scale=std_devs[0]), rawstates], bins=30, normed=True)
>>> chart.plot.legend(['numpy.random.normal', 'RENS + HMC'])
>>> chart.show()
For L{ReplicaExchangeMC} (RE), the procedure is easier because apart from the
two sampler instances the corresponding L{RESwapParameterInfo} objects take
no arguments.
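For illustration, a minimal RE setup (a sketch, not part of the original
example; it reuses the HMC samplers defined above) could look like this:

>>> from csb.statistics.samplers.mc.multichain import ReplicaExchangeMC, RESwapParameterInfo
>>> re_params = [RESwapParameterInfo(samplers[i], samplers[i + 1])
                 for i in range(len(samplers) - 1)]
>>> re_algorithm = ReplicaExchangeMC(samplers, re_params)
>>> re_swapper = AlternatingAdjacentSwapScheme(re_algorithm)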
Every replica exchange algorithm in this module (L{ReplicaExchangeMC}, L{MDRENS},
L{ThermostattedMDRENS}) is used in a similar way. A simulation is always
initialized with a list of samplers (instances of classes derived from
L{AbstractSingleChainMC}) and a list of L{AbstractSwapParameterInfo} objects
suited for the algorithm under consideration. Every L{AbstractSwapParameterInfo}
object holds all the information needed to perform a swap between two samplers.
The usual scheme is to swap only adjacent replicas in a scheme::
1 <--> 2, 3 <--> 4, ...
2 <--> 3, 4 <--> 5, ...
1 <--> 2, 3 <--> 4, ...
This swapping scheme is implemented in the L{AlternatingAdjacentSwapScheme} class,
but different schemes can be easily implemented by deriving from L{AbstractSwapScheme}.
Then the simulation is run by looping over the number of samples to be drawn
and calling the L{AbstractExchangeMC.sample} method of the algorithm. By calling
the L{AbstractSwapScheme.swap_all} method of the specific L{AbstractSwapScheme}
implementation, all swaps defined in the list of L{AbstractSwapParameterInfo}
objects are performed according to the swapping scheme. The
L{AbstractSwapScheme.swap_all} method may be called for example after sampling
intervals of a fixed length or randomly.
"""
import numpy
import csb.numeric
from abc import ABCMeta, abstractmethod
from csb.statistics.samplers import EnsembleState
from csb.statistics.samplers.mc import AbstractMC, Trajectory, MCCollection, augment_state
from csb.statistics.samplers.mc.propagators import MDPropagator, ThermostattedMDPropagator
from csb.statistics.samplers.mc.neqsteppropagator import NonequilibriumStepPropagator
from csb.statistics.samplers.mc.neqsteppropagator import Protocol, Step, ReducedHamiltonian
from csb.statistics.samplers.mc.neqsteppropagator import ReducedHamiltonianPerturbation
from csb.statistics.samplers.mc.neqsteppropagator import HMCPropagation, HMCPropagationParam
from csb.statistics.samplers.mc.neqsteppropagator import HamiltonianSysInfo, NonequilibriumTrajectory
from csb.numeric.integrators import AbstractGradient, FastLeapFrog
class AbstractEnsembleMC(AbstractMC):
"""
Abstract class for Monte Carlo sampling algorithms simulating several ensembles.
@param samplers: samplers which sample from their respective equilibrium distributions
@type samplers: list of L{AbstractSingleChainMC}
"""
__metaclass__ = ABCMeta
def __init__(self, samplers):
self._samplers = MCCollection(samplers)
state = EnsembleState([x.state for x in self._samplers])
super(AbstractEnsembleMC, self).__init__(state)
def sample(self):
"""
Draw an ensemble sample.
@rtype: L{EnsembleState}
"""
sample = EnsembleState([sampler.sample() for sampler in self._samplers])
self.state = sample
return sample
@property
def energy(self):
"""
Total ensemble energy.
"""
return sum([x.energy for x in self._samplers])
class AbstractExchangeMC(AbstractEnsembleMC):
"""
Abstract class for Monte Carlo sampling algorithms employing some replica exchange method.
@param samplers: samplers which sample from their respective equilibrium distributions
@type samplers: list of L{AbstractSingleChainMC}
@param param_infos: list of ParameterInfo instances providing information needed
for performing swaps
@type param_infos: list of L{AbstractSwapParameterInfo}
"""
__metaclass__ = ABCMeta
def __init__(self, samplers, param_infos):
super(AbstractExchangeMC, self).__init__(samplers)
self._swaplist1 = []
self._swaplist2 = []
self._currentswaplist = self._swaplist1
self._param_infos = param_infos
self._statistics = SwapStatistics(self._param_infos)
def _checkstate(self, state):
if not isinstance(state, EnsembleState):
raise TypeError(state)
def swap(self, index):
"""
Perform swap between sampler pair described by param_infos[index]
and return outcome (true = accepted, false = rejected).
@param index: index of swap pair in param_infos
@type index: int
@rtype: boolean
"""
param_info = self._param_infos[index]
swapcom = self._propose_swap(param_info)
swapcom = self._calc_pacc_swap(swapcom)
result = self._accept_swap(swapcom)
self.state = EnsembleState([x.state for x in self._samplers])
self.statistics.stats[index].update(result)
return result
@abstractmethod
def _propose_swap(self, param_info):
"""
Calculate proposal states for a swap between two samplers.
@param param_info: ParameterInfo instance holding swap parameters
@type param_info: L{AbstractSwapParameterInfo}
@rtype: L{AbstractSwapCommunicator}
"""
pass
@abstractmethod
def _calc_pacc_swap(self, swapcom):
"""
Calculate probability to accept a swap given initial and proposal states.
@param swapcom: SwapCommunicator instance holding information to be communicated
between distinct swap substeps
@type swapcom: L{AbstractSwapCommunicator}
@rtype: L{AbstractSwapCommunicator}
"""
pass
def _accept_swap(self, swapcom):
"""
Accept / reject an exchange between two samplers given proposal states and
the acceptance probability and returns the outcome (true = accepted, false = rejected).
@param swapcom: SwapCommunicator instance holding information to be communicated
between distinct swap substeps
@type swapcom: L{AbstractSwapCommunicator}
@rtype: boolean
"""
if numpy.random.random() < swapcom.acceptance_probability:
if swapcom.sampler1.state.momentum is None and swapcom.sampler2.state.momentum is None:
swapcom.traj12.final.momentum = None
swapcom.traj21.final.momentum = None
swapcom.sampler1.state = swapcom.traj21.final
swapcom.sampler2.state = swapcom.traj12.final
return True
else:
return False
@property
def acceptance_rates(self):
"""
Return swap acceptance rates.
@rtype: list of floats
"""
return self.statistics.acceptance_rates
@property
def param_infos(self):
"""
List of SwapParameterInfo instances holding all necessary parameters.
@rtype: list of L{AbstractSwapParameterInfo}
"""
return self._param_infos
@property
def statistics(self):
return self._statistics
def _update_statistics(self, index, accepted):
"""
Update statistics of a given swap process.
@param index: position of swap statistics to be updated
@type index: int
@param accepted: outcome of the swap
@type accepted: boolean
"""
self._statistics.stats[index].update(accepted)
class AbstractSwapParameterInfo(object):
"""
Subclass instances hold all parameters necessary for performing a swap
between two given samplers.
"""
__metaclass__ = ABCMeta
def __init__(self, sampler1, sampler2):
"""
@param sampler1: First sampler
@type sampler1: L{AbstractSingleChainMC}
@param sampler2: Second sampler
@type sampler2: L{AbstractSingleChainMC}
"""
self._sampler1 = sampler1
self._sampler2 = sampler2
@property
def sampler1(self):
return self._sampler1
@property
def sampler2(self):
return self._sampler2
class AbstractSwapCommunicator(object):
"""
Holds all the information which needs to be communicated between
distinct swap substeps.
@param param_info: ParameterInfo instance holding swap parameters
@type param_info: L{AbstractSwapParameterInfo}
@param traj12: Forward trajectory
@type traj12: L{Trajectory}
@param traj21: Reverse trajectory
@type traj21: L{Trajectory}
"""
__metaclass__ = ABCMeta
def __init__(self, param_info, traj12, traj21):
self._sampler1 = param_info.sampler1
self._sampler2 = param_info.sampler2
self._traj12 = traj12
self._traj21 = traj21
self._param_info = param_info
self._acceptance_probability = None
self._accepted = False
@property
def sampler1(self):
return self._sampler1
@property
def sampler2(self):
return self._sampler2
@property
def traj12(self):
return self._traj12
@property
def traj21(self):
return self._traj21
@property
def acceptance_probability(self):
return self._acceptance_probability
@acceptance_probability.setter
def acceptance_probability(self, value):
self._acceptance_probability = value
@property
def accepted(self):
return self._accepted
@accepted.setter
def accepted(self, value):
self._accepted = value
@property
def param_info(self):
return self._param_info
class ReplicaExchangeMC(AbstractExchangeMC):
"""
Replica Exchange (RE, Swendsen & Yang 1986) implementation.
"""
def _propose_swap(self, param_info):
return RESwapCommunicator(param_info, Trajectory([param_info.sampler1.state,
param_info.sampler1.state]),
Trajectory([param_info.sampler2.state,
param_info.sampler2.state]))
def _calc_pacc_swap(self, swapcom):
E1 = lambda x:-swapcom.sampler1._pdf.log_prob(x)
E2 = lambda x:-swapcom.sampler2._pdf.log_prob(x)
T1 = swapcom.sampler1.temperature
T2 = swapcom.sampler2.temperature
state1 = swapcom.traj12.initial
state2 = swapcom.traj21.initial
proposal1 = swapcom.traj21.final
proposal2 = swapcom.traj12.final
swapcom.acceptance_probability = csb.numeric.exp(-E1(proposal1.position) / T1
+ E1(state1.position) / T1
- E2(proposal2.position) / T2
+ E2(state2.position) / T2)
return swapcom
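# Added note: because both trajectories above are trivial (initial == final),
# proposal1 == state2 and proposal2 == state1, so this is the standard RE
# Metropolis criterion
#     p_acc = exp([E1(x1) - E1(x2)] / T1 + [E2(x2) - E2(x1)] / T2).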
class RESwapParameterInfo(AbstractSwapParameterInfo):
"""
Holds parameters for a standard Replica Exchange swap.
"""
pass
class RESwapCommunicator(AbstractSwapCommunicator):
"""
Holds all the information which needs to be communicated between distinct
RE swap substeps.
See L{AbstractSwapCommunicator} for constructor signature.
"""
pass
class AbstractRENS(AbstractExchangeMC):
"""
Abstract Replica Exchange with Nonequilibrium Switches
(RENS, Ballard & Jarzynski 2009) class.
Subclasses implement various ways of generating trajectories
(deterministic or stochastic).
"""
__metaclass__ = ABCMeta
def _propose_swap(self, param_info):
init_state1 = param_info.sampler1.state
init_state2 = param_info.sampler2.state
trajinfo12 = RENSTrajInfo(param_info, init_state1, direction="fw")
trajinfo21 = RENSTrajInfo(param_info, init_state2, direction="bw")
traj12 = self._run_traj_generator(trajinfo12)
traj21 = self._run_traj_generator(trajinfo21)
return RENSSwapCommunicator(param_info, traj12, traj21)
def _setup_protocol(self, traj_info):
"""
Sets the protocol lambda(t) to either the forward or the reverse protocol.
@param traj_info: TrajectoryInfo object holding information necessary to
calculate the RENS trajectories.
@type traj_info: L{RENSTrajInfo}
"""
if traj_info.direction == "fw":
return traj_info.param_info.protocol
else:
return lambda t, tau: traj_info.param_info.protocol(tau - t, tau)
def _get_init_temperature(self, traj_info):
"""
Determine the initial temperature of a RENS trajectory.
@param traj_info: TrajectoryInfo object holding information necessary to
calculate the RENS trajectory.
@type traj_info: L{RENSTrajInfo}
"""
if traj_info.direction == "fw":
return traj_info.param_info.sampler1.temperature
else:
return traj_info.param_info.sampler2.temperature
@abstractmethod
def _calc_works(self, swapcom):
"""
Calculates the works expended during the nonequilibrium
trajectories.
@param swapcom: Swap communicator object holding all the
necessary information.
@type swapcom: L{RENSSwapCommunicator}
@return: The work expended during the forward and the backward
trajectory.
@rtype: 2-tuple of floats
"""
pass
def _calc_pacc_swap(self, swapcom):
work12, work21 = self._calc_works(swapcom)
swapcom.acceptance_probability = csb.numeric.exp(-work12 - work21)
return swapcom
@abstractmethod
def _propagator_factory(self, traj_info):
"""
Factory method which produces the propagator object used to calculate
the RENS trajectories.
@param traj_info: TrajectoryInfo object holding information necessary to
calculate the RENS trajectories.
@type traj_info: L{RENSTrajInfo}
@rtype: L{AbstractPropagator}
"""
pass
def _run_traj_generator(self, traj_info):
"""
Run the trajectory generator which generates a trajectory
of a given length between the states of two samplers.
@param traj_info: TrajectoryInfo instance holding information
needed to generate a nonequilibrium trajectory
@type traj_info: L{RENSTrajInfo}
@rtype: L{Trajectory}
"""
init_temperature = self._get_init_temperature(traj_info)
init_state = traj_info.init_state.clone()
if init_state.momentum is None:
init_state = augment_state(init_state,
init_temperature,
traj_info.param_info.mass_matrix)
gen = self._propagator_factory(traj_info)
traj = gen.generate(init_state, int(traj_info.param_info.traj_length))
return traj
class AbstractRENSSwapParameterInfo(RESwapParameterInfo):
"""
Holds parameters for a RENS swap.
"""
__metaclass__ = ABCMeta
def __init__(self, sampler1, sampler2, protocol):
super(AbstractRENSSwapParameterInfo, self).__init__(sampler1, sampler2)
## Can't pass the linear protocol as a default argument because of a reported bug
## in epydoc parsing which makes it fail building the docs.
self._protocol = None
if protocol is None:
self._protocol = lambda t, tau: t / tau
else:
self._protocol = protocol
@property
def protocol(self):
"""
Switching protocol determining the time dependence
of the switching parameter.
"""
return self._protocol
@protocol.setter
def protocol(self, value):
self._protocol = value
class RENSSwapCommunicator(AbstractSwapCommunicator):
"""
Holds all the information which needs to be communicated between distinct
RENS swap substeps.
See L{AbstractSwapCommunicator} for constructor signature.
"""
pass
class RENSTrajInfo(object):
"""
Holds information necessary for calculating a RENS trajectory.
@param param_info: ParameterInfo instance holding swap parameters
@type param_info: L{AbstractSwapParameterInfo}
@param init_state: state from which the trajectory is supposed to start
@type init_state: L{State}
@param direction: Either "fw" or "bw", indicating a forward or backward
trajectory. This is necessary to pick the protocol or
the reversed protocol, respectively.
@type direction: string, either "fw" or "bw"
"""
def __init__(self, param_info, init_state, direction):
self._param_info = param_info
self._init_state = init_state
self._direction = direction
@property
def param_info(self):
return self._param_info
@property
def init_state(self):
return self._init_state
@property
def direction(self):
return self._direction
class MDRENS(AbstractRENS):
"""
Replica Exchange with Nonequilibrium Switches (RENS, Ballard & Jarzynski 2009)
with Molecular Dynamics (MD) trajectories.
@param samplers: Samplers which sample their
respective equilibrium distributions
@type samplers: list of L{AbstractSingleChainMC}
@param param_infos: ParameterInfo instance holding
information required to perform a MDRENS swap
@type param_infos: list of L{MDRENSSwapParameterInfo}
@param integrator: Subclass of L{AbstractIntegrator} to be used to
calculate the non-equilibrium trajectories
@type integrator: type
"""
def __init__(self, samplers, param_infos,
integrator=csb.numeric.integrators.FastLeapFrog):
super(MDRENS, self).__init__(samplers, param_infos)
self._integrator = integrator
def _propagator_factory(self, traj_info):
protocol = self._setup_protocol(traj_info)
tau = traj_info.param_info.traj_length * traj_info.param_info.timestep
factory = InterpolationFactory(protocol, tau)
gen = MDPropagator(factory.build_gradient(traj_info.param_info.gradient),
traj_info.param_info.timestep,
mass_matrix=traj_info.param_info.mass_matrix,
integrator=self._integrator)
return gen
def _calc_works(self, swapcom):
T1 = swapcom.param_info.sampler1.temperature
T2 = swapcom.param_info.sampler2.temperature
heat12 = swapcom.traj12.heat
heat21 = swapcom.traj21.heat
proposal1 = swapcom.traj21.final
proposal2 = swapcom.traj12.final
state1 = swapcom.traj12.initial
state2 = swapcom.traj21.initial
if swapcom.param_info.mass_matrix.is_unity_multiple:
inverse_mass_matrix = 1.0 / swapcom.param_info.mass_matrix[0][0]
else:
inverse_mass_matrix = swapcom.param_info.mass_matrix.inverse
E1 = lambda x:-swapcom.sampler1._pdf.log_prob(x)
E2 = lambda x:-swapcom.sampler2._pdf.log_prob(x)
K = lambda x: 0.5 * numpy.dot(x.T, numpy.dot(inverse_mass_matrix, x))
w12 = (K(proposal2.momentum) + E2(proposal2.position)) / T2 - \
(K(state1.momentum) + E1(state1.position)) / T1 - heat12
w21 = (K(proposal1.momentum) + E1(proposal1.position)) / T1 - \
(K(state2.momentum) + E2(state2.position)) / T2 - heat21
return w12, w21
class MDRENSSwapParameterInfo(RESwapParameterInfo):
"""
Holds parameters for a MDRENS swap.
@param sampler1: First sampler
@type sampler1: L{AbstractSingleChainMC}
@param sampler2: Second sampler
@type sampler2: L{AbstractSingleChainMC}
@param timestep: Integration timestep
@type timestep: float
@param traj_length: Trajectory length in number of timesteps
@type traj_length: int
@param gradient: Gradient which determines the dynamics during a trajectory
@type gradient: L{AbstractGradient}
@param protocol: Switching protocol determining the time dependence of the
switching parameter. It is a function M{f} taking the running
time t and the switching time tau to yield a value in M{[0, 1]}
with M{f(0, tau) = 0} and M{f(tau, tau) = 1}. Default is a linear
protocol, which is being set manually due to an epydoc bug
@type protocol: callable
@param mass_matrix: Mass matrix
@type mass_matrix: n-dimensional matrix of type L{InvertibleMatrix} with n being the dimension
of the configuration space, that is, the dimension of
the position / momentum vectors
"""
def __init__(self, sampler1, sampler2, timestep, traj_length, gradient,
protocol=None, mass_matrix=None):
super(MDRENSSwapParameterInfo, self).__init__(sampler1, sampler2)
self._mass_matrix = mass_matrix
if self.mass_matrix is None:
d = len(sampler1.state.position)
self.mass_matrix = csb.numeric.InvertibleMatrix(numpy.eye(d), numpy.eye(d))
self._traj_length = traj_length
self._gradient = gradient
self._timestep = timestep
## Can't pass the linear protocol as a default argument because of a reported bug
## in epydoc parsing which makes it fail building the docs.
self._protocol = None
if protocol is None:
self._protocol = lambda t, tau: t / tau
else:
self._protocol = protocol
@property
def timestep(self):
"""
Integration timestep.
"""
return self._timestep
@timestep.setter
def timestep(self, value):
self._timestep = float(value)
@property
def traj_length(self):
"""
Trajectory length in number of integration steps.
"""
return self._traj_length
@traj_length.setter
def traj_length(self, value):
self._traj_length = int(value)
@property
def gradient(self):
"""
Gradient which governs the equations of motion.
"""
return self._gradient
@property
def mass_matrix(self):
return self._mass_matrix
@mass_matrix.setter
def mass_matrix(self, value):
self._mass_matrix = value
@property
def protocol(self):
"""
Switching protocol determining the time dependence
of the switching parameter.
"""
return self._protocol
@protocol.setter
def protocol(self, value):
self._protocol = value
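# Illustrative sketch (not part of the original module): any callable with
# f(0, tau) = 0 and f(tau, tau) = 1 can be passed as `protocol` instead of the
# default linear ramp, e.g. a smooth cosine switch:
#
# >>> smooth = lambda t, tau: 0.5 * (1.0 - numpy.cos(numpy.pi * t / tau))
# >>> info = MDRENSSwapParameterInfo(sampler1, sampler2, timestep=0.01,
#                                    traj_length=100, gradient=grad,
#                                    protocol=smooth)
#
# where `sampler1`, `sampler2` and `grad` are assumed to exist.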
class ThermostattedMDRENS(MDRENS):
"""
Replica Exchange with Nonequilibrium Switches (RENS, Ballard & Jarzynski, 2009)
with Andersen-thermostatted Molecular Dynamics (MD) trajectories.
@param samplers: Samplers which sample their
respective equilibrium distributions
@type samplers: list of L{AbstractSingleChainMC}
@param param_infos: ParameterInfo instance holding
information required to perform a MDRENS swap
@type param_infos: list of L{ThermostattedMDRENSSwapParameterInfo}
@param integrator: Subclass of L{AbstractIntegrator} to be used to
calculate the non-equilibrium trajectories
@type integrator: type
"""
def __init__(self, samplers, param_infos, integrator=csb.numeric.integrators.LeapFrog):
super(ThermostattedMDRENS, self).__init__(samplers, param_infos, integrator)
def _propagator_factory(self, traj_info):
protocol = self._setup_protocol(traj_info)
tau = traj_info.param_info.traj_length * traj_info.param_info.timestep
factory = InterpolationFactory(protocol, tau)
grad = factory.build_gradient(traj_info.param_info.gradient)
temp = factory.build_temperature(traj_info.param_info.temperature)
gen = ThermostattedMDPropagator(grad,
traj_info.param_info.timestep, temperature=temp,
collision_probability=traj_info.param_info.collision_probability,
update_interval=traj_info.param_info.collision_interval,
mass_matrix=traj_info.param_info.mass_matrix,
integrator=self._integrator)
return gen
class ThermostattedMDRENSSwapParameterInfo(MDRENSSwapParameterInfo):
"""
@param sampler1: First sampler
@type sampler1: subclass instance of L{AbstractSingleChainMC}
@param sampler2: Second sampler
@type sampler2: subclass instance of L{AbstractSingleChainMC}
@param timestep: Integration timestep
@type timestep: float
@param traj_length: Trajectory length in number of timesteps
@type traj_length: int
@param gradient: Gradient which determines the dynamics during a trajectory
@type gradient: subclass instance of L{AbstractGradient}
@param mass_matrix: Mass matrix
@type mass_matrix: n-dimensional L{InvertibleMatrix} with n being the dimension
of the configuration space, that is, the dimension of
the position / momentum vectors
@param protocol: Switching protocol determining the time dependence of the
switching parameter. It is a function f taking the running
time t and the switching time tau to yield a value in [0, 1]
with f(0, tau) = 0 and f(tau, tau) = 1
@type protocol: callable
@param temperature: Temperature interpolation function.
@type temperature: Real-valued function mapping from [0,1] to R.
T(0) = temperature of the ensemble sampler1 samples from, T(1) = temperature
of the ensemble sampler2 samples from
@param collision_probability: Probability for a collision with the heatbath during one timestep
@type collision_probability: float
@param collision_interval: Interval during which collision may occur with probability
collision_probability
@type collision_interval: int
"""
def __init__(self, sampler1, sampler2, timestep, traj_length, gradient, mass_matrix=None,
protocol=None, temperature=lambda l: 1.0,
collision_probability=0.1, collision_interval=1):
super(ThermostattedMDRENSSwapParameterInfo, self).__init__(sampler1, sampler2, timestep,
traj_length, gradient,
mass_matrix=mass_matrix,
protocol=protocol)
self._collision_probability = None
self._collision_interval = None
self._temperature = temperature
self.collision_probability = collision_probability
self.collision_interval = collision_interval
@property
def collision_probability(self):
"""
Probability for a collision with the heatbath during one timestep.
"""
return self._collision_probability
@collision_probability.setter
def collision_probability(self, value):
self._collision_probability = float(value)
@property
def collision_interval(self):
"""
Interval during which collision may occur with probability
C{collision_probability}.
"""
return self._collision_interval
@collision_interval.setter
def collision_interval(self, value):
self._collision_interval = int(value)
@property
def temperature(self):
return self._temperature
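# Illustrative sketch (not part of the original module): a linear temperature
# interpolation consistent with the contract documented above, T(0) = T1 and
# T(1) = T2, where T1 and T2 are assumed to be the temperatures of the two
# ensembles being bridged:
#
# >>> temp_interp = lambda l: T1 + (T2 - T1) * l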
class AbstractStepRENS(AbstractRENS):
"""
Replica Exchange with Nonequilibrium Switches (RENS, Ballard & Jarzynski 2009)
with stepwise trajectories as described in Nilmeier et al., "Nonequilibrium candidate
Monte Carlo is an efficient tool for equilibrium simulation", PNAS 2011.
The switching parameter dependence of the Hamiltonian is a linear interpolation
between the PDFs of the sampler objects,
M{H(S{lambda}) = H_2 * S{lambda} + (1 - S{lambda}) * H_1}.
The perturbation kernel is a thermodynamic perturbation and the propagation is subclass
responsibility.
Note that due to the linear interpolations between the two Hamiltonians, the
log-probability has to be evaluated four times per perturbation step which can be
costly. In this case it is advisable to define the intermediate log probabilities
in _run_traj_generator differently.
@param samplers: Samplers which sample their respective equilibrium distributions
@type samplers: list of L{AbstractSingleChainMC}
@param param_infos: ParameterInfo instances holding
information required to perform a HMCStepRENS swaps
@type param_infos: list of L{AbstractSwapParameterInfo}
"""
__metaclass__ = ABCMeta
def __init__(self, samplers, param_infos):
super(AbstractStepRENS, self).__init__(samplers, param_infos)
self._evaluate_im_works = True
@abstractmethod
def _setup_propagations(self, im_sys_infos, param_info):
"""
Set up the propagation steps using the information about the current system
setup and parameters from the SwapParameterInfo object.
@param im_sys_infos: Information about the intermediate system setups
@type im_sys_infos: List of L{AbstractSystemInfo}
@param param_info: SwapParameterInfo object containing parameters for the
propagations like timesteps, trajectory lengths etc.
@type param_info: L{AbstractSwapParameterInfo}
"""
pass
@abstractmethod
def _add_gradients(self, im_sys_infos, param_info, t_prot):
"""
If needed, set im_sys_infos.hamiltonian.gradient.
@param im_sys_infos: Information about the intermediate system setups
@type im_sys_infos: List of L{AbstractSystemInfo}
@param param_info: SwapParameterInfo object containing parameters for the
propagations like timesteps, trajectory lengths etc.
@type param_info: L{AbstractSwapParameterInfo}
@param t_prot: Switching protocol defining the time dependence of the switching
parameter.
@type t_prot: callable
"""
pass
def _setup_stepwise_protocol(self, traj_info):
"""
Sets up the stepwise protocol consisting of perturbation and relaxation steps.
@param traj_info: TrajectoryInfo instance holding information
needed to generate a nonequilibrium trajectory
@type traj_info: L{RENSTrajInfo}
@rtype: L{Protocol}
"""
pdf1 = traj_info.param_info.sampler1._pdf
pdf2 = traj_info.param_info.sampler2._pdf
T1 = traj_info.param_info.sampler1.temperature
T2 = traj_info.param_info.sampler2.temperature
traj_length = traj_info.param_info.intermediate_steps
prot = self._setup_protocol(traj_info)
t_prot = lambda i: prot(float(i), float(traj_length))
im_log_probs = [lambda x, i=i: pdf2.log_prob(x) * t_prot(i) + \
(1 - t_prot(i)) * pdf1.log_prob(x)
for i in range(traj_length + 1)]
im_temperatures = [T2 * t_prot(i) + (1 - t_prot(i)) * T1
for i in range(traj_length + 1)]
im_reduced_hamiltonians = [ReducedHamiltonian(im_log_probs[i],
temperature=im_temperatures[i])
for i in range(traj_length + 1)]
im_sys_infos = [HamiltonianSysInfo(im_reduced_hamiltonians[i])
for i in range(traj_length + 1)]
perturbations = [ReducedHamiltonianPerturbation(im_sys_infos[i], im_sys_infos[i+1])
for i in range(traj_length)]
if not self._evaluate_im_works:
for p in perturbations:
p.evaluate_work = False
im_sys_infos = self._add_gradients(im_sys_infos, traj_info.param_info, t_prot)
propagations = self._setup_propagations(im_sys_infos, traj_info.param_info)
steps = [Step(perturbations[i], propagations[i]) for i in range(traj_length)]
return Protocol(steps)
def _propagator_factory(self, traj_info):
protocol = self._setup_stepwise_protocol(traj_info)
gen = NonequilibriumStepPropagator(protocol)
return gen
def _run_traj_generator(self, traj_info):
init_temperature = self._get_init_temperature(traj_info)
gen = self._propagator_factory(traj_info)
traj = gen.generate(traj_info.init_state)
return NonequilibriumTrajectory([traj_info.init_state, traj.final], jacobian=1.0,
heat=traj.heat, work=traj.work, deltaH=traj.deltaH)
class HMCStepRENS(AbstractStepRENS):
"""
Replica Exchange with Nonequilibrium Switches (RENS, Ballard & Jarzynski 2009)
with stepwise trajectories as described in Nilmeier et al., "Nonequilibrium candidate
Monte Carlo is an efficient tool for equilibrium simulation", PNAS 2011.
The switching parameter dependence of the Hamiltonian is a linear interpolation
between the PDFs of the sampler objects,
M{H(S{lambda}) = H_2 * S{lambda} + (1 - S{lambda}) * H_1}.
The perturbation kernel is a thermodynamic perturbation and the propagation is done using HMC.
Note that due to the linear interpolations between the two Hamiltonians, the
log-probability and its gradient has to be evaluated four times per perturbation step which
can be costly. In this case it is advisable to define the intermediate log probabilities
in _run_traj_generator differently.
@param samplers: Samplers which sample their respective equilibrium distributions
@type samplers: list of L{AbstractSingleChainMC}
@param param_infos: ParameterInfo instances holding
information required to perform a HMCStepRENS swaps
@type param_infos: list of L{HMCStepRENSSwapParameterInfo}
"""
def __init__(self, samplers, param_infos):
super(HMCStepRENS, self).__init__(samplers, param_infos)
@staticmethod
def _add_gradients(im_sys_infos, param_info, t_prot):
im_gradients = [lambda x, t, i=i: param_info.gradient(x, t_prot(i))
for i in range(param_info.intermediate_steps + 1)]
for i, s in enumerate(im_sys_infos):
s.hamiltonian.gradient = im_gradients[i]
return im_sys_infos
@staticmethod
def _setup_propagations(im_sys_infos, param_info):
propagation_params = [HMCPropagationParam(param_info.timestep,
param_info.hmc_traj_length,
im_sys_infos[i+1].hamiltonian.gradient,
param_info.hmc_iterations,
mass_matrix=param_info.mass_matrix,
integrator=param_info.integrator)
for i in range(param_info.intermediate_steps)]
propagations = [HMCPropagation(im_sys_infos[i+1], propagation_params[i], evaluate_heat=False)
for i in range(param_info.intermediate_steps)]
return propagations
def _calc_works(self, swapcom):
return swapcom.traj12.work, swapcom.traj21.work
class HMCStepRENSSwapParameterInfo(AbstractRENSSwapParameterInfo):
"""
Holds all required information for performing HMCStepRENS swaps.
@param sampler1: First sampler
@type sampler1: subclass instance of L{AbstractSingleChainMC}
@param sampler2: Second sampler
@type sampler2: subclass instance of L{AbstractSingleChainMC}
@param timestep: integration timestep
@type timestep: float
@param hmc_traj_length: HMC trajectory length
@type hmc_traj_length: int
@param hmc_iterations: number of HMC iterations in the propagation step
@type hmc_iterations: int
@param gradient: gradient governing the equations of motion, function of
position array and switching protocol
@type gradient: callable
@param intermediate_steps: number of steps in the protocol; this is a discrete version
of the switching time in "continuous" RENS implementations
@type intermediate_steps: int
@param protocol: Switching protocol determining the time dependence of the
switching parameter. It is a function f taking the running
time t and the switching time tau to yield a value in [0, 1]
with f(0, tau) = 0 and f(tau, tau) = 1
@type protocol: callable
@param mass_matrix: mass matrix for kinetic energy definition
@type mass_matrix: L{InvertibleMatrix}
@param integrator: Integration scheme to be utilized
@type integrator: L{AbstractIntegrator}
"""
def __init__(self, sampler1, sampler2, timestep, hmc_traj_length, hmc_iterations,
gradient, intermediate_steps, parametrization=None, protocol=None,
mass_matrix=None, integrator=FastLeapFrog):
super(HMCStepRENSSwapParameterInfo, self).__init__(sampler1, sampler2, protocol)
self._mass_matrix = None
self.mass_matrix = mass_matrix
if self.mass_matrix is None:
d = len(sampler1.state.position)
self.mass_matrix = csb.numeric.InvertibleMatrix(numpy.eye(d), numpy.eye(d))
self._hmc_traj_length = None
self.hmc_traj_length = hmc_traj_length
self._gradient = None
self.gradient = gradient
self._timestep = None
self.timestep = timestep
self._hmc_iterations = None
self.hmc_iterations = hmc_iterations
self._intermediate_steps = None
self.intermediate_steps = intermediate_steps
self._integrator = None
self.integrator = integrator
@property
def timestep(self):
"""
Integration timestep.
"""
return self._timestep
@timestep.setter
def timestep(self, value):
self._timestep = float(value)
@property
def hmc_traj_length(self):
"""
HMC trajectory length in number of integration steps.
"""
return self._hmc_traj_length
@hmc_traj_length.setter
def hmc_traj_length(self, value):
self._hmc_traj_length = int(value)
@property
def gradient(self):
"""
Gradient which governs the equations of motion.
"""
return self._gradient
@gradient.setter
def gradient(self, value):
self._gradient = value
@property
def mass_matrix(self):
return self._mass_matrix
@mass_matrix.setter
def mass_matrix(self, value):
self._mass_matrix = value
@property
def hmc_iterations(self):
return self._hmc_iterations
@hmc_iterations.setter
def hmc_iterations(self, value):
self._hmc_iterations = value
@property
def intermediate_steps(self):
return self._intermediate_steps
@intermediate_steps.setter
def intermediate_steps(self, value):
self._intermediate_steps = value
@property
def integrator(self):
return self._integrator
@integrator.setter
def integrator(self, value):
self._integrator = value
class AbstractSwapScheme(object):
"""
Provides the interface for classes defining schemes according to which swaps in
Replica Exchange-like simulations are performed.
@param algorithm: Exchange algorithm that performs the swaps
@type algorithm: L{AbstractExchangeMC}
"""
__metaclass__ = ABCMeta
def __init__(self, algorithm):
self._algorithm = algorithm
@abstractmethod
def swap_all(self):
"""
Advises the Replica Exchange-like algorithm to perform swaps according to
the schedule defined here.
"""
pass
class AlternatingAdjacentSwapScheme(AbstractSwapScheme):
"""
Provides a swapping scheme which attempts exchanges between neighbours only,
following the scheme 1 <-> 2, 3 <-> 4, ... and, after a sampling period, 2 <-> 3, 4 <-> 5, ...
@param algorithm: Exchange algorithm that performs the swaps
@type algorithm: L{AbstractExchangeMC}
"""
def __init__(self, algorithm):
super(AlternatingAdjacentSwapScheme, self).__init__(algorithm)
self._current_swap_list = None
self._swap_list1 = []
self._swap_list2 = []
self._create_swap_lists()
def _create_swap_lists(self):
if len(self._algorithm.param_infos) == 1:
self._swap_list1.append(0)
self._swap_list2.append(0)
else:
i = 0
while i < len(self._algorithm.param_infos):
self._swap_list1.append(i)
i += 2
i = 1
while i < len(self._algorithm.param_infos):
self._swap_list2.append(i)
i += 2
self._current_swap_list = self._swap_list1
def swap_all(self):
for x in self._current_swap_list:
self._algorithm.swap(x)
if self._current_swap_list == self._swap_list1:
self._current_swap_list = self._swap_list2
else:
self._current_swap_list = self._swap_list1
class SingleSwapStatistics(object):
"""
Tracks swap statistics of a single sampler pair.
@param param_info: ParameterInfo instance holding swap parameters
@type param_info: L{AbstractSwapParameterInfo}
"""
def __init__(self, param_info):
self._total_swaps = 0
self._accepted_swaps = 0
@property
def total_swaps(self):
return self._total_swaps
@property
def accepted_swaps(self):
return self._accepted_swaps
@property
def acceptance_rate(self):
"""
Acceptance rate of the sampler pair.
"""
if self.total_swaps > 0:
return float(self.accepted_swaps) / float(self.total_swaps)
else:
return 0.
def update(self, accepted):
"""
Updates swap statistics.
"""
self._total_swaps += 1
self._accepted_swaps += int(accepted)
class SwapStatistics(object):
"""
Tracks swap statistics for an AbstractExchangeMC subclass instance.
@param param_infos: list of ParameterInfo instances providing information
needed for performing swaps
@type param_infos: list of L{AbstractSwapParameterInfo}
"""
def __init__(self, param_infos):
self._stats = [SingleSwapStatistics(x) for x in param_infos]
@property
def stats(self):
return tuple(self._stats)
@property
def acceptance_rates(self):
"""
Returns acceptance rates for all swaps.
"""
return [x.acceptance_rate for x in self._stats]
class InterpolationFactory(object):
"""
Produces interpolations for functions changed during non-equilibrium
trajectories.
@param protocol: protocol to be used to generate non-equilibrium trajectories
@type protocol: function mapping t to [0...1] for fixed tau
@param tau: switching time
@type tau: float
"""
def __init__(self, protocol, tau):
self._protocol = None
self._tau = None
self.protocol = protocol
self.tau = tau
@property
def protocol(self):
return self._protocol
@protocol.setter
def protocol(self, value):
if not hasattr(value, '__call__'):
raise TypeError(value)
self._protocol = value
@property
def tau(self):
return self._tau
@tau.setter
def tau(self, value):
self._tau = float(value)
def build_gradient(self, gradient):
"""
Create a gradient instance according to the given protocol
and switching time.
@param gradient: gradient with G(0) = G_1 and G(1) = G_2
@type gradient: callable
"""
return Gradient(gradient, self._protocol, self._tau)
def build_temperature(self, temperature):
"""
Create a temperature function according to given protocol and
switching time.
@param temperature: temperature with T(0) = T_1 and T(1) = T_2
@type temperature: callable
"""
return lambda t: temperature(self.protocol(t, self.tau))
class Gradient(AbstractGradient):
def __init__(self, gradient, protocol, tau):
self._protocol = protocol
self._gradient = gradient
self._tau = tau
def evaluate(self, q, t):
return self._gradient(q, self._protocol(t, self._tau))
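# Usage sketch (added illustration, hypothetical gradient): build a
# time-dependent gradient G(q, t) = g(q, lambda(t)) from a switching-parameter
# gradient g(q, l) with a linear protocol and switching time tau = 1.0:
#
# >>> factory = InterpolationFactory(lambda t, tau: t / tau, 1.0)
# >>> g = factory.build_gradient(lambda q, l: l * q)
# >>> g.evaluate(numpy.array([1.0]), 0.5)   # yields 0.5 * q = array([ 0.5])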
class ReplicaHistory(object):
'''
Replica history object, works with both RE and RENS for
the AlternatingAdjacentSwapScheme.
@param samples: list holding ensemble states
@type samples: list
@param swap_interval: interval with which swaps were attempted, e.g.,
5 means that every 5th regular MC step is replaced
by a swap
@type swap_interval: int
@param first_swap: sample index of the first sample generated by a swap attempt.
If None, the first RE sample is assumed to have sample index
swap_interval. If specified, it has to be greater than zero
@type first_swap: int
'''
def __init__(self, samples, swap_interval, first_swap=None):
self.samples = samples
self.swap_interval = swap_interval
if first_swap is None:
self.first_swap = swap_interval - 1
elif first_swap > 0:
self.first_swap = first_swap - 1
else:
raise ValueError("Sample index of first swap has to be greater than zero!")
self.n_replicas = len(samples[0])
@staticmethod
def _change_direction(x):
if x == 1:
return -1
if x == -1:
return 1
def calculate_history(self, start_ensemble):
'''
Calculates the replica history of the first state of ensemble #start_ensemble.
@param start_ensemble: index of the ensemble to start at, zero-indexed
@type start_ensemble: int
@return: replica history as a list of ensemble indices
@rtype: list of ints
'''
sample_counter = 0
# determine the direction (up = 1, down = -1) in the "temperature ladder" of
# the first swap attempt. Remember: first swap series is always 0 <-> 1, 2 <-> 3, ...
if start_ensemble % 2 == 0:
direction = +1
else:
direction = -1
# if the number of replicas is not even and the start ensemble is the
# highest-temperature ensemble, the first swap will be attempted "downwards"
if start_ensemble % 2 == 0 and start_ensemble == self.n_replicas - 1:
direction = -1
# will store the indices of the ensembles the state will visit in chronological order
history = []
# the ensemble the state is currently in
ens = start_ensemble
while sample_counter < len(self.samples):
if self.n_replicas == 2:
if (sample_counter - self.first_swap - 1) % self.swap_interval == 0 and \
sample_counter >= self.first_swap:
## swap attempt: determine whether it was successful or not
# state after swap attempt
current_sample = self.samples[sample_counter][ens]
# state before swap attempt
previous_sample = self.samples[sample_counter - 1][history[-1]]
# swap was accepted when position of the current state doesn't equal
# the position of the state before the swap attempt, that is, the last
# state in the history
swap_accepted = not numpy.all(current_sample.position ==
previous_sample.position)
if swap_accepted:
if ens == 0:
ens = 1
else:
ens = 0
history.append(ens)
else:
history.append(ens)
else:
if (sample_counter - self.first_swap - 1) % self.swap_interval == 0 and \
sample_counter >= self.first_swap:
# state after swap attempt
current_sample = self.samples[sample_counter][ens]
# state before swap attempt
previous_sample = self.samples[sample_counter - 1][ens]
# swap was accepted when position of the current state doesn't equal
# the position of the state before the swap attempt, that is, the last
# state in the history
swap_accepted = not numpy.all(current_sample.position == previous_sample.position)
if swap_accepted:
ens += direction
else:
if ens == self.n_replicas - 1:
# if at the top of the ladder, go downwards again
direction = -1
elif ens == 0:
# if at the bottom of the ladder, go upwards
direction = +1
else:
# in between, reverse the direction of the trajectory
# in temperature space
direction = self._change_direction(direction)
history.append(ens)
sample_counter += 1
return history
def calculate_projected_trajectories(self, ensemble):
'''
Calculates sequentially correlated trajectories projected on a specific ensemble.
@param ensemble: ensemble index of ensemble of interest, zero-indexed
@type ensemble: int
@return: list of Trajectory objects containing sequentially correlated trajectories
@rtype: list of L{Trajectory} objects.
'''
trajectories = []
for i in range(self.n_replicas):
history = self.calculate_history(i)
traj = [x[ensemble] for k, x in enumerate(self.samples) if history[k] == ensemble]
trajectories.append(Trajectory(traj))
return trajectories
def calculate_trajectories(self):
'''
Calculates sequentially correlated trajectories.
@return: list of Trajectory objects containing sequentially correlated trajectories
@rtype: list of L{Trajectory} objects.
'''
trajectories = []
for i in range(self.n_replicas):
history = self.calculate_history(i)
traj = [x[history[k]] for k, x in enumerate(self.samples)]
trajectories.append(Trajectory(traj))
return trajectories
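# Usage sketch (added illustration, assumed setup): given `states`, a list of
# EnsembleState samples recorded at every MC step of a run in which every 5th
# step was a swap attempt (cf. the module docstring), the migration of the
# state starting in ensemble 0 and the decorrelated trajectories are obtained
# via:
#
# >>> rh = ReplicaHistory(states, swap_interval=5)
# >>> history = rh.calculate_history(0)
# >>> trajectories = rh.calculate_trajectories()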
|
csb-toolbox/CSB
|
csb/statistics/samplers/mc/multichain.py
|
Python
|
mit
| 55,707
|
[
"VisIt"
] |
e65b53258145a227810fb6300d95e1b1fbe4373d3cd7666bc31aed25cd3b022a
|
import ovito
from ovito.io import (import_file, export_file)
from ovito.vis import *
import os
test_data_dir = "../../files/"
node1 = import_file(test_data_dir + "LAMMPS/class2.data", atom_style = "full")
node1.add_to_scene()
node1.source.particle_properties.position.display.shape = ParticleDisplay.Shape.Square
node1.source.particle_properties.position.display.radius = 0.3
export_file(node1, "test.pov", "povray")
export_file(None, "test.pov", "povray")
os.remove("test.pov")
|
srinath-chakravarthy/ovito
|
tests/scripts/test_suite/povray_exporter.py
|
Python
|
gpl-3.0
| 481
|
[
"LAMMPS",
"OVITO"
] |
accbdc526370e97d69cecede569c7bb47d128abb096060c01db2960c11629708
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'stsmith'
# easylist_pac: Convert EasyList Tracker and Adblocking rules to an efficient Proxy Auto Configuration file
# Copyright (C) 2017-2020 by Steven T. Smith <steve dot t dot smith at gmail dot com>, GPL
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse as ap, copy, datetime, functools as fnt, numpy as np, os, re, sys, time, urllib.request, warnings
try:
machine_learning_flag = True
import multiprocessing as mp, scipy.sparse as sps
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
except ImportError as e:
machine_learning_flag = False
print(e)
warnings.warn("Install scikit-learn for more accurate EasyList rule selection.")
try:
plot_flag = True
import matplotlib as mpl, matplotlib.pyplot as plt
# Legible plot style defaults
# http://matplotlib.org/api/matplotlib_configuration_api.html
# http://matplotlib.org/users/customizing.html
mpl.rcParams['figure.figsize'] = (10.0, 5.0)
mpl.rc('font', **{'family': 'sans-serif', 'weight': 'bold', 'size': 14})
mpl.rc('axes', **{'titlesize': 20, 'titleweight': 'bold', 'labelsize': 16, 'labelweight': 'bold'})
mpl.rc('legend', **{'fontsize': 14})
mpl.rc('figure', **{'titlesize': 16, 'titleweight': 'bold'})
mpl.rc('lines', **{'linewidth': 2.5, 'markersize': 18, 'markeredgewidth': 0})
mpl.rc('mathtext',
**{'fontset': 'custom', 'rm': 'sans:bold', 'bf': 'sans:bold', 'it': 'sans:italic', 'sf': 'sans:bold',
'default': 'it'})
# plt.rc('text',usetex=False) # [default] usetex should be False
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath,sfmath} \boldmath']
except ImportError as e:
plot_flag = False
print(e)
warnings.warn("Install matplotlib to plot rule priorities.")
class EasyListPAC:
'''Create a Proxy Auto Configuration file from EasyList rule sets.'''
def __init__(self):
self.parseArgs()
self.easylists_download_latest()
self.parse_and_filter_rule_files()
self.prioritize_rules()
if not self.my_extra_rules_off:
self.easylist_append_rules(my_extra_rules)
if self.debug:
print("Good rules and strengths:\n" + '\n'.join('{: 5d}:\t{}\t\t[{:2.1f}]'.format(i,r,s) for (i,(r,s)) in enumerate(zip(self.good_rules,self.good_signal))))
print("\nBad rules and strengths:\n" + '\n'.join('{: 5d}:\t{}\t\t[{:2.1f}]'.format(i,r,s) for (i,(r,s)) in enumerate(zip(self.bad_rules,self.bad_signal))))
if plot_flag:
# plt.plot(np.arange(len(self.good_signal)), self.good_signal, '.')
# plt.show()
plt.plot(np.arange(len(self.bad_signal)), self.bad_signal, '.')
plt.xlabel('Rule index')
plt.ylabel('Bad rule distance (logit)')
plt.show()
return
self.parse_easylist_rules()
self.create_pac_file()
def parseArgs(self):
# blackhole specification in arguments
# best choice is the LAN IP address of the http://hostname/proxy.pac web server or a dedicated blackhole server, e.g. 192.168.0.2:8119
parser = ap.ArgumentParser()
parser.add_argument('-b', '--blackhole', help="Blackhole IP:port", type=str, default='127.0.0.1:8119')
parser.add_argument('-d', '--download-dir', help="Download directory", type=str, default='~/Downloads')
parser.add_argument('-g', '--debug', help="Debug: Just print rules", action='store_true')
parser.add_argument('-moff', '--my_extra_rules_turnoff_flag', help="Turn off adding my extra rules", default=False, action='store_true')
parser.add_argument('-p', '--proxy', help="Proxy host:port", type=str, default='')
parser.add_argument('-P', '--PAC-original', help="Original proxy.pac file", type=str, default='proxy.pac.orig')
parser.add_argument('-rb', '--bad-rule-max', help="Maximum number of bad rules (-1 for unlimited)", type=int,
default=19999)
parser.add_argument('-rg', '--good-rule-max', help="Maximum number of good rules (-1 for unlimited)",
type=int, default=1099)
parser.add_argument('-th', '--truncate_hash', help="Truncate hash object length to maximum number", type=int,
default=3999)
parser.add_argument('-tr', '--truncate_regex', help="Truncate regex rules to maximum number", type=int,
default=499)
parser.add_argument('-w', '--sliding-window', help="Sliding window training and test (slow)", action='store_true')
parser.add_argument('-x', '--Extra_EasyList_URLs', help="Extra EasyList URLs", type=str, nargs='+', default=[])
parser.add_argument('-*', '--wildcard-limit', help="Limit the number of wildcards", type=int, default=999)
parser.add_argument('-@@', '--exceptions_include_flag', help="Include exception rules", action='store_true')
args = parser.parse_args()
self.args = args
self.blackhole_ip_port = args.blackhole
self.easylist_dir = os.path.expanduser(args.download_dir)
self.debug = args.debug
self.my_extra_rules_off = args.my_extra_rules_turnoff_flag
self.proxy_host_port = args.proxy
self.orig_pac_file = os.path.join(self.easylist_dir, args.PAC_original)
# n.b. negative limits are set to no limits using [:None] slicing trick
self.good_rule_max = args.good_rule_max if args.good_rule_max >= 0 else None
self.bad_rule_max = args.bad_rule_max if args.bad_rule_max >= 0 else None
self.truncate_hash_max = args.truncate_hash if args.truncate_hash >= 0 else None
self.truncate_alternatives_max = args.truncate_regex if args.truncate_regex >= 0 else None
self.sliding_window = args.sliding_window
self.exceptions_include_flag = args.exceptions_include_flag
self.wildcard_named_group_limit = args.wildcard_limit if args.wildcard_limit >= 0 else None
self.extra_easylist_urls = args.Extra_EasyList_URLs
return self.args
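# Example invocation (illustrative; the flags are defined above, the
# values are arbitrary):
#   python3 easylist_pac.py -b 192.168.0.2:8119 -d ~/Downloads -rb 19999 -w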
def easylists_download_latest(self):
easylist_url = 'https://easylist.to/easylist/easylist.txt'
easyprivacy_url = 'https://easylist.to/easylist/easyprivacy.txt'
fanboy_annoyance_url = 'https://easylist.to/easylist/fanboy-annoyance.txt'
fanboy_antifacebook = 'https://raw.githubusercontent.com/ryanbr/fanboy-adblock/master/fanboy-antifacebook.txt'
self.download_list = [fanboy_antifacebook, fanboy_annoyance_url, easyprivacy_url, easylist_url] + self.extra_easylist_urls
self.file_list = []
for url in self.download_list:
fname = os.path.basename(url)
fname_full = os.path.join(self.easylist_dir, fname)
file_utc = file_to_utc(fname_full) if os.path.isfile(os.path.join(self.easylist_dir, fname)) else 0.
resp = urllib.request.urlopen(urllib.request.Request(url, headers={'User-Agent': user_agent}))
url_utc = last_modified_to_utc(last_modified_resp(resp))
if (url_utc > file_utc) or (os.path.getsize(fname_full) == 0): # download the newer file
with open(fname_full, mode='w', encoding='utf-8') as out_file:
out_file.write(resp.read().decode('utf-8'))
self.file_list.append(fname_full)
def parse_and_filter_rule_files(self):
"""Parse all rules into good and bad lists. Use flags to specify included/excluded rules."""
self.good_rules = []
self.bad_rules = []
self.good_opts = []
self.bad_opts = []
self.good_rules_include_flag = []
self.bad_rules_include_flag = []
for file in self.file_list:
with open(file, 'r', encoding='utf-8') as fd:
self.easylist_append_rules(fd)
def easylist_append_rules(self, fd):
"""Append EasyList rules from file to good and bad lists."""
for line in fd:
line = line.rstrip()
try:
self.easylist_append_one_rule(line)
except self.RuleIgnored as e:
if self.debug: print(e,flush=True)
continue
class RuleIgnored(Exception):
pass
def easylist_append_one_rule(self, line):
"""Append EasyList rules from line to good and bad lists."""
ignore_rules_flag = False
ignored_rules_count = 0
line_orig = line
# configuration lines and selector rules should already be filtered out
if re_test(configuration_re, line) or re_test(selector_re, line): raise self.RuleIgnored("Rule '{}' not added.".format(line))
exception_flag = exception_filter(line) # block default; pass if True
line = exception_re.sub(r'\1', line)
option_exception_re = not3dimppuposgh_option_exception_re # ignore these options by default
# delete all easylist options **prior** to regex and selector cases
# ignore domain limits for now
opts = '' # default: no options in the rule
if re_test(option_re, line):
opts = option_re.sub(r'\2', line)
# domain-specific and other option exceptions: ignore
# too many rules (>~ 10k) bog down the browser; make reasonable exclusions here
line = option_re.sub(r'\1', line) # delete all the options and continue
# ignore these cases
# comment case: ignore
if re_test(comment_re, line):
if re_test(commentname_sections_ignore_re, line):
ignored_rules_comment_start = comment_re.sub('', line)
if not ignore_rules_flag:
ignored_rules_count = 0
ignore_rules_flag = True
print('Ignore rules following comment ', end='', flush=True)
print('"{}"… '.format(ignored_rules_comment_start), end='', flush=True)
else:
if ignore_rules_flag: print('\n {:d} rules ignored.'.format(ignored_rules_count), flush=True)
ignored_rules_count = 0
ignore_rules_flag = False
raise self.RuleIgnored("Rule '{}' not added.".format(line))
if ignore_rules_flag:
ignored_rules_count += 1
self.append_rule(exception_flag, line, opts, False)
raise self.RuleIgnored("Rule '{}' not added.".format(line))
# blank url case: ignore
if re_test(httpempty_re, line): raise self.RuleIgnored("Rule '{}' not added.".format(line))
# blank line case: ignore
if not bool(line): raise self.RuleIgnored("Rule '{}' not added.".format(line))
# block default or pass exception
if exception_flag:
option_exception_re = not3dimppuposgh_option_exception_re # ignore these options within exceptions
if not self.exceptions_include_flag:
self.append_rule(exception_flag, line, opts, False)
raise self.RuleIgnored("Rule '{}' not added.".format(line))
# specific options: ignore
if re_test(option_exception_re, opts):
self.append_rule(exception_flag, line, opts, False)
raise self.RuleIgnored("Rule '{}' not added.".format(line))
# add all remaining rules
self.append_rule(exception_flag, line, opts, True)
def append_rule(self,exception_flag,rule, opts, include_rule_flag):
if not bool(rule): return # last chance to reject blank lines -- shouldn't happen
if exception_flag:
self.good_rules.append(rule)
self.good_opts.append(option_tokenizer(opts))
self.good_rules_include_flag.append(include_rule_flag)
else:
self.bad_rules.append(rule)
self.bad_opts.append(option_tokenizer(opts))
self.bad_rules_include_flag.append(include_rule_flag)
def good_class_test(self,rule,opts=''):
return not bool(badregex_regex_filters_re.search(rule))
def bad_class_test(self,rule,opts=''):
"""Bad rule of interest if a match for the bad regex's or specific rule options,
e.g. non-domain specific popups or images."""
return bool(badregex_regex_filters_re.search(rule)) \
or (bool(opts) and bool(thrdp_im_pup_os_option_re.search(opts))
and not bool(not3dimppupos_option_exception_re.search(opts)))
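# Illustrative behavior of the bootstrap class tests (hypothetical rules;
# badregex_regex_filters_re and the option regexes are defined at module level below):
#   bad_class_test('/adserver/', '') -> True when badregex_regex_filters_re matches '/adserver/'
#   bad_class_test('/images/', 'third-party') -> True via the option test, since
#   'third-party' matches thrdp_im_pup_os_option_re but not the exception options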
def prioritize_rules(self):
# use bootstrap regex preferences
# https://github.com/seatgeek/fuzzywuzzy would be great here if there were such a thing for regex
self.good_signal = np.array([self.good_class_test(x,opts) for (x,opts,f) in zip(self.good_rules,self.good_opts,self.good_rules_include_flag) if f], dtype=np.int)
self.bad_signal = np.array([self.bad_class_test(x,opts) for (x,opts,f) in zip(self.bad_rules,self.bad_opts,self.bad_rules_include_flag) if f], dtype=np.int)
self.good_columns = np.array([i for (i,f) in enumerate(self.good_rules_include_flag) if f],dtype=int)
self.bad_columns = np.array([i for (i,f) in enumerate(self.bad_rules_include_flag) if f],dtype=int)
# Logistic Regression for more accurate rule priorities
if machine_learning_flag:
print("Performing logistic regression on rule sets. This will take a few minutes…",end='',flush=True)
self.logreg_priorities()
print(" done.", flush=True)
# truncate to positive signal strengths
if not self.debug:
self.good_rule_max = min(self.good_rule_max,np.count_nonzero(self.good_signal > 0)) \
if isinstance(self.good_rule_max,(int,np.int)) else np.count_nonzero(self.good_signal > 0)
self.bad_rule_max = min(self.bad_rule_max, np.count_nonzero(self.bad_signal > 0)) \
if isinstance(self.bad_rule_max,(int,np.int)) else np.count_nonzero(self.bad_signal > 0)
# prioritize and limit the rules
good_pridx = np.array([e[0] for e in sorted(enumerate(self.good_signal),key=lambda e: e[1],reverse=True)],dtype=int)[:self.good_rule_max]
self.good_columns = self.good_columns[good_pridx]
self.good_signal = self.good_signal[good_pridx]
self.good_rules = [self.good_rules[k] for k in self.good_columns]
bad_pridx = np.array([e[0] for e in sorted(enumerate(self.bad_signal),key=lambda e: e[1],reverse=True)],dtype=int)[:self.bad_rule_max]
self.bad_columns = self.bad_columns[bad_pridx]
self.bad_signal = self.bad_signal[bad_pridx]
self.bad_rules = [self.bad_rules[k] for k in self.bad_columns]
# include hardcoded rules
for rule in include_these_good_rules:
if rule not in self.good_rules: self.good_rules.append(rule)
for rule in include_these_bad_rules:
if rule not in self.bad_rules: self.bad_rules.append(rule)
# rules are now ordered
self.good_columns = np.arange(0,len(self.good_rules),dtype=self.good_columns.dtype)
self.bad_columns = np.arange(0,len(self.bad_rules),dtype=self.bad_columns.dtype)
return
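# Illustrative (hypothetical numbers): with good_rule_max == 2 and bootstrap signal
# [0, 1, 0, 1] over four included rules, the two positive-signal rules are kept and
# re-ordered first; the hardcoded include_these_* rules are then appended and the
# column arrays renumbered 0..n-1 so downstream code can index the pruned lists directly.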
def logreg_priorities(self):
"""Rule prioritization using logistic regression on bootstrap preferences."""
self.good_fv_json = {}
self.good_column_hash = {}
for col, (rule,opts) in enumerate(zip(self.good_rules,self.good_opts)):
feature_vector_append_column(rule, opts, col, self.good_fv_json)
self.good_column_hash[rule] = col
self.bad_fv_json = {}
self.bad_column_hash = {}
for col, (rule,opts) in enumerate(zip(self.bad_rules,self.bad_opts)):
feature_vector_append_column(rule, opts, col, self.bad_fv_json)
self.bad_column_hash[rule] = col
self.good_fv_mat, self.good_row_hash = fv_to_mat(self.good_fv_json, self.good_rules)
self.bad_fv_mat, self.bad_row_hash = fv_to_mat(self.bad_fv_json, self.bad_rules)
self.good_X_all = StandardScaler(with_mean=False).fit_transform(self.good_fv_mat.astype(np.float))
self.good_y_all = np.array([self.good_class_test(x,opts) for (x,opts) in zip(self.good_rules, self.good_opts)], dtype=np.int)
self.bad_X_all = StandardScaler(with_mean=False).fit_transform(self.bad_fv_mat.astype(np.float))
self.bad_y_all = np.array([self.bad_class_test(x,opts) for (x,opts) in zip(self.bad_rules, self.bad_opts)], dtype=np.int)
self.logit_fit_method_sample_weights()
# inverse regularization strength; smaller values give more sparseness, less model rigidity
self.C = 1.e1
self.logreg_test_in_training()
if self.sliding_window: self.logreg_sliding_window()
return
def debug_feature_vector(self,rule_substring=r'google.com/pagead'):
for j, rule in enumerate(self.bad_rules):
if rule.find(rule_substring) >= 0: break
col = j
print(self.bad_rules[col])
_, rows = self.bad_fv_mat[col,:].nonzero() # fv_mat is transposed
print(rows)
for row in rows:
print('Row {:d}: {}:: {:g}'.format(row, self.bad_row_hash[int(row)], self.bad_fv_mat[col, row]))
def logit_fit_method_sample_weights(self):
# weights for LogisticRegression.fit()
self.good_w_all = np.ones(len(self.good_y_all))
self.bad_w_all = np.ones(len(self.bad_y_all))
# add more weight for each of these regex matches
for i, rule in enumerate(self.bad_rules):
self.bad_w_all[i] += 1/max(1,len(rule)) # slight disadvantage for longer rules
for regex in high_weight_regex:
self.bad_w_all[i] += len(regex.findall(rule))
# these options have more weight
self.bad_w_all[i] += bool(thrdp_im_pup_os_option_re.search(self.bad_opts[i]))
return
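# Illustrative weighting (hypothetical rule): a 10-character bad rule matching one
# high_weight_regex pattern and carrying a third-party option gets
# 1 + 1/10 + 1 + 1 = 3.1, versus 1 + 1/len(rule) for an unremarkable rule.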
def logreg_test_in_training(self):
"""fast, initial method: test vectors in the training data"""
self.good_fv_logreg = LogisticRegression(C=self.C, penalty='l2', solver='liblinear', tol=0.01)
self.bad_fv_logreg = LogisticRegression(C=self.C, penalty='l2', solver='liblinear', tol=0.01)
good_x_test = self.good_X_all[self.good_columns]
good_X = self.good_X_all
good_y = self.good_y_all
good_w = self.good_w_all
bad_x_test = self.bad_X_all[self.bad_columns]
bad_X = self.bad_X_all
bad_y = self.bad_y_all
bad_w = self.bad_w_all
if good_x_test.shape[0] > 0:
self.good_fv_logreg.fit(good_X, good_y, sample_weight=good_w)
self.good_signal = self.good_fv_logreg.decision_function(good_x_test)
if bad_x_test.shape[0] > 0:
self.bad_fv_logreg.fit(bad_X, bad_y, sample_weight=bad_w)
self.bad_signal = self.bad_fv_logreg.decision_function(bad_x_test)
return
def logreg_sliding_window(self):
"""bootstrap the signal strengths by removing test vectors from training"""
# pre-prioritize using test-in-target values and limit the rules
if not self.debug:
good_preidx = np.array([e[0] for e in sorted(enumerate(self.good_signal),key=lambda e: e[1],reverse=True)],dtype=int)[:int(np.ceil(1.4*self.good_rule_max))]
self.good_columns = self.good_columns[good_preidx]
bad_preidx = np.array([e[0] for e in sorted(enumerate(self.bad_signal),key=lambda e: e[1],reverse=True)],dtype=int)[:int(np.ceil(1.4*self.bad_rule_max))]
self.bad_columns = self.bad_columns[bad_preidx]
# multithreaded loop for speed
use_blocked_not_sklearn_mp = True # it's a lot faster to block it yourself
if use_blocked_not_sklearn_mp:
# init w/ target-in-training results
good_fv_logreg = copy.deepcopy(self.good_fv_logreg)
good_fv_logreg.penalty = 'l2'
good_fv_logreg.solver = 'sag'
good_fv_logreg.warm_start = True
good_fv_logreg.n_jobs = 1 # achieve parallelism via block processing
bad_fv_logreg = copy.deepcopy(self.bad_fv_logreg)
bad_fv_logreg.penalty = 'l2'
bad_fv_logreg.solver = 'sag'
bad_fv_logreg.warm_start = True
bad_fv_logreg.n_jobs = 1 # achieve parallelism via block processing
if False: # debug mp: turn off multiprocessing with a monkeypatch
class NotAMultiProcess(mp.Process):
def start(self): self.run()
def join(self): pass
mp.Process = NotAMultiProcess
# this is probably efficient with Linux's copy-on-write fork(); unsure about BSD/macOS
# to guarantee sharing, this must be refactored to use a shared Array() [along with the warm_start coeff's];
# see https://stackoverflow.com/questions/5549190/is-shared-readonly-data-copied-to-different-processes-for-python-multiprocessing/
# distribute training and tests across multiprocessors
def training_op(queue, X_all, y_all, w_all, fv_logreg, columns, column_block):
"""Training and test operation put into a mp.Queue.
columns[column_block] and signal[column_block] are the rule columns and corresponding signal strengths
"""
res = np.zeros(len(column_block))
for k in range(len(column_block)):
mask = np.zeros(len(y_all), dtype=bool)
mask[columns[column_block[k]]] = True
mask = np.logical_not(mask)
x_test = X_all[np.logical_not(mask)]
X = X_all[mask]
y = y_all[mask]
w = w_all[mask]
fv_logreg.fit(X, y, sample_weight=w)
res[k] = fv_logreg.decision_function(x_test)[0]
queue.put((column_block,res)) # signal[column_block] = res
return
num_threads = mp.cpu_count()
# good
q = mp.Queue()
jobs = []
self.good_signal = np.zeros(len(self.good_columns))
block_length = len(self.good_columns) // num_threads
column_block = np.arange(0, block_length)
while len(column_block) > 0:
column_block = column_block[np.where(column_block < len(self.good_columns))]
fv_logreg = copy.deepcopy(good_fv_logreg) # each process gets its own .coeff_'s
column_block_copy = np.copy(column_block) # each process gets its own block of columns
p = mp.Process(target=training_op, args=(q, self.good_X_all, self.good_y_all, self.good_w_all, fv_logreg, self.good_columns, column_block_copy))
p.start()
jobs.append(p)
column_block += len(column_block)
# process the results in the queue
for i in range(len(jobs)):
column_block, res = q.get()
self.good_signal[column_block] = res
# join all jobs and wait for them to complete
for p in jobs: p.join()
# bad
q = mp.Queue()
jobs = []
self.bad_signal = np.zeros(len(self.bad_columns))
block_length = len(self.bad_columns) // num_threads
column_block = np.arange(0, block_length)
while len(column_block) > 0:
column_block = column_block[np.where(column_block < len(self.bad_columns))]
fv_logreg = copy.deepcopy(bad_fv_logreg) # each process gets its own .coeff_'s
column_block_copy = np.copy(column_block) # each process gets its own block of columns
p = mp.Process(target=training_op, args=(q, self.bad_X_all, self.bad_y_all, self.bad_w_all, fv_logreg, self.bad_columns, column_block_copy))
p.start()
jobs.append(p)
column_block += len(column_block)
# process the results in the queue
for i in range(len(jobs)):
column_block, res = q.get()
self.bad_signal[column_block] = res
# join all jobs and wait for them to complete
for p in jobs: p.join()
else: # if use_blocked_not_sklearn_mp:
def training_op(X_all, y_all, w_all, fv_logreg, columns, signal):
"""Training and test operations reusing results with multiprocessing."""
res = np.zeros(len(signal))
for k in range(len(res)):
mask = np.zeros(len(y_all), dtype=bool)
mask[columns[k]] = True
mask = np.logical_not(mask)
x_test = X_all[np.logical_not(mask)]
X = X_all[mask]
y = y_all[mask]
w = w_all[mask]
fv_logreg.fit(X, y, sample_weight=w)
res[k] = fv_logreg.decision_function(x_test)[0]
signal[:] = res
return
# good
training_op(self.good_X_all, self.good_y_all, self.good_w_all, self.good_fv_logreg, self.good_columns, self.good_signal)
# bad
training_op(self.bad_X_all, self.bad_y_all, self.bad_w_all, self.bad_fv_logreg, self.bad_columns, self.bad_signal)
return
def parse_easylist_rules(self):
for rule in self.good_rules: self.easylist_to_javascript_vars(rule)
for rule in self.bad_rules: self.easylist_to_javascript_vars(rule)
ordered_unique_all_js_var_lists()
return
def easylist_to_javascript_vars(self,rule,ignore_huge_url_regex_rule_list=False):
rule = rule.rstrip()
rule_orig = rule
exception_flag = exception_filter(rule) # block default; pass if True
rule = exception_re.sub(r'\1', rule)
option_exception_re = not3dimppupos_option_exception_re # ignore these options by default
opts = '' # default: no options in the rule
if re_test(option_re, rule):
opts = option_re.sub(r'\2', rule)
# domain-specific and other option exceptions: ignore
# too many rules (>~ 10k) bog down the browser; make reasonable exclusions here
rule = option_re.sub(r'\1', rule) # delete all the options and continue
# ignore these cases
# comment case: ignore
if re_test(comment_re, rule): return
# block default or pass exception
if exception_flag:
option_exception_re = not3dimppuposgh_option_exception_re # ignore these options within exceptions
if not self.exceptions_include_flag: return
# specific options: ignore
if re_test(option_exception_re, opts): return
# blank url case: ignore
if re_test(httpempty_re, rule): return
# blank line case: ignore
if not rule: return
# treat each of the these cases separately, here and in Javascript
# regex case
if re_test(regex_re, rule):
if regex_ignore_test(rule): return
rule = regex_re.sub(r'\1', rule)
if exception_flag:
good_url_regex.append(rule)
else:
if not re_test(badregex_regex_filters_re,
rule): return # limit bad regex's to those in the filter
bad_url_regex.append(rule)
return
# now that regex's are handled, delete unnecessary wildcards, e.g. /.../*
rule = wildcard_begend_re.sub(r'\1', rule)
# domain anchors, || or '|http://a.b' -> domain anchor 'a.b' for regex efficiency in JS
if re_test(domain_anch_re, rule) or re_test(scheme_anchor_re, rule):
# strip off initial || or |scheme://
if re_test(domain_anch_re, rule):
rule = domain_anch_re.sub(r'\1', rule)
elif re_test(scheme_anchor_re, rule):
rule = scheme_anchor_re.sub("", rule)
# host subcase
if re_test(da_hostonly_re, rule):
rule = da_hostonly_re.sub(r'\1', rule)
if not re_test(wild_anch_sep_exc_re, rule): # exact subsubcase
if not re_test(badregex_regex_filters_re, rule):
return # limit bad regex's to those in the filter
if exception_flag:
good_da_host_exact.append(rule)
else:
bad_da_host_exact.append(rule)
return
else: # regex subsubcase
if regex_ignore_test(rule): return
if exception_flag:
good_da_host_regex.append(rule)
else:
if not re_test(badregex_regex_filters_re,
rule): return # limit bad regex's to those in the filter
bad_da_host_regex.append(rule)
return
# hostpath subcase
if re_test(da_hostpath_re, rule):
rule = da_hostpath_re.sub(r'\1', rule)
if not re_test(wild_sep_exc_noanch_re, rule) and re_test(pathend_re, rule): # exact subsubcase
rule = re.sub(r'\|$', '', rule) # strip EOL anchors
if not re_test(badregex_regex_filters_re, rule):
return # limit bad regex's to those in the filter
if exception_flag:
good_da_hostpath_exact.append(rule)
else:
bad_da_hostpath_exact.append(rule)
return
else: # regex subsubcase
if regex_ignore_test(rule): return
# ignore option rules for some regex rules
if re_test(alloption_exception_re, opts): return
if exception_flag:
good_da_hostpath_regex.append(rule)
else:
if not re_test(badregex_regex_filters_re,
rule): return # limit bad regex's to those in the filter
bad_da_hostpath_regex.append(rule)
return
# hostpathquery default case
if True:
# if re_test(re.compile(r'^go\.'),rule):
# pass
if regex_ignore_test(rule): return
if exception_flag:
good_da_regex.append(rule)
else:
bad_da_regex.append(rule)
return
# all other non-regex patterns
if True:
if regex_ignore_test(rule): return
if not ignore_huge_url_regex_rule_list:
if re_test(alloption_exception_re, opts): return
if exception_flag:
good_url_parts.append(rule)
else:
if not re_test(badregex_regex_filters_re,
rule): return # limit bad regex's to those in the filter
bad_url_parts.append(rule)
return # superfluous return
def create_pac_file(self):
self.proxy_pac_init()
self.proxy_pac = self.proxy_pac_preamble \
+ "\n".join(["// " + l for l in self.easylist_strategy.split("\n")]) \
+ self.js_init_object('good_da_host_exact') \
+ self.js_init_regexp('good_da_host_regex', True) \
+ self.js_init_object('good_da_hostpath_exact') \
+ self.js_init_regexp('good_da_hostpath_regex', True) \
+ self.js_init_regexp('good_da_regex', True) \
+ self.js_init_object('good_da_host_exceptions_exact') \
+ self.js_init_object('bad_da_host_exact') \
+ self.js_init_regexp('bad_da_host_regex', True) \
+ self.js_init_object('bad_da_hostpath_exact') \
+ self.js_init_regexp('bad_da_hostpath_regex', True) \
+ self.js_init_regexp('bad_da_regex', True) \
+ self.js_init_regexp('good_url_parts') \
+ self.js_init_regexp('bad_url_parts') \
+ self.js_init_regexp('good_url_regex', regex_flag=True) \
+ self.js_init_regexp('bad_url_regex', regex_flag=True) \
+ self.proxy_pac_postamble
for l in ['good_da_host_exact',
'good_da_host_regex',
'good_da_hostpath_exact',
'good_da_hostpath_regex',
'good_da_regex',
'good_da_host_exceptions_exact',
'bad_da_host_exact',
'bad_da_host_regex',
'bad_da_hostpath_exact',
'bad_da_hostpath_regex',
'bad_da_regex',
'good_url_parts',
'bad_url_parts',
'good_url_regex',
'bad_url_regex']:
print("{}: {:d} rules".format(l, len(globals()[l])), flush=True)
with open(os.path.join(self.easylist_dir, 'proxy.pac'), 'w', encoding='utf-8') as fd:
fd.write(self.proxy_pac)
def proxy_pac_init(self):
self.pac_proxy = 'PROXY {}'.format(self.proxy_host_port) if self.proxy_host_port else 'DIRECT'
# define a default, user-supplied FindProxyForURL function
self.default_FindProxyForURL_function = '''\
function FindProxyForURL(url, host)
{
if (
isPlainHostName(host) ||
shExpMatch(host, "10.*") ||
shExpMatch(host, "172.16.*") ||
shExpMatch(host, "192.168.*") ||
shExpMatch(host, "127.*") ||
dnsDomainIs(host, ".local") || dnsDomainIs(host, ".LOCAL")
)
return "DIRECT";
else if (
/*
Proxy bypass hostnames
*/
/*
Fix iOS 13 PAC file issue with Mail.app
See: https://forums.developer.apple.com/thread/121928
*/
// Apple
(host == "imap.mail.me.com") || (host == "smtp.mail.me.com") ||
dnsDomainIs(host, "imap.mail.me.com") || dnsDomainIs(host, "smtp.mail.me.com") ||
(host == "p03-imap.mail.me.com") || (host == "p03-smtp.mail.me.com") ||
dnsDomainIs(host, "p03-imap.mail.me.com") || dnsDomainIs(host, "p03-smtp.mail.me.com") ||
(host == "p66-imap.mail.me.com") || (host == "p66-smtp.mail.me.com") ||
dnsDomainIs(host, "p66-imap.mail.me.com") || dnsDomainIs(host, "p66-smtp.mail.me.com") ||
// Google
(host == "imap.gmail.com") || (host == "smtp.gmail.com") ||
dnsDomainIs(host, "imap.gmail.com") || dnsDomainIs(host, "smtp.gmail.com") ||
// Yahoo
(host == "imap.mail.yahoo.com") || (host == "smtp.mail.yahoo.com") ||
dnsDomainIs(host, "imap.mail.yahoo.com") || dnsDomainIs(host, "smtp.mail.yahoo.com") ||
// Comcast
(host == "imap.comcast.net") || (host == "smtp.comcast.net") ||
dnsDomainIs(host, "imap.comcast.net") || dnsDomainIs(host, "smtp.comcast.net") ||
// Apple Enterprise Network Domains; https://support.apple.com/en-us/HT210060
(host == "albert.apple.com") || dnsDomainIs(host, "albert.apple.com") ||
(host == "captive.apple.com") || dnsDomainIs(host, "captive.apple.com") ||
(host == "gs.apple.com") || dnsDomainIs(host, "gs.apple.com") ||
(host == "humb.apple.com") || dnsDomainIs(host, "humb.apple.com") ||
(host == "static.ips.apple.com") || dnsDomainIs(host, "static.ips.apple.com") ||
(host == "tbsc.apple.com") || dnsDomainIs(host, "tbsc.apple.com") ||
(host == "time-ios.apple.com") || dnsDomainIs(host, "time-ios.apple.com") ||
(host == "time.apple.com") || dnsDomainIs(host, "time.apple.com") ||
(host == "time-macos.apple.com") || dnsDomainIs(host, "time-macos.apple.com") ||
dnsDomainIs(host, ".push.apple.com") ||
(host == "gdmf.apple.com") || dnsDomainIs(host, "gdmf.apple.com") ||
(host == "deviceenrollment.apple.com") || dnsDomainIs(host, "deviceenrollment.apple.com") ||
(host == "deviceservices-external.apple.com") || dnsDomainIs(host, "deviceservices-external.apple.com") ||
(host == "identity.apple.com") || dnsDomainIs(host, "identity.apple.com") ||
(host == "iprofiles.apple.com") || dnsDomainIs(host, "iprofiles.apple.com") ||
(host == "mdmenrollment.apple.com") || dnsDomainIs(host, "mdmenrollment.apple.com") ||
(host == "setup.icloud.com") || dnsDomainIs(host, "setup.icloud.com") ||
(host == "appldnld.apple.com") || dnsDomainIs(host, "appldnld.apple.com") ||
(host == "gg.apple.com") || dnsDomainIs(host, "gg.apple.com") ||
(host == "gnf-mdn.apple.com") || dnsDomainIs(host, "gnf-mdn.apple.com") ||
(host == "gnf-mr.apple.com") || dnsDomainIs(host, "gnf-mr.apple.com") ||
(host == "gs.apple.com") || dnsDomainIs(host, "gs.apple.com") ||
(host == "ig.apple.com") || dnsDomainIs(host, "ig.apple.com") ||
(host == "mesu.apple.com") || dnsDomainIs(host, "mesu.apple.com") ||
(host == "oscdn.apple.com") || dnsDomainIs(host, "oscdn.apple.com") ||
(host == "osrecovery.apple.com") || dnsDomainIs(host, "osrecovery.apple.com") ||
(host == "skl.apple.com") || dnsDomainIs(host, "skl.apple.com") ||
(host == "swcdn.apple.com") || dnsDomainIs(host, "swcdn.apple.com") ||
(host == "swdist.apple.com") || dnsDomainIs(host, "swdist.apple.com") ||
(host == "swdownload.apple.com") || dnsDomainIs(host, "swdownload.apple.com") ||
(host == "swpost.apple.com") || dnsDomainIs(host, "swpost.apple.com") ||
(host == "swscan.apple.com") || dnsDomainIs(host, "swscan.apple.com") ||
(host == "updates-http.cdn-apple.com") || dnsDomainIs(host, "updates-http.cdn-apple.com") ||
(host == "updates.cdn-apple.com") || dnsDomainIs(host, "updates.cdn-apple.com") ||
(host == "xp.apple.com") || dnsDomainIs(host, "xp.apple.com") ||
dnsDomainIs(host, ".itunes.apple.com") ||
dnsDomainIs(host, ".apps.apple.com") ||
dnsDomainIs(host, ".mzstatic.com") ||
(host == "ppq.apple.com") || dnsDomainIs(host, "ppq.apple.com") ||
(host == "lcdn-registration.apple.com") || dnsDomainIs(host, "lcdn-registration.apple.com") ||
(host == "crl.apple.com") || dnsDomainIs(host, "crl.apple.com") ||
(host == "crl.entrust.net") || dnsDomainIs(host, "crl.entrust.net") ||
(host == "crl3.digicert.com") || dnsDomainIs(host, "crl3.digicert.com") ||
(host == "crl4.digicert.com") || dnsDomainIs(host, "crl4.digicert.com") ||
(host == "ocsp.apple.com") || dnsDomainIs(host, "ocsp.apple.com") ||
(host == "ocsp.digicert.com") || dnsDomainIs(host, "ocsp.digicert.com") ||
(host == "ocsp.entrust.net") || dnsDomainIs(host, "ocsp.entrust.net") ||
(host == "ocsp.verisign.net") || dnsDomainIs(host, "ocsp.verisign.net") ||
// Zoom
dnsDomainIs(host, ".zoom.us")
)
return "PROXY localhost:3128";
else
return "PROXY localhost:3128";
}
'''
if os.path.isfile(self.orig_pac_file):
with open(self.orig_pac_file, 'r', encoding='utf-8') as fd:
self.original_FindProxyForURL_function = fd.read()
else:
self.original_FindProxyForURL_function = self.default_FindProxyForURL_function
# change last 'return "PROXY ..."' to 'return EasyListFindProxyForURL(url, host)'
def re_sub_last(pattern, repl, string, **kwargs):
'''re.sub on the last match in a string'''
# ensure that pattern is grouped
# (note that (?:) is not caught)
pattern_grouped = pattern if bool(re.match(r'\(.+\)',pattern)) else r'({})'.format(pattern)
spl = re.split(pattern_grouped, string, **kwargs)
if len(spl) == 1: return string
spl[-2] = re.sub(pattern, repl, spl[-2], **kwargs)
return ''.join(spl)
self.original_FindProxyForURL_function = re_sub_last(r'return[\s]+"PROXY[^"]+"', 'return EasyListFindProxyForURL(url, host)',
self.original_FindProxyForURL_function)
# proxy.pac preamble
self.calling_command = ' '.join([os.path.basename(sys.argv[0])] + sys.argv[1:])
self.proxy_pac_preamble = '''\
// PAC (Proxy Auto Configuration) Filter from EasyList rules
//
// Copyright (C) 2017 by Steven T. Smith <steve dot t dot smith at gmail dot com>, GPL
// https://github.com/essandess/easylist-pac-privoxy/
//
// PAC file created on {}
// Created with command: {}
//
// http://www.gnu.org/licenses/lgpl.txt
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// If you normally use a proxy, replace "DIRECT" below with
// "PROXY MACHINE:PORT"
// where MACHINE is the IP address or host name of your proxy
// server and PORT is the port number of your proxy server.
//
// Influenced in part by code from King of the PAC from http://securemecca.com/pac.html
// Define the blackhole proxy for blocked adware and trackware
var normal = "DIRECT";
var proxy = "{}"; // e.g. 127.0.0.1:3128
// var blackhole_ip_port = "127.0.0.1:8119"; // ngnix-hosted blackhole
// var blackhole_ip_port = "8.8.8.8:53"; // GOOG DNS blackhole; do not use: no longer works with iOS 11—causes long waits on some sites
var blackhole_ip_port = "{}"; // on iOS a working blackhole requires return code 200;
// e.g. use the adblock2privoxy nginx server as a blackhole
var blackhole = "PROXY " + blackhole_ip_port;
// The hostnames must be consistent with EasyList format.
// These special RegExp characters will be escaped below: [.?+@]
// This EasyList wildcard will be transformed to an efficient RegExp: *
//
// EasyList format references:
// https://adblockplus.org/filters
// https://adblockplus.org/filter-cheatsheet
// Create object hashes or compile efficient NFA's from all filters
// Various alternate filtering and regex approaches were timed using node and at jsperf.com
// Too many rules (>~ 10k) bog down the browser; make reasonable exclusions here:
'''.format(time.strftime("%a, %d %b %Y %X GMT", time.gmtime()),self.calling_command,self.pac_proxy,self.blackhole_ip_port)
self.proxy_pac_postamble = '''
// Add any good networks here. Format is network followed by a comma and
// optional white space, and then the netmask.
// LAN, loopback, Apple (direct and Akamai e.g. e4805.a.akamaiedge.net), Microsoft (updates and services)
// Apple Enterprise Network; https://support.apple.com/en-us/HT210060
var GoodNetworks_Array = [ "10.0.0.0, 255.0.0.0",
"172.16.0.0, 255.240.0.0",
"17.248.128.0, 255.255.192.0",
"17.250.64.0, 255.255.192.0",
"17.248.192.0, 255.255.224.0",
"192.168.0.0, 255.255.0.0",
"127.0.0.0, 255.0.0.0",
"17.0.0.0, 255.0.0.0",
"23.2.8.68, 255.255.255.255",
"23.2.145.78, 255.255.255.255",
"23.39.179.17, 255.255.255.255",
"23.63.98.0, 255.255.254.0",
"104.70.71.223, 255.255.255.255",
"104.73.77.224, 255.255.255.255",
"104.96.184.235, 255.255.255.255",
"104.96.188.194, 255.255.255.255",
"65.52.0.0, 255.255.252.0" ];
// Apple iAd, Microsoft telemetry
var GoodNetworks_Exceptions_Array = [ "17.172.28.11, 255.255.255.255",
"134.170.30.202, 255.255.255.255",
"137.116.81.24, 255.255.255.255",
"157.56.106.189, 255.255.255.255",
"184.86.53.99, 255.255.255.255",
"2.22.61.43, 255.255.255.255",
"2.22.61.66, 255.255.255.255",
"204.79.197.200, 255.255.255.255",
"23.218.212.69, 255.255.255.255",
"65.39.117.230, 255.255.255.255",
"65.52.108.33, 255.255.255.255",
"65.55.108.23, 255.255.255.255",
"64.4.54.254, 255.255.255.255" ];
// Akamai: 23.64.0.0/14, 23.0.0.0/12, 23.32.0.0/11, 104.64.0.0/10
// Add any bad networks here. Format is network followed by a comma and
// optional white space, and then the netmask.
// From securemecca.com: Adobe marketing cloud, 2o7, omtrdc, Sedo domain parking, flyingcroc, accretive
var BadNetworks_Array = [ "61.139.105.128, 255.255.255.192",
"63.140.35.160, 255.255.255.248",
"63.140.35.168, 255.255.255.252",
"63.140.35.172, 255.255.255.254",
"63.140.35.174, 255.255.255.255",
"66.150.161.32, 255.255.255.224",
"66.235.138.0, 255.255.254.0",
"66.235.141.0, 255.255.255.0",
"66.235.143.48, 255.255.255.254",
"66.235.143.64, 255.255.255.254",
"66.235.153.16, 255.255.255.240",
"66.235.153.32, 255.255.255.248",
"81.31.38.0, 255.255.255.128",
"82.98.86.0, 255.255.255.0",
"89.185.224.0, 255.255.224.0",
"207.66.128.0, 255.255.128.0" ];
// block these schemes; use the command line for ftp, rsync, etc. instead
var bad_schemes_RegExp = RegExp("^(?:ftp|sftp|tftp|ftp-data|rsync|finger|gopher)", "i")
// RegExp for schemes; lengths from
// perl -lane 'BEGIN{$l=0;} {!/^#/ && do{$ll=length($F[0]); if($ll>$l){$l=$ll;}};} END{print $l;}' /etc/services
var schemepart_RegExp = RegExp("^([\\\\w*+-]{2,15}):\\\\/{0,2}","i");
var hostpart_RegExp = RegExp("^((?:[\\\\w-]+\\\\.)+[a-zA-Z0-9-]{2,24}\\\\.?)", "i");
var querypart_RegExp = RegExp("^((?:[\\\\w-]+\\\\.)+[a-zA-Z0-9-]{2,24}\\\\.?[\\\\w~%.\\\\/^*-]*)(\\\\??\\\\S*?)$", "i");
var domainpart_RegExp = RegExp("^(?:[\\\\w-]+\\\\.)*((?:[\\\\w-]+\\\\.)[a-zA-Z0-9-]{2,24})\\\\.?", "i");
//////////////////////////////////////////////////
// Define the is_ipv4_address function and vars //
//////////////////////////////////////////////////
var ipv4_RegExp = /^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$/;
function is_ipv4_address(host)
{
var ipv4_pentary = host.match(ipv4_RegExp);
var is_valid_ipv4 = false;
if (ipv4_pentary) {
is_valid_ipv4 = true;
for( i = 1; i <= 4; i++) {
if (ipv4_pentary[i] >= 256) {
is_valid_ipv4 = false;
}
}
}
return is_valid_ipv4;
}
// object hashes
// Note: the original stackoverflow-based hasOwnProperty does not work within the iOS kernel
var hasOwnProperty = function(obj, prop) {
return obj.hasOwnProperty(prop);
}
/////////////////////
// Done Setting Up //
/////////////////////
// debug with Chrome at chrome://net-export
// alert("Debugging message.")
//////////////////////////////////////////
// Define the EasyListFindProxyForURL() //
//////////////////////////////////////////
var use_pass_rules_parts_flag = true; // use the pass rules for url parts, then apply the block rules
var alert_flag = false; // use for short-circuit '&&' to print debugging statements
var debug_flag = false; // use for short-circuit '&&' to print debugging statements
// EasyList filtering for FindProxyForURL(url, host)
function EasyListFindProxyForURL(url, host)
{
var host_is_ipv4 = is_ipv4_address(host);
var host_ipv4_address;
alert_flag && alert("url is: " + url);
alert_flag && alert("host is: " + host);
// Extract scheme and url without scheme
var scheme = url.match(schemepart_RegExp)
scheme = scheme.length > 0? scheme[1] : "";
// Remove the scheme and extract the path for regex efficiency
var url_noscheme = url.replace(schemepart_RegExp,"");
var url_pathonly = url_noscheme.replace(hostpart_RegExp,"");
var url_noquery = url_noscheme.replace(querypart_RegExp,"$1");
// Remove the server name from the url and host if host is not an IPv4 address
var url_noserver = !host_is_ipv4 ? url_noscheme.replace(domainpart_RegExp,"$1") : url_noscheme;
var url_noservernoquery = !host_is_ipv4 ? url_noquery.replace(domainpart_RegExp,"$1") : url_noquery;
var host_noserver = !host_is_ipv4 ? host.replace(domainpart_RegExp,"$1") : host;
// Debugging results
if (debug_flag && alert_flag) {
alert("url_noscheme is: " + url_noscheme);
alert("url_pathonly is: " + url_pathonly);
alert("url_noquery is: " + url_noquery);
alert("url_noserver is: " + url_noserver);
alert("url_noservernoquery is: " + url_noservernoquery);
alert("host_noserver is: " + host_noserver);
}
// Short circuit to blackhole for good_da_host_exceptions
if ( hasOwnProperty(good_da_host_exceptions_exact_JSON,host) ) {
alert_flag && alert("good_da_host_exceptions_exact_JSON blackhole!");
return blackhole;
}
///////////////////////////////////////////////////////////////////////
// Check to make sure we can get an IPv4 address from the given host //
// name. If we cannot do that then skip the Networks tests. //
///////////////////////////////////////////////////////////////////////
host_ipv4_address = host_is_ipv4 ? host : (isResolvable(host) ? dnsResolve(host) : false);
if (host_ipv4_address) {
alert_flag && alert("host ipv4 address is: " + host_ipv4_address);
/////////////////////////////////////////////////////////////////////////////
// If the IP translates to one of the GoodNetworks_Array (with exceptions) //
// we pass it because it is considered safe. //
/////////////////////////////////////////////////////////////////////////////
for (i in GoodNetworks_Exceptions_Array) {
tmpNet = GoodNetworks_Exceptions_Array[i].split(/,\s*/);
if (isInNet(host_ipv4_address, tmpNet[0], tmpNet[1])) {
alert_flag && alert("GoodNetworks_Exceptions_Array Blackhole: " + host_ipv4_address);
return blackhole;
}
}
for (i in GoodNetworks_Array) {
tmpNet = GoodNetworks_Array[i].split(/,\s*/);
if (isInNet(host_ipv4_address, tmpNet[0], tmpNet[1])) {
alert_flag && alert("GoodNetworks_Array PASS: " + host_ipv4_address);
return proxy;
}
}
///////////////////////////////////////////////////////////////////////
// If the IP translates to one of the BadNetworks_Array we fail it //
// because it is not considered safe. //
///////////////////////////////////////////////////////////////////////
for (i in BadNetworks_Array) {
tmpNet = BadNetworks_Array[i].split(/,\s*/);
if (isInNet(host_ipv4_address, tmpNet[0], tmpNet[1])) {
alert_flag && alert("BadNetworks_Array Blackhole: " + host_ipv4_address);
return blackhole;
}
}
}
//////////////////////////////////////////////////////////////////////////////
// HTTPS: https scheme can only use domain information //
// unless PacHttpsUrlStrippingEnabled == false [Chrome] or //
// network.proxy.autoconfig_url.include_path == true [Firefox, about:config] //
// E.g. on macOS: //
// defaults write com.google.Chrome PacHttpsUrlStrippingEnabled -bool false //
// Check setting at page chrome://policy //
//////////////////////////////////////////////////////////////////////////////
// Assume browser has disabled path access if scheme is https and path is '/'
if ( scheme == "https" && url_pathonly == "/" ) {
///////////////////////////////////////////////////////////////////////
// PASS LIST: domains matched here will always be allowed. //
///////////////////////////////////////////////////////////////////////
if ( (good_da_host_exact_flag && (hasOwnProperty(good_da_host_exact_JSON,host_noserver)||hasOwnProperty(good_da_host_exact_JSON,host)))
&& !hasOwnProperty(good_da_host_exceptions_exact_JSON,host) ) {
alert_flag && alert("HTTPS PASS: " + host + ", " + host_noserver);
return proxy;
}
//////////////////////////////////////////////////////////
// BLOCK LIST: stuff matched here will be blocked      //
//////////////////////////////////////////////////////////
if ( (bad_da_host_exact_flag && (hasOwnProperty(bad_da_host_exact_JSON,host_noserver)||hasOwnProperty(bad_da_host_exact_JSON,host))) ) {
alert_flag && alert("HTTPS blackhole: " + host + ", " + host_noserver);
return blackhole;
}
}
////////////////////////////////////////
// HTTPS and HTTP: full path analysis //
////////////////////////////////////////
if (scheme == "https" || scheme == "http") {
///////////////////////////////////////////////////////////////////////
// PASS LIST: domains matched here will always be allowed. //
///////////////////////////////////////////////////////////////////////
if ( !hasOwnProperty(good_da_host_exceptions_exact_JSON,host)
&& ((good_da_host_exact_flag && (hasOwnProperty(good_da_host_exact_JSON,host_noserver)||hasOwnProperty(good_da_host_exact_JSON,host))) || // fastest test first
(use_pass_rules_parts_flag &&
(good_da_hostpath_exact_flag && (hasOwnProperty(good_da_hostpath_exact_JSON,url_noservernoquery)||hasOwnProperty(good_da_hostpath_exact_JSON,url_noquery)) ) ||
// test logic: only do the slower test if the host has a (non)suspect fqdn
(good_da_host_regex_flag && (good_da_host_regex_RegExp.test(host_noserver)||good_da_host_regex_RegExp.test(host))) ||
(good_da_hostpath_regex_flag && (good_da_hostpath_regex_RegExp.test(url_noservernoquery)||good_da_hostpath_regex_RegExp.test(url_noquery))) ||
(good_da_regex_flag && (good_da_regex_RegExp.test(url_noserver)||good_da_regex_RegExp.test(url_noscheme))) ||
(good_url_parts_flag && good_url_parts_RegExp.test(url)) ||
(good_url_regex_flag && good_url_regex_RegExp.test(url)))) ) {
return proxy;
}
//////////////////////////////////////////////////////////
// BLOCK LIST: stuff matched here will be blocked      //
//////////////////////////////////////////////////////////
// Debugging results
if (debug_flag && alert_flag) {
alert("hasOwnProperty(bad_da_host_exact_JSON," + host_noserver + "): " + (bad_da_host_exact_flag && hasOwnProperty(bad_da_host_exact_JSON,host_noserver)));
alert("hasOwnProperty(bad_da_host_exact_JSON," + host + "): " + (bad_da_host_exact_flag && hasOwnProperty(bad_da_host_exact_JSON,host)));
alert("hasOwnProperty(bad_da_hostpath_exact_JSON," + url_noservernoquery + "): " + (bad_da_hostpath_exact_flag && hasOwnProperty(bad_da_hostpath_exact_JSON,url_noservernoquery)));
alert("hasOwnProperty(bad_da_hostpath_exact_JSON," + url_noquery + "): " + (bad_da_hostpath_exact_flag && hasOwnProperty(bad_da_hostpath_exact_JSON,url_noquery)));
alert("bad_da_host_regex_RegExp.test(" + host_noserver + "): " + (bad_da_host_regex_flag && bad_da_host_regex_RegExp.test(host_noserver)));
alert("bad_da_host_regex_RegExp.test(" + host + "): " + (bad_da_host_regex_flag && bad_da_host_regex_RegExp.test(host)));
alert("bad_da_hostpath_regex_RegExp.test(" + url_noservernoquery + "): " + (bad_da_hostpath_regex_flag && bad_da_hostpath_regex_RegExp.test(url_noservernoquery)));
alert("bad_da_hostpath_regex_RegExp.test(" + url_noquery + "): " + (bad_da_hostpath_regex_flag && bad_da_hostpath_regex_RegExp.test(url_noquery)));
alert("bad_da_regex_RegExp.test(" + url_noserver + "): " + (bad_da_regex_flag && bad_da_regex_RegExp.test(url_noserver)));
alert("bad_da_regex_RegExp.test(" + url_noscheme + "): " + (bad_da_regex_flag && bad_da_regex_RegExp.test(url_noscheme)));
alert("bad_url_parts_RegExp.test(" + url + "): " + (bad_url_parts_flag && bad_url_parts_RegExp.test(url)));
alert("bad_url_regex_RegExp.test(" + url + "): " + (bad_url_regex_flag && bad_url_regex_RegExp.test(url)));
}
if ( (bad_da_host_exact_flag && (hasOwnProperty(bad_da_host_exact_JSON,host_noserver)||hasOwnProperty(bad_da_host_exact_JSON,host))) || // fastest test first
(bad_da_hostpath_exact_flag && (hasOwnProperty(bad_da_hostpath_exact_JSON,url_noservernoquery)||hasOwnProperty(bad_da_hostpath_exact_JSON,url_noquery)) ) ||
// test logic: only do the slower test if the host has a (non)suspect fqdn
(bad_da_host_regex_flag && (bad_da_host_regex_RegExp.test(host_noserver)||bad_da_host_regex_RegExp.test(host))) ||
(bad_da_hostpath_regex_flag && (bad_da_hostpath_regex_RegExp.test(url_noservernoquery)||bad_da_hostpath_regex_RegExp.test(url_noquery))) ||
(bad_da_regex_flag && (bad_da_regex_RegExp.test(url_noserver)||bad_da_regex_RegExp.test(url_noscheme))) ||
(bad_url_parts_flag && bad_url_parts_RegExp.test(url)) ||
(bad_url_regex_flag && bad_url_regex_RegExp.test(url)) ) {
alert_flag && alert("Blackhole: " + url + ", " + host);
return blackhole;
}
}
// default pass
alert_flag && alert("Default PASS: " + url + ", " + host);
return proxy;
}
// User-supplied FindProxyForURL()
''' + self.original_FindProxyForURL_function
self.easylist_strategy = """\
EasyList rules:
https://adblockplus.org/filters
https://adblockplus.org/filter-cheatsheet
https://opnsrce.github.io/javascript-performance-tip-precompile-your-regular-expressions
https://adblockplus.org/blog/investigating-filter-matching-algorithms
Strategies to convert EasyList rules to Javascript tests:
In general:
1. Preference for performance over 1:1 EasyList functionality
2. Limit number of rules to ~O(10k) to avoid computational burden on mobile devices
3. Exact matches: use Object hashing (very fast); use efficient NFA RegExp's for all else
4. Divide and conquer specific cases to avoid large RegExp's
5. Based on testing code performance on an iPhone: mobile Safari, Chrome with System Activity Monitor.app
6. Backstop these proxy.pac rules with Privoxy rules and a browser plugin
scheme://host/path?query ; FindProxyForURL(url, host) has full url and host strings
EasyList rules:
|| domain anchor
||host is exact e.g. ||a.b^ ? then hasOwnProperty(hash,host)
||host is wildcard e.g. ||a.* ? then RegExp.test(host)
||host/path is exact e.g. ||a.b/c? ? then hasOwnProperty(hash,url_path_noquery) [strip ?'s]
||host/path is wildcard e.g. ||a.*/c? ? then RegExp.test(url_path_noquery) [strip ?'s]
||host/path?query is exact e.g. ||a.b/c?d= ? assume none [handle small number within RegExp's]
||host/path?query is wildcard e.g. ||a.*/c?d= ? then RegExp.test(url)
url parts e.g. a.b^c&d|
All cases RegExp.test(url)
Except: |http://a.b. Treat these as domain anchors after stripping the scheme
regex e.g. /r/
All cases RegExp.test(url)
@@ exceptions
Flag as "good" versus "bad" default
Variable name conventions (example that defines the rule):
bad_da_host_exact == bad domain anchor with host/path type, exact matching with Object hash
bad_da_host_regex == bad domain anchor with host/path type, RegExp matching
"""
return
# Used to define JS object hashes (much faster than string conversion)
def js_init_object(self,object_name):
obj = globals()[object_name]
if bool(self.truncate_hash_max) and len(obj) > self.truncate_hash_max:
warnings.warn("Truncating regex alternatives rule set '{}' from {:d} to {:d}.".format(object_name,len(obj),self.truncate_hash_max))
obj = obj[:self.truncate_hash_max]
return '''\
// {:d} rules:
var {}_JSON = {}{}{};
var {}_flag = {} > 0 ? true : false; // test for non-zero number of rules
'''.format(len(obj),object_name,'{ ',",\n".join('"{}": null'.format(x) for x in obj),' }',object_name,len(obj))
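# Illustrative output (assuming the hypothetical contents bad_da_host_exact == ['doubleclick.net', '2o7.net']):
#   // 2 rules:
#   var bad_da_host_exact_JSON = { "doubleclick.net": null,
#   "2o7.net": null };
#   var bad_da_host_exact_flag = 2 > 0 ? true : false; // test for non-zero number of rules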
def js_init_regexp(self,array_name,domain_anchor=False,regex_flag=False):
global n_wildcard
n_wildcard = 1
domain_anchor_replace = "^(?:[\\w-]+\\.)*?" if domain_anchor else ""
match_nothing_regexp = "/^$/"
# no wildcard sorting
# arr = [easylist_to_jsre(x) for x in globals()[array_name] if wildcard_test(x)]
arr_nostar = [x for x in globals()[array_name] if not re_test(wildcard_re,x)]
arr_star = [x for x in globals()[array_name] if re_test(wildcard_re,x)]
def wildcard_preferences(rule):
track_test = not re_test(re.compile(r'track',re.IGNORECASE),rule) # MSB
beacon_test = not re_test(re.compile(r'beacon',re.IGNORECASE),rule)
stats_test = not re_test(re.compile(r'stat[is]',re.IGNORECASE),rule)
analysis_test = not re_test(re.compile(r'anal[iy]',re.IGNORECASE),rule) # LSB
return 8*track_test + 4*beacon_test + 2*stats_test + analysis_test
arr_star.sort(key=wildcard_preferences) # ascending: tracking-related rules sort first and survive truncation
# Wildcard regex's use named groups. Limit their number to an assumed maximum,
# e.g. Python's re limit is 100
k_wildcard = 0
rule_kdx = len(arr_star) # keep every rule unless the wildcard budget is exceeded
for kdx, rule in enumerate(arr_star):
k_wildcard += rule.count('*')
if k_wildcard > self.wildcard_named_group_limit:
rule_kdx = kdx
break
arr_star = arr_star[:rule_kdx]
arr = arr_nostar + arr_star
if re_test(r'(?:_parts|_regex)$',array_name) and bool(self.truncate_alternatives_max) and len(arr) > self.truncate_alternatives_max:
warnings.warn("Truncating regex alternatives rule set '{}' from {:d} to {:d}.".format(array_name,len(arr),self.truncate_alternatives_max))
arr = arr[:self.truncate_alternatives_max]
if not regex_flag:
arr = [easylist_to_jsre(x) for x in arr]
else:
# ensure that '/' is escaped
arr = [re.sub(r'([^\\])/', r'\1\/', x) for x in arr] # emit '\/' for the JS regex literal
arr_regexp = "/" + domain_anchor_replace + "(?:" + "|".join(arr) + ")/i"
if len(arr) == 0: arr_regexp = match_nothing_regexp
return '''\
// {:d} rules as an efficient NFA RegExp:
var {}_RegExp = {};
var {}_flag = {} > 0 ? true : false; // test for non-zero number of rules
'''.format(len(arr),array_name,arr_regexp,array_name,len(arr))
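# Illustrative output for js_init_regexp('bad_url_parts') with hypothetical rules
# ['/adbanner.', '/adsframe.'] (no wildcards, hence no named-group lookaheads):
#   // 2 rules as an efficient NFA RegExp:
#   var bad_url_parts_RegExp = /(?:\/adbanner\.|\/adsframe\.)/i;
#   var bad_url_parts_flag = 2 > 0 ? true : false; // test for non-zero number of rules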
# end of EasyListPAC definition
# global variables and functions
def last_modified_resp(req):
header_dict = dict(req.getheaders())
lm = header_dict.get("Last-Modified") if "Last-Modified" in header_dict else \
header_dict.get("Date","Sun, 01 Apr 2018 00:00:00 GMT")
return lm
last_modified_to_utc = lambda lm: time.mktime(datetime.datetime.strptime(lm,"%a, %d %b %Y %X GMT").timetuple())
file_to_utc = lambda f: time.mktime(datetime.datetime.utcfromtimestamp(os.path.getmtime(f)).timetuple())
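# Illustrative freshness check built from the helpers above; `req` is assumed to be an
# http.client.HTTPResponse for an EasyList URL and `fname` a local cache of the same list:
#   if last_modified_to_utc(last_modified_resp(req)) > file_to_utc(fname):
#       pass # remote copy is newer, so re-download fname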
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'
# Monkey patch `re.sub` (***groan***)
# See https://gist.github.com/gromgull/3922244
if (sys.version_info < (3, 5)):
def re_sub(pattern, replacement, string):
def _r(m):
# Now this is ugly.
# Python has a "feature" where unmatched groups return None
# then re.sub chokes on this.
# see http://bugs.python.org/issue1519638
# this works around and hooks into the internal of the re module...
# the match object is replaced with a wrapper that
# returns "" instead of None for unmatched groups
class _m():
def __init__(self, m):
self.m = m
self.string = m.string
def group(self, n):
return m.group(n) or ""
return re._expand(pattern, _m(m), replacement)
return re.sub(pattern, _r, string)
else:
re_sub = re.sub
# print(re_sub('(ab)|(a)', r'(1:\1 2:\2)', 'abc'))
# prints '(1:ab 2:)c'
# My extra rules
my_extra_rules = ['||outbrain.com^',
'||taboola.com^']
# EasyList regular expressions
comment_re = re.compile(r'^\s*?!') # ! comment
configuration_re = re.compile(r'^\s*?\[[^]]*?\]') # [Adblock Plus 2.0]
easylist_opts = r'~?\b(?:third\-party|domain|script|image|stylesheet|object(?!-subrequest)|object\-subrequest|xmlhttprequest|subdocument|ping|websocket|webrtc|document|elemhide|generichide|genericblock|other|sitekey|match-case|collapse|donottrack|popup|media|font)\b'
option_re = re.compile(r'^(.*?)\$(' + easylist_opts + r'.*?)$')
# regex's used to exclude options for specific cases
alloption_exception_re = re.compile(easylist_opts) # discard all options from rules
not3dimppupos_option_exception_re = re.compile(r'~?\b(?:domain|script|stylesheet|object(?!-subrequest)|xmlhttprequest|subdocument|ping|websocket|webrtc|document|elemhide|generichide|genericblock|other|sitekey|match-case|collapse|donottrack|media|font)\b')
not3dimppuposgh_option_exception_re = re.compile(r'~?\b(?:domain|script|stylesheet|object(?!-subrequest)|xmlhttprequest|subdocument|ping|websocket|webrtc|document|elemhide|genericblock|other|sitekey|match-case|collapse|donottrack|media|font)\b')
thrdp_im_pup_os_option_re = re.compile(r'\b(?:third\-party|image|popup|object\-subrequest)\b')
selector_re = re.compile(r'^(.*?)#\@?#*?.*?$') # #@##div [should be #+?, but old style still used]
regex_re = re.compile(r'^\@{0,2}\/(.*?)\/$')
wildcard_begend_re = re.compile(r'^(?:\**?([^*]*?)\*+?|\*+?([^*]*?)\**?)$')
wild_anch_sep_exc_re = re.compile(r'[*|^@]')
wild_sep_exc_noanch_re = re.compile(r'(?:[*^@]|\|[\s\S])')
exception_re = re.compile(r'^@@(.*?)$')
wildcard_re = re.compile(r'\*+?')
httpempty_re = re.compile(r'^\|?https?://$')
# Note: assume path-end rules that end in '/' are partial, not exact, e.g. host.com/path/
pathend_re = re.compile(r'(?:\||\.(?:jsp?|php|xml|jpe?g|png|p?gif|img|swf|flv|[sp]?html?|f?cgi|pl?|aspx|ashx|css|jsonp?|asp|search|cfm|ico|act|act(?:ion)?|spy|do|stm|cms|txt|imu|dll|io|smjs|xhr|ount|bin|py|dyn|gne|mvc|lv|nap|jam|nhn))$',re.IGNORECASE)
domain_anch_re = re.compile(r'^\|\|(.+?)$')
# omit scheme from start of rule -- this will also be done in JS for efficiency
scheme_anchor_re = re.compile(r'^(\|?(?:[\w*+-]{1,15})?://)') # e.g. '|http://' at start
# (Almost) fully-qualified domain name extraction (with EasyList wildcards)
# Example case: banner.3ddownloads.com^
da_hostonly_re = re.compile(r'^((?:[\w*-]+\.)+[a-zA-Z0-9*-]{1,24}\.?)(?:$|[/^?])$')
da_hostpath_re = re.compile(r'^((?:[\w*-]+\.)+[a-zA-Z0-9*-]{1,24}\.?[\w~%./^*-]*?)\??$')
ipv4_re = re.compile(r'(?:\d{1,3}\.){3}\d{1,3}')
host_path_parts_re = re.compile(r'^(?:https?://)?((?:\d{1,3}\.){3}\d{1,3}|(?:[\w-]+\.)+[a-zA-Z0-9-]{2,24})?\.?(\S+)?',re.IGNORECASE)
punct_str = r'][{}()<>.,;:?/~!#$%^&*_=+`\'"|\s-'
punct_class = r'[{}]'.format(punct_str)
nopunct_class = r'[^{}]'.format(punct_str)
specialword_re = r'<\w+>'
hostpunct_str = punct_str[:-1] # everything but '-'
hostpunct_class = r'[{}]'.format(hostpunct_str)
# regex logic: (keep1|keep2)|([::discard class::]+?)
# (<\w+>|\b(?:\w+[.])+[a-zA-Z0-9-]{2,24}\b)|([][()<>.;-]+?)
punct_deletepreserve_re = r'({}|\b{}\b)|({}+?)'.format(specialword_re,ipv4_re.pattern,punct_class)
punct_deletepreserve_reprog = re.compile(punct_deletepreserve_re)
punct_deletepreserve_replace = '\\1 '
hostpunct_deletepreserve_re = r'({}|\b{}\b)|({}+?)'.format(specialword_re,ipv4_re.pattern,hostpunct_class)
hostpunct_deletepreserve_reprog = re.compile(hostpunct_deletepreserve_re)
whitespace_reprog = re.compile(r'\s+')
whitespace_replace = ' '
def exception_filter(line):
return bool(exception_re.search(line))
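# Illustrative (hypothetical rules):
#   exception_filter('@@||example.com^') -> True  (exception/pass rule)
#   exception_filter('||example.com^')   -> False (block rule)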
def line_hostpath_rule(line):
line = exception_re.sub(r'\1',line)
line = domain_anch_re.sub(r'\1',line)
line = option_re.sub(r'\1',line)
return line
def punct_delete(line,punct_re=punct_deletepreserve_reprog):
res = line
res = re_sub(punct_re,punct_deletepreserve_replace,res)
res = re_sub(whitespace_reprog,whitespace_replace,res)
return res
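# Illustrative (approximate, modulo edge whitespace): punctuation collapses to spaces
# while IPv4 addresses and <specialword> tokens are preserved:
#   punct_delete('/banner?id=1') -> ' banner id 1'
#   punct_delete('127.0.0.1/ad') -> '127.0.0.1 ad'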
def rule_tokenizer(rule):
rule = line_hostpath_rule(rule)
host_part = re_sub(host_path_parts_re,r'\1',rule)
path_part = re_sub(host_path_parts_re,r'\2',rule)
toks = ' '.join([punct_delete(host_part,punct_re=hostpunct_deletepreserve_reprog), punct_delete(path_part)]).strip()
toks = re_sub(whitespace_reprog,whitespace_replace,toks)
return toks
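# Illustrative tokenization of a hypothetical rule (host and path are split, then
# punctuation collapses): rule_tokenizer('||ads.example.com/banner?id=1')
# -> 'ads example com banner id 1'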
easylist_name_opts_re = re.compile(r'^~?\b(third\-party|domain|script|image|stylesheet|object(?!-subrequest)|object\-subrequest|xmlhttprequest|subdocument|ping|websocket|webrtc|document|elemhide|generichide|genericblock|other|sitekey|match-case|collapse|donottrack|popup|media|font)(?:=.+?)?$')
def option_tokenizer(opts):
toks = ' '.join([easylist_name_opts_re.sub(r'\1',o) for o in opts.split(',')])
return toks
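# Illustrative: option_tokenizer('third-party,domain=example.com') -> 'third-party domain'
# (option values after '=' are dropped by easylist_name_opts_re)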
# use or not use regular expression rules of any kind
def regex_ignore_test(line,opts=''):
res = False # don't ignore any rule
# ignore wildcards and anchors
# res = re_test(r'[*^]',line)
return res
def re_test(regex,string):
if isinstance(regex,str): regex = re.compile(regex)
return bool(regex.search(string))
# Logistic Regression functions
# feature vector hashes
# JSON structure: {"token": { "column": list of int, "count": list of int, "row_index": int }
# create adjacency lists for memory efficient sparse COO array construction
default_row = {"column": [], "count": []}
def feature_vector_append_column(rule,opts,col,feature_vector={}):
# rule grams
toks = re.split(r'\s+',rule_tokenizer(rule))
for k in range(len(toks)):
# 1- and 2-grams
grams = [toks[k], toks[k] + ' ' + toks[k + 1]] if k < len(toks) - 1 else [toks[k]]
feature_vector_append_grams(grams, col, feature_vector, weight=1/np.sqrt(len(toks)))
if bool(opts):
# option tokens (1-grams)
grams = ['option: ' + x for x in re.split(r'\s+', option_tokenizer(opts))]
feature_vector_append_grams(grams, col, feature_vector, weight=min(0.5, 1.e-1/np.sqrt(len(grams))))
if len(toks) <= 3:
"""Add information from available options and high weight regex matches."""
# regex tokens used to relate for short, unique rules
grams = []
for regex in high_weight_regex:
if bool(regex.search(rule)): grams.append('regex: ' + regex.pattern)
if bool(grams): feature_vector_append_grams(grams, col, feature_vector, weight=1/np.sqrt(len(grams)))
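# Illustrative growth of the adjacency lists for a hypothetical rule at column 0 with
# tokens 'ads example com': the 1-grams 'ads', 'example', 'com' and 2-grams 'ads example',
# 'example com' each get {"column": [0], "count": [1/sqrt(3)]}; because the rule is short
# (<= 3 tokens), matching high_weight_regex patterns add further 'regex: ...' keys.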
def feature_vector_append_grams(grams, col, feature_vector={}, weight=1.):
for ky in grams:
feature_vector[ky] = feature_vector.get(ky, copy.deepcopy(default_row))
if not feature_vector[ky]["column"] or feature_vector[ky]["column"][-1] is not col:
feature_vector[ky]["column"].append(col)
feature_vector[ky]["count"].append(0)
feature_vector[ky]["count"][-1] += weight
# store feature vectors as sparse arrays
def fv_to_mat(feature_vector=copy.deepcopy(default_row),rules=[]):
"""Compute sparse, transposed, CSR matrix and row hash from a feature vector."""
row_hash = {}
rows = []
cols = []
vals = []
for i, tok in enumerate(feature_vector):
feature_vector[tok]["row_index"] = i
row_hash[i] = tok
j_new = feature_vector[tok]["column"]
i_new = [i]*len(j_new)
v_new = feature_vector[tok]["count"]
rows += i_new
cols += j_new
vals += v_new
fv_mat = sps.coo_matrix((vals,(cols,rows)),shape=(len(rules),len(feature_vector)),dtype=np.float).tocsr()
return fv_mat, row_hash
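# Illustrative shape: for 2 rules and a 3-token vocabulary, fv_mat is a 2x3 CSR matrix
# with fv_mat[rule_column, token_row] holding the accumulated token weight; row_hash
# maps each token row index back to its token string (cf. debug_feature_vector above).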
# convert EasyList wildcard '*', separator '^', and anchor '|' to regexp; ignore '?' globbing
# http://blogs.perl.org/users/mauke/2017/05/converting-glob-patterns-to-efficient-regexes-in-perl-and-javascript.html
# For efficiency these are converted in Python; observed to be important in the iOS kernel
# var domain_anchor_RegExp = RegExp("^\\\\|\\\\|");
# // performance: use a simplified, less inclusive of subdomains, regex for domain anchors
# // also assume that RexgExp("^https?//") stripped from url string beforehand
# //var domain_anchor_replace = "^(?:[\\\\w\\-]+\\\\.)*?";
# var domain_anchor_replace = "^";
# var n_wildcard = 1;
# function easylist2re(pat) {
# function tr(pat) {
# return pat.replace(/[-\\/.?:!+^|$()[\\]{}]/g, function (m0, mp, ms) { // url, regex, EasyList special chars
# // res = m0 === "?" ? "[\\s\\S]" : "\\\\" + m0;
# // https://adblockplus.org/filters#regexps, separator "^" == [^\\w.%-]
# var res = "\\\\" + m0;
# switch (m0) {
# case "^":
# res = "[^\\\\w.%-]";
# break;
# case "|":
# res = mp + m0.length === ms.length ? "$" : "^";
# break;
# default:
# res = "\\\\" + m0; // escape special characters
# }
# return res;
# });
# }
#
# // EasyList domain anchor "||"
# var bos = "";
# if (domain_anchor_RegExp.test(pat)) {
# pat = pat.replace(domain_anchor_RegExp, ""); // strip "^||"
# bos = domain_anchor_replace;
# }
#
# // EasyList wildcards '*', separators '^', and start/end anchors '|'
# // define n_wildcard outside the function for concatenation of these patterns
# // var n_wildcard = 1;
# pat = bos + pat.replace(/\\W[^*]*/g, function (m0, mp, ms) {
# if (m0.charAt(0) !== "*") {
# return tr(m0);
# }
# // var eos = mp + m0.length === ms.length ? "$" : "";
# var eos = "";
# return "(?=([\\\\s\\\\S]*?" + tr(m0.substr(1)) + eos + "))\\\\" + n_wildcard++;
# });
# return pat;
# }
n_wildcard = 1
def easylist_to_jsre(pat):
def re_easylist(match):
mg = match.group()[0]
# https://adblockplus.org/filters#regexps, separator "^" == [^\\w.%-]
if mg == "^":
res = "[^\\w.%-]"
elif mg == "|":
res = "^" if match.span()[0] == 0 else "$"
else:
res = '\\' + mg
return res
def tr(pat):
return re.sub(r'[][\-/.?:!+^|$(){}]', re_easylist, pat)
def re_wildcard(match):
global n_wildcard
mg = match.group()
if mg[0] != "*": return tr(mg)
res = '(?=([\\s\\S]*?' + tr(mg[1:]) + '))\\' + '{:d}'.format(n_wildcard)
n_wildcard += 1
return res
domain_anchor_replace = "^(?:[\\w-]+\\.)*?"
bos = ''
if re_test(domain_anch_re,pat):
pat = domain_anch_re.sub(r'\1',pat)
bos = domain_anchor_replace
pat = bos + re.sub(r'(\W[^*]*)', re_wildcard, pat)
return pat
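# Illustrative conversions (hypothetical patterns; assumes n_wildcard == 1 beforehand,
# since the counter advances globally across calls):
#   easylist_to_jsre('a.b^')   -> r'a\.b[^\w.%-]'
#   easylist_to_jsre('||a.b^') -> r'^(?:[\w-]+\.)*?a\.b[^\w.%-]'
#   easylist_to_jsre('a*b')    -> r'a(?=([\s\S]*?b))\1' # '*' becomes a capturing lookahead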
def ordered_unique_all_js_var_lists():
global good_da_host_exact
global good_da_host_regex
global good_da_hostpath_exact
global good_da_hostpath_regex
global good_da_regex
global good_da_host_exceptions_exact
global bad_da_host_exact
global bad_da_host_regex
global bad_da_hostpath_exact
global bad_da_hostpath_regex
global bad_da_regex
global good_url_parts
global bad_url_parts
global good_url_regex
global bad_url_regex
good_da_host_exact = ordered_unique_nonempty(good_da_host_exact)
good_da_host_regex = ordered_unique_nonempty(good_da_host_regex)
good_da_hostpath_exact = ordered_unique_nonempty(good_da_hostpath_exact)
good_da_hostpath_regex = ordered_unique_nonempty(good_da_hostpath_regex)
good_da_regex = ordered_unique_nonempty(good_da_regex)
good_da_host_exceptions_exact = ordered_unique_nonempty(good_da_host_exceptions_exact)
bad_da_host_exact = ordered_unique_nonempty(bad_da_host_exact)
bad_da_host_regex = ordered_unique_nonempty(bad_da_host_regex)
bad_da_hostpath_exact = ordered_unique_nonempty(bad_da_hostpath_exact)
bad_da_hostpath_regex = ordered_unique_nonempty(bad_da_hostpath_regex)
bad_da_regex = ordered_unique_nonempty(bad_da_regex)
good_url_parts = ordered_unique_nonempty(good_url_parts)
bad_url_parts = ordered_unique_nonempty(bad_url_parts)
good_url_regex = ordered_unique_nonempty(good_url_regex)
bad_url_regex = ordered_unique_nonempty(bad_url_regex)
# ordered uniqueness, https://stackoverflow.com/questions/12897374/get-unique-values-from-a-list-in-python
ordered_unique_nonempty = lambda listable: fnt.reduce(lambda l, x: l.append(x) or l if x not in l and bool(x) else l, listable, [])
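# e.g. (illustrative): ordered_unique_nonempty(['b', 'a', 'b', '', 'a']) -> ['b', 'a']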
# list variables based on EasyList strategies above
# initial values prepended before EasyList rules
# pass updates and services from these domains
# handle organization-specific ad and tracking servers in later commit
# https://support.apple.com/en-us/HT210060
good_da_host_exact = ['apple.com',
'albert.apple.com',
'captive.apple.com',
'gs.apple.com',
'humb.apple.com',
'static.ips.apple.com',
'tbsc.apple.com',
'time-ios.apple.com',
'time.apple.com',
'time-macos.apple.com',
'gdmf.apple.com',
'deviceenrollment.apple.com',
'deviceservices-external.apple.com',
'identity.apple.com',
'iprofiles.apple.com',
'mdmenrollment.apple.com',
'setup.icloud.com',
'appldnld.apple.com',
'gg.apple.com',
'gnf-mdn.apple.com',
'gnf-mr.apple.com',
'gs.apple.com',
'ig.apple.com',
'mesu.apple.com',
'oscdn.apple.com',
'osrecovery.apple.com',
'skl.apple.com',
'swcdn.apple.com',
'swdist.apple.com',
'swdownload.apple.com',
'swpost.apple.com',
'swscan.apple.com',
'updates-http.cdn-apple.com',
'updates.cdn-apple.com',
'xp.apple.com',
'ppq.apple.com',
'lcdn-registration.apple.com',
'crl.apple.com',
'crl.entrust.net',
'crl3.digicert.com',
'crl4.digicert.com',
'ocsp.apple.com',
'ocsp.digicert.com',
'ocsp.entrust.net',
'ocsp.verisign.net',
'icloud.com',
'apple-dns.net',
'swcdn.apple.com',
'init.itunes.apple.com', # use nslookup to determine canonical names
'init-cdn.itunes-apple.com.akadns.net',
'itunes.apple.com.edgekey.net',
'setup.icloud.com',
'p32-escrowproxy.icloud.com',
'p32-escrowproxy.fe.apple-dns.net',
'keyvalueservice.icloud.com',
'keyvalueservice.fe.apple-dns.net',
'p32-bookmarks.icloud.com',
'p32-bookmarks.fe.apple-dns.net',
'p32-ckdatabase.icloud.com',
'p32-ckdatabase.fe.apple-dns.net',
'configuration.apple.com',
'configuration.apple.com.edgekey.net',
'mesu.apple.com',
'mesu-cdn.apple.com.akadns.net',
'mesu.g.aaplimg.com',
'gspe1-ssl.ls.apple.com',
'gspe1-ssl.ls.apple.com.edgekey.net',
'api-glb-bos.smoot.apple.com',
'query.ess.apple.com',
'query-geo.ess-apple.com.akadns.net',
'query.ess-apple.com.akadns.net',
'setup.fe.apple-dns.net',
'gsa.apple.com',
'gsa.apple.com.akadns.net',
'icloud-content.com',
'usbos-edge.icloud-content.com',
'usbos.ce.apple-dns.net',
'lcdn-locator.apple.com',
'lcdn-locator.apple.com.akadns.net',
'lcdn-locator-usuqo.apple.com.akadns.net',
'cl1.apple.com',
'cl2.apple.com',
'cl3.apple.com',
'cl4.apple.com',
'cl5.apple.com',
'cl1-cdn.origin-apple.com.akadns.net',
'cl2-cdn.origin-apple.com.akadns.net',
'cl3-cdn.origin-apple.com.akadns.net',
'cl4-cdn.origin-apple.com.akadns.net',
'cl5-cdn.origin-apple.com.akadns.net',
'cl1.apple.com.edgekey.net',
'cl2.apple.com.edgekey.net',
'cl3.apple.com.edgekey.net',
'cl4.apple.com.edgekey.net',
'cl5.apple.com.edgekey.net',
'xp.apple.com',
'xp.itunes-apple.com.akadns.net',
'mt-ingestion-service-pv.itunes.apple.com',
'p32-sharedstreams.icloud.com',
'p32-sharedstreams.fe.apple-dns.net',
'p32-fmip.icloud.com',
'p32-fmip.fe.apple-dns.net',
'gsp-ssl.ls.apple.com',
'gsp-ssl.ls-apple.com.akadns.net',
'gsp-ssl.ls2-apple.com.akadns.net',
'gspe35-ssl.ls.apple.com',
'gspe35-ssl.ls-apple.com.akadns.net',
'gspe35-ssl.ls.apple.com.edgekey.net',
'gsp64-ssl.ls.apple.com',
'gsp64-ssl.ls-apple.com.akadns.net',
'mt-ingestion-service-st11.itunes.apple.com',
'mt-ingestion-service-st11.itunes-apple.com.akadns.net',
'microsoft.com', 'mozilla.com', 'mozilla.org']
good_da_host_regex = ['||push.apple.com^',
'||itunes.apple.com^',
'||apps.apple.com^',
'||mzstatic.com^']
good_da_hostpath_exact = []
good_da_hostpath_regex = []
good_da_regex = []
bad_da_host_exact = []
bad_da_host_regex = []
bad_da_hostpath_exact = []
bad_da_hostpath_regex = []
bad_da_regex = []
good_url_parts = []
bad_url_parts = []
good_url_regex = []
bad_url_regex = []
# provide explicit exceptions to good hosts or domains, e.g. iad.apple.com
good_da_host_exceptions_exact = [ 'iad.apple.com',
'iadsdk.apple.com',
'iadsdk.apple.com.edgekey.net',
'bingads.microsoft.com',
'azure.bingads.trafficmanager.net',
'choice.microsoft.com',
'choice.microsoft.com.nsatc.net',
'corpext.msitadfs.glbdns2.microsoft.com',
'corp.sts.microsoft.com',
'df.telemetry.microsoft.com',
'diagnostics.support.microsoft.com',
'feedback.search.microsoft.com',
'i1.services.social.microsoft.com',
'i1.services.social.microsoft.com.nsatc.net',
'redir.metaservices.microsoft.com',
'reports.wes.df.telemetry.microsoft.com',
'services.wes.df.telemetry.microsoft.com',
'settings-sandbox.data.microsoft.com',
'settings-win.data.microsoft.com',
'sqm.df.telemetry.microsoft.com',
'sqm.telemetry.microsoft.com',
'sqm.telemetry.microsoft.com.nsatc.net',
'statsfe1.ws.microsoft.com',
'statsfe2.update.microsoft.com.akadns.net',
'statsfe2.ws.microsoft.com',
'survey.watson.microsoft.com',
'telecommand.telemetry.microsoft.com',
'telecommand.telemetry.microsoft.com.nsatc.net',
'telemetry.urs.microsoft.com',
'vortex.data.microsoft.com',
'vortex-sandbox.data.microsoft.com',
'vortex-win.data.microsoft.com',
'cy2.vortex.data.microsoft.com.akadns.net',
'watson.microsoft.com',
                                  'watson.ppe.telemetry.microsoft.com',
'watson.telemetry.microsoft.com',
'watson.telemetry.microsoft.com.nsatc.net',
'wes.df.telemetry.microsoft.com',
'win10.ipv6.microsoft.com',
                                  'www.bingads.microsoft.com' ]
# Long regex filter """here""" documents
# ignore any rules following comments with these strings, until the next non-ignorable comment
commentname_sections_ignore_re = r'(?:{})'.format('|'.join(re.sub(r'([.])','\\.',x) for x in '''\
gizmodo.in
shink.in
project-free-tv.li
vshare.eu
pencurimovie.ph
filmlinks4u.is
Spiegel.de
bento.de
German
French
Arabic
Armenian
Belarusian
Bulgarian
Chinese
Croatian
Czech
Danish
Dutch
Estonian
Finnish
Georgian
Greek
Hebrew
Hungarian
Icelandic
Indian
Indonesian
Italian
Japanese
Korean
Latvian
Lithuanian
Norwegian
Persian
Polish
Portuguese
Romanian
Russian
Serbian
Singaporean
Slovene
Slovak
Spanish
Swedish
Thai
Turkish
Ukranian
Ukrainian
Vietnamese
Gamestar.de
Focus.de
tvspielfilm.de
Prosieben
Wetter.com
Woxikon.de
Fanfiktion.de
boote-forum.de
comunio.de
planetsnow.de'''.split('\n')))
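# Hedged sanity check: the ignorable-section pattern should mention the
# comment keywords listed above, e.g. the 'German' language heading.
assert 'German' in commentname_sections_ignore_re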
# include these rules, no matter their priority
# necessary to include desired rules that fall below the threshold for a reasonably-sized PAC
# Refs: https://guardianapp.com/ios-app-location-report-sep2018.html
include_these_good_rules = []
include_these_bad_rules = [x for x in """\
/securepubads.
||google.com/pagead
||facebook.com/plugins/*
||connect.facebook.com
||connect.facebook.net
||platform.twitter.com
||api.areametrics.com
||in.cuebiq.com
||et.intake.factual.com
||api.factual.com
||api.beaconsinspace.com
||api.huq.io
||m2m-api.inmarket.com
||mobileapi.mobiquitynetworks.com
||sdk.revealmobile.com
||api.safegraph.com
||incoming-data-sense360.s3.amazonaws.com
||ios-quinoa-personal-identify-prod.sense360eng.com
||ios-quinoa-events-prod.sense360eng.com
||ios-quinoa-high-frequency-events-prod.sense360eng.com
||v1.blueberry.cloud.databerries.com
||pie.wirelessregistry.com""".split('\n') if not bool(re.search(r'^\s*?(?:#|$)',x))]
# regex's for highly weighted rules
high_weight_regex_strings = """\
trac?k
beacon
stat[is]?
anal[iy]
goog
facebook
yahoo
amazon
adob
msn
# 2-grams
goog\\S+?ad
amazon\\S+?ad
yahoo\\S+?ad
facebook\\S+?ad
adob\\S+?ad
msn\\S+ad
doubleclick
cooki
twitter
krxd
pagead
syndicat
(?:\\bad|ad\\b)
securepub
static
\\boas\\b
ads
cdn
cloud
banner
financ
share
traffic
creativ
media
host
affil
^mob
data
your?
watch
survey
stealth
invisible
brand
site
merch
kli[kp]
clic?k
popup
log
assets
count
metric
score
event
tool
quant
chart
opti?m
partner
sponsor
affiliate"""
high_weight_regex = [re.compile(x,re.IGNORECASE) for x in high_weight_regex_strings.split('\n') if not bool(re.search(r'^\s*?(?:#|$)',x))]
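# Hedged sanity check (illustrative): the high-weight keyword list should
# contain obvious tracking terms such as 'beacon' and 'doubleclick'.
assert any('beacon' in x for x in high_weight_regex_strings.split('\n'))
assert any('doubleclick' in x for x in high_weight_regex_strings.split('\n'))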
# regex to limit regex filters (bootstrapping in part from securemecca.com PAC regex keywords)
if False:
badregex_regex_filters = '' # Accept everything
else:
badregex_regex_filters = high_weight_regex_strings + '\n' + '''\
cooki
pagead
syndicat
(?:\\bad|ad\\b)
cdn
cloud
banner
image
img
pop
game
free
financ
film
fast
farmville
fan
exp
share
cash
money
dollar
buck
dump
deal
daily
content
kick
down
file
video
score
partner
match
ifram
cam
widget
monk
rapid
platform
google
follow
shop
love
content
#^(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})$
#^([A-Za-z]{12}|[A-Za-z]{8}|[A-Za-z]{50})\\.com$
smile
happy
traffic
dash
board
tube
torrent
down
creativ
host
affil
\\.(biz|ru|tv|stream|cricket|online|racing|party|trade|webcam|science|win|accountant|loan|faith|cricket|date)
^mob
join
data
your?
watch
survey
stealth
invisible
social
brand
site
script
xchang
merch
kli(k|p)
clic?k
zip
invest
arstech
buzzfeed
imdb
twitter
baidu
yandex
youtube
ebay
discovercard
chase
hsbc
usbank
santander
kaspersky
symantec
brightcove
hidden
invisible
macromedia
flash
[^i]scan[^dy]
secret
skype
tsbbank
tunnel
ubs\\.com
unblock
unlock
usaa\\.com
usbank\\.com
ustreas\\.gov
ustreasury
verifiedbyvisa\\.com
viagra
wachovia
wellsfargo\\.com
westernunion
windowsupdate
plugin
nielsen
oas-config
oas\\/oas
pix
video-plugin
videodownloader
visit
voxmedia\\.com
vtrack\\.php
w3track\\.com
web_?ad
webiq
weblog
webtrek
webtrend
wget\\.exe
widgets
winstart\\.exe
winstart\\.zip
wired\\.com
ad-limits\\.js
ad-manager
ad_engine
adx\\.js
\\.bat
\\.bin
[^ck]anal[^_]
\\.com\/a\\.gif
\\.com\/p\\.gif
\\.com\\.au\\/ads
\\.cpl
[^bhmz]eros
\\.exe
\\.exe
\\.msi
\\.net\\/p\\.gif
\\.pac
\\.pdf
\\.pdf\\.exe
\\.rar
\\.scr
\\.sh
transparent1x1\\.gif
\\/travidia
__utm\\.js
whv2_001\\.js
xtcore\\.js
\\.zip
sharethis\\.com
stats\\.wp\\.com
[^i]crack
virgins\\.com
\\.xyz
shareasale\\.com
financialcontent\\.com
netdna-cdn\\.com
gstatic\\.com
taboola\\.com
ooyala\\.com
pinimg\\.com
cloudfront\\.net
d21rhj7n383afu
d19rpgkrjeba2z
outbrain\\.com
themindcircle\\.com
google-analytics\\.com
nocookie\\.net
jwpsrv\\.com
doubleclick\\.net
d2c8v52ll5s99u
d3qdfnco3bamip
yarn\\.co
visura\\.co
gatehousmedia\\.com
imore\\.com
openx\\.net
gigya\\.com
shopify\\.com
tiqcdn\\.com
criteo\\.net
ntv\\.io
getyarn\\.io
d15zn84cat5tp0
d1pz6dax0t5mop
allinviews\\.com
pinterest\\.com
media\\.net
selectmedia\\.asia
jsdelivr\\.net
pubmatic\\.com
aurubis\\.com
cloudflare\\.com
blueconic\\.net
krxd\\.net
cdn-mw\\.com
serving-sys\\.com
openx\\.net
segment\\.com
viglink\\.com
viafoura\\.net
aolcdn\\.net
shoofl\\.tv
inq\\.com
optimizely\\.com
kinja-static\\.com
d3926qxcw0e1bh
yieldmo\\.com
indexww\\.com
2mdn\\.net
newrelic\\.com
guim\\.co\\.uk
futurecdn\\.net
vidible\\.tv
vindicosuite\\.com
fsdn\\.com
cpanel\\.net
perfectmarket\\.com
about\\.me
omnigroup\\.com
lightboxcdn\\.com
hotjar\\.com
addthis\\.com
art19\\.com
lkqd\\.net
mathtag\\.com
dc8xl0ndzn2cb
d1z2jf7jlzjs58
chowstatic\\.com
spokenlayer\\.com
akamaized\\.net
d2qi7ewimk4e2w
stickyadstv\\.com
fastly\\.net
ddkpmexz7bq23
newscgp\\.com
privy\\.com
aspnetcdn\\.com
parsley\\.com
demdex\\.net
d3alqb8vzo7fun
netdna-ssl\\.com
yottaa\\.net
go-mpulse\\.net
bkrtx\\.com
crwdcntrl\\.net
ggpht\\.com
alamy\\.com
spokeo\\.com
d2gatte9o95jao
dawm7kda6y2v0
dwgyu36up6iuz
litix\\.io
sail-horizon\\.com
cnevids\\.com
dz310nzuyimx0
skimresources\\.com
jwpcdn\\.com
dwin2\\.com
htl\\.bid
df80k0z3fi8zg
o0bg\\.com
d8rk54i4mohrb
simplereach\\.com
adsrvr\\.com
vertamedia\\.com
disqusads\\.com
polipace\\.com
jwplatform\\.com
dianomi\\.com
kinja-img\\.com
marketingvideonow\\.com
beachfrontmedia\\.com
mfcreative\\.com
msecdn\\.com
syndetics\\.com
keycdn\\.com
uservoice\\.com
ravenjs\\.com
d1fc8wv8zag5ca
broaddoor\\.com
d3s44e87wooplq
d2x3bkdslnxkuj
selectablemedia\\.com
yldbt\\.com
streamrail\\.net
seriable\\.com
thoughtco\\.com
perimeterx\\.net
owneriq\\.net
ml314\\.com
d1e9d0h8gakqc
dtcn\\.com
trustarc\\.com
licdn\\.com
effectivemeasure\\.net
list-manage\\.com
mtvnservices\\.com
npttech\\.com
dc8na2hxrj29i
tubemogul\\.com
d1lqe9temigv1p
dna8twue3dlxq
adroll\\.com
googleadservices\\.com
localytics\\.com
gfx\\.ms
adsensecustomsearchads\\.com
upsellit\\.com
parrable\\.com
ads-twitter\\.com
atlanticinsights\\.com
pagefair\\.com
areyouahuman\\.com
custhelp\\.com
turn\\.com
connatix\\.com
printfriendly\\.com
scroll\\.com
cybersource\\.com
zergnet\\.com
jsintegrity\\.com
cedexis\\.com
3lift\\.com
onestore\\.ms
mdpcdn\\.com
iperceptions\\.com
dotomi\\.com
pardot\\.com
marketo\\.net
rfksrv\\.com
adnxs\\.com
shartethis\\.com
d31qbv1cthcecs
douyfz3utcehi
scorecardresearch\\.com
nonembed\\.com
peer39\\.com
d3p2jlw8pmhccg
dnkzzz1hlto79
zqtk\\.net
cloudinary\\.com
omtrdc\\.net
d5nxst8fruw4z
d1p6rqiydn62x8
dmtracker\\.com
dp8hsntg6do36
buysellads\\.com
intercomcdn\\.net
dpstvy7p9whsy
cpx\\.to
b-cdn\\.net
googlecommerce\\.com
insightexpressai\\.com
evidon\\.com
footprint\\.net
advertising\\.com
specificmedia\\.com
quantcount\\.com
amgdgt\\.com
bluekai\\.com
smartclip\\.net
azureedge\\.net
iesnare\\.com
medscape\\.com
agkn\\.com
cliipa\\.com
digiday\\.com
convertro\\.com
linksynergy\\.com
woobi\\.com
adx1\\.com
254a\\.com
mediaforge\\.com
videostat\\.net
theadtech\\.com
emxdgt\\.com
acuityplatform\\.com
header\\.direct'''
badregex_regex_filters = '\n'.join(x for x in badregex_regex_filters.split('\n') if not bool(re.search(r'^\s*?(?:#|$)',x)))
badregex_regex_filters_re = re.compile(r'(?:{})'.format('|'.join(badregex_regex_filters.split('\n'))),re.IGNORECASE)
if __name__ == "__main__":
res = EasyListPAC()
sys.exit()
|
essandess/easylist-pac-privoxy
|
easylist_pac.py
|
Python
|
gpl-3.0
| 94,607
|
[
"VisIt"
] |
c2fe474223f8646ebdea124e669d99fe32b41154447bfe9e75be18c60394bca0
|
from JumpScale import j
import mongoengine
from eve import Eve
from eve_mongoengine import EveMongoengine
from flask.ext.bootstrap import Bootstrap
from eve_docs import eve_docs
# default eve settings
my_settings = {
'MONGO_HOST': 'localhost',
'MONGO_PORT': 27017,
'MONGO_DBNAME': 'eve',
'DOMAIN': {'eve-mongoengine': {}} # sadly this is needed for eve
}
import JumpScale.grid.osis
client = j.core.osis.getClientByInstance('main')
json=client.getOsisSpecModel("oss")
from generators.MongoEngineGenerator import *
gen=MongoEngineGenerator("generated/oss.py")
gen.generate(json)
# init application
app = Eve(settings=my_settings)
# init extension
ext = EveMongoengine(app)
# register model to eve
from generated.oss import *
for classs in classes:
ext.add_model(classs)
Bootstrap(app)
app.register_blueprint(eve_docs, url_prefix='/docs')
print "visit:\nhttp://localhost:5000/docs/"
# let's roll
app.run()
|
Jumpscale/web
|
examples/test/start.py
|
Python
|
apache-2.0
| 968
|
[
"VisIt"
] |
f07b3f5acac4f5d8d3457cd15745d144613fa8a6b1c764210aec64bce1c48651
|
import numpy as np
import scipy.linalg as linalg
np.set_printoptions(linewidth=200)
#invSqrt2pi = np.sqrt(np.pi*2)
#invSqrtPiHalf = 1/np.sqrt(np.pi*0.5)
gauss_renorm0 = 1/( 4*np.pi * np.sqrt(np.pi*0.5) )
# =========================== Functions
def applyH( f, ddfR, ddfT, V, k_h2m=0.1, bDebug=False ):
    # Apply H = T + V to a radial function f: the kinetic term uses the radial
    # Laplacian  lap(f) = f'' + (2/r) f'  with ddfR = f'' and ddfT = f'/r.
    Tf = -( ddfR + 2*ddfT )*k_h2m
Vf = f*V
if bDebug:
ff = np.trapz( f*f*S, r )
fTf = np.trapz( Tf*f*S, r )
fVf = np.trapz( Vf*f*S, r )
print "<f|f>", ff ,"<f|T|f> : ", fTf, " <f|V|f> ", fVf, " E tot ", fTf + fVf
return Vf + Tf
def Gauss( r, r2=None, s=1.0, pre=gauss_renorm0, bNumRenorm=True ):
    # Gaussian basis function and its derivatives: returns g, dg = dg/dr,
    # ddgR = d2g/dr2 (radial part) and ddgT = (1/r) dg/dr (tangential part).
    s2 = s*s
    b = 1./(-2*s2)
#invS = 1./s
#invS2 = invS*invS
#invS3 = invS*invS2
if r2 is None:
r2 = r**2
g = np.exp(b*r2) # *invS3*pre
if bNumRenorm:
rho = g*g
S = 4*np.pi*r2
norm = np.sqrt( np.trapz( rho*S, r ) )
print "norm ", norm
g/= norm
dg = g*( r )*2*b
ddgR = g*( 2*b*r*r + 1 )*2*b
ddgT = g*( + 1 )*2*b
return g,dg,ddgR,ddgT
def makeBasis( r, sigmas=[0.2,0.5,0.9] ):
r2=r**2
basis = []
for s in sigmas:
basis.append( Gauss(r, r2, s=s ) )
return basis
def Hbasis_1D(basis, V, k_h2m=0.1 ):
Hchis = []
for bas in basis:
f = bas[0]
ddfR = bas[2]
Hchis.append( applyH( f, ddfR, 0, V, k_h2m ) )
return Hchis
def Hbasis_3D(basis, V, k_h2m=0.1, bDebug=False ):
Hchis = []
for bas in basis:
f = bas[0]
ddfR = bas[2]
ddfT = bas[3]
Hchis.append( applyH( f, ddfR, ddfT, V, k_h2m, bDebug=bDebug ) )
return Hchis
def numDeriv( r, f ):
    # Central finite difference on a uniform grid (loses the two end points).
    return (f[2:]-f[:-2])/(r[2]-r[0])
# =========================== Main
if __name__ == "__main__":
import matplotlib.pyplot as plt
xmax = 5.0
N = 1000+1
Rcut = 4.0
Rmax = 10.0
r = np.linspace(0,Rmax,N)
r2 = r**2
S = 4*np.pi*r2
w = 0.3
w2 = w**2
COULOMB_CONST = 14.399644
V = -np.sqrt( COULOMB_CONST/(r2 + w2 ) )
plt.figure()
g,dg,ddg,ddgT = Gauss( r, r2=None, s=0.6 )
plt.plot(r,g,'b')
plt.plot(r,dg,'g')
plt.plot(r,ddg,'r')
rd = r[1:-1]
rdd = r[2:-2]
dg_ = numDeriv( r, g ); plt.plot(rd ,dg_ , 'g:',lw=4)
ddg_ = numDeriv( rd, dg_ ); plt.plot(rdd,ddg_, 'r:',lw=4)
plt.plot(r,V,'k')
plt.grid()
plt.title( "check derivs of Gaussian (numeric/analytic) " )
#plt.show()
#exit()
colors = ['r' ,'g','b','m','c','y']
sigmas = [0.25,0.5,1.0,1.5,2.0]
nbas = len(sigmas)
# ======== plot bais
basis = makeBasis( r, sigmas=sigmas )
plt.figure()
for i,bas in enumerate(basis):
#plt.plot(r,basf[0])
name = "basis[%i]" %i
f = bas[0]
rho = f*f
print name,".norm() : ", np.trapz( rho*S, r )
c = colors[i]
plt.plot(r,bas[0] ,c=c,lw=2.,ls='-', label=name)
plt.plot(r,bas[0]*S,c=c,lw=1., ls=':' )
plt.plot(r,bas[2]*S,c=c,lw=1., ls='--' )
plt.plot(r,bas[3]*S,c=c,lw=1., ls='-.' )
plt.plot(r,V,'k')
plt.xlim(0,xmax)
plt.legend()
plt.grid()
plt.title( "Gaussian Basis" )
#plt.show()
#exit()
b3D = True
k_h2m=0.0500001
if b3D:
Hchis = Hbasis_3D( basis, V, k_h2m=k_h2m, bDebug=True )
else:
Hchis = Hbasis_1D( basis, V, k_h2m=k_h2m )
Hmat = np.zeros( (nbas,nbas) )
Smat = np.zeros( (nbas,nbas) )
chis = [ bas[0] for bas in basis ]
chis_ddR = [ bas[2] for bas in basis ]
chis_ddT = [ bas[3] for bas in basis ]
for i in xrange(nbas):
for j in xrange(nbas):
#Hmat[i,j] = np.trapz( Hchis[i] * basis[j][0] , r )
#Hmat[i,j] = np.trapz( Hchis[i] * basis[j][0] * S, r )
if b3D:
Hmat[i,j] = np.trapz( chis[j] * Hchis[i] * S , r )
Smat[i,j] = np.trapz( chis[i] * chis[j] * S , r )
else:
Hmat[i,j] = np.trapz( chis[j] * Hchis[i], r )
Smat[i,j] = np.trapz( chis[i] * chis[j], r )
print " Hmat \n", Hmat
print " Smat \n", Smat
# Generalized eigenproblem Hmat*Cs = Es*S*Cs
# Result should be B-orthogonal
'''
#Es,Cs = np.linalg.eig( Hmat, b=Smat )
Es,Cs = linalg.eig( Hmat, b=Smat )
#Cs=Cs.T
#Cs = np.dot( Smat, Cs )
CC = np.dot( Cs.T, Cs )
print " CC \n", CC
CSC = np.dot( Cs.T, np.dot( Smat, Cs ) )
print " CSC \n", CSC
'''
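    # Hedged cross-check (a sketch, not part of the original procedure): scipy
    # can solve the generalized symmetric eigenproblem H C = E S C directly;
    # its eigenvalues should agree with the Lowdin two-step route below.
    Es_ref, Cs_ref = linalg.eigh( Hmat, Smat )
    print "reference eigh(H,S) energies ", Es_ref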
# ================= Lowdin
# 1) ------ Orthogonalize Basis Set
Ses,SVs = np.linalg.eig( Smat )
SVs=SVs.T
print "eigval(S) ", Ses
print "SVs \n", SVs
SVVS = np.dot(SVs.T,SVs)
print "SVVS \n", SVVS
sSes = 1.0/np.sqrt( Ses )
D = np.diag(sSes)
print "sqrt(eigval(S)) ", sSes
for i,e in enumerate(sSes):
SVs[i,:]*=e
Uchis = np.dot( SVs, chis )
Uchis_ddR = np.dot( SVs, chis_ddR )
Uchis_ddT = np.dot( SVs, chis_ddT )
print "Uchis.shape ", Uchis.shape
xUUx = np.zeros((nbas,nbas))
for i in xrange(nbas):
for j in xrange(nbas):
xUUx[i,j] = np.trapz( Uchis[i] * Uchis[j] * S, r )
#xUUx = np.dot( Uchis, Uchis.T )
print "xUUx \n", xUUx
# ...... plot S eigstates
plt.figure()
for i in range(nbas):
plt.plot( r, Uchis[i], label="Uchi[%i]" %i )
plt.plot(r,V,'k')
plt.xlim(0,xmax)
plt.grid()
plt.title( "S-eigenstates" )
plt.legend()
#plt.show()
# 2) ------ Solve in Orthogonal Basis Set
UHU = np.zeros((nbas,nbas))
for i in xrange(nbas):
HUchi_i = applyH( Uchis[i], Uchis_ddR[i], Uchis_ddT[i], V, k_h2m=k_h2m, bDebug=True )
for j in xrange(nbas):
UHU[i,j] = np.trapz( HUchi_i * Uchis[j] * S, r )
print "UHU\n", UHU
Es,Clow = np.linalg.eig( UHU )
print "Es ", Es
print "Clow \n", Clow
CUchis = np.dot(Clow.T, Uchis )
#print "CUchis \n", CUchis
#UCCU = np.dot( CUchis, CUchis.T )
UCCU = np.zeros((nbas,nbas))
for i in xrange(nbas):
for j in xrange(nbas):
UCCU[i,j] = np.trapz( CUchis[i] * CUchis[j] * S, r )
print "UCCU \n", UCCU
# ...... plot H eigstates
plt.figure()
for i in range(nbas):
plt.plot( r, CUchis[i], label="CUchi[%i]" %i )
plt.plot(r,V,'k')
plt.xlim(0,xmax)
plt.grid()
plt.title( "H-eigenstates" )
plt.legend()
plt.show()
'''
sSHSs = np.dot( SVs, np.dot(Hmat,SVs.T) )
print "sSHSs \n", sSHSs
Es,Vs = np.linalg.eig ( sSHSs )
print "eigenval(Hlow) ", Es
print "eigenvec(Hlow) \n", Vs
VV = np.dot( Vs,Vs.T )
print "<Psi|Psi>\n" , VV
Hdiag = np.dot( Vs, np.dot( sSHSs,Vs.T ) )
print "Hdiag \n", Hdiag
Psis = np.dot(Vs,chis )
Psis_ddR = np.dot(Vs,chis_ddR)
Psis_ddT = np.dot(Vs,chis_ddT)
SSmat = np.zeros( (nbas,nbas) )
for i in xrange(nbas):
for j in xrange(nbas):
SSmat[i,j] = np.trapz( Psis[i] * Psis[j] * S, r )
print " SSmat \n", SSmat
exit()
for i in xrange(nbas):
applyH( Psis[i], Psis_ddR[i], Psis_ddT[i], V, k_h2m=k_h2m, bDebug=True )
for i in xrange(nbas):
print "eig [%i] ei"%i , Es[i]," vi ",Cs[i]
# ======== plot eigstates
plt.figure()
for i in range(nbas):
Ci = Cs[i]
plt.plot( r, np.dot(Ci,chis), label="Psi[%i]" %i )
plt.plot(r,V,'k')
plt.xlim(0,xmax)
plt.grid()
plt.legend()
plt.show()
'''
|
ProkopHapala/SimpleSimulationEngine
|
python/pyGaussAtom/GaussAtom.py
|
Python
|
mit
| 7,583
|
[
"Gaussian"
] |
0cf3e11fea42172389f8500a2f570747805e72f966af85a0246cf72290fc1137
|
import json
import pprint
master_ingrd_dict = {
'spices' : ['paprika',
'cayenne pepper',
'chili powder',
'curry powder',
'vanilla extract',
'vanilla bean',
'kosher salt',
'bay leaf',
'bay leaves',
'crushed red pepper',
'ginger',
'baking powder',
'baking soda',
'cinnamon',
'saffron',
'mint',
'tarragon',
'chives',
'fennel',
'parsley',
'sage',
'allspice',
'dill',
'marjoram',
'cumin',
'oregano',
'thyme',
'rosemary',
'basil',
                'turmeric',
'cardamom',
'nutmeg',
'clove',
'star anise',
'anise',
'basil',
'smoked paprika',
'garlic powder',
'onion powder',
'almond extract',
'coriander',
'salt',
'garlic salt',
'celery salt',
'black pepper',
'peppercorns',
'white pepper',
'five spice',
'5-spice',
'five spice powder',
'5-spice powder',
'cilantro',
'old bay',
'mustard powder',
'pepper flakes',
'sesame seeds' ],
'others':[ 'worcestershire sauce',
'soy sauce',
'cocoa powder',
'chocolate chip',
'light soy sauce',
'dark soy sauce',
'hoisin sauce',
'corn starch',
'water',
'capers',
'granulated sugar',
'sugar',
'brown sugar',
'molasses',
"confectioner's sugar",
'lemon juice',
'lime juice',
'lemon zest',
'lime zest',
'zest',
'v-8 juice',
'white wine',
'red wine',
'red wine vinegar',
'white wine vinegar',
'white vinegar',
'vegetable stock',
'beef stock',
'chicken stock',
'fish sauce',
'whole grain mustard',
'mustard',
'ketchup',
'dijon mustard',
'honey',
'agave',
'mayonnaise',
'beer',
'whiskey',
'cognac',
'teriyaki sauce',
'brandy',
'vodka',
'espresso',
'sherry' ],
'oils':[ 'sunflower oil',
'peanut oil',
'palm oil',
'cottonseed oil',
'olive oil',
'extra virgin olive oil',
'coconut oil',
'canola oil',
               'corn oil',
'sesame oil',
'soybean oil',
'vegetable oil',
'rapeseed oil',
'lard',
'vegetable shortening',
'shortening',
'suet',
'fat' ],
'milk':[ 'salted butter',
'unsalted butter',
'butter',
'margarine',
'buttermilk',
               'condensed milk',
'custard',
'dulce de leche',
'evaporated milk',
'frozen yogurt',
'whole milk',
'skim milk',
'reduced fat milk',
'whey' ],
'cream':[ 'sour cream',
'clotted cream',
'cream',
'heavy cream',
'whipped cream',
'creme fraiche',
'ice cream' ],
'yogurt':[ 'yogurt',
'greek yogurt',
'plain yogurt' ],
'cheese':[ 'cheddar cheese',
'cream cheese',
'goat cheese',
'feta',
'brie',
'ricotta cheese',
'jalapeno jack',
'cream cheese',
'cottage cheese',
'mozzarella',
'parmigiano-reggiano',
'blue cheese',
'gouda cheese',
'american cheese',
'camembert',
'roquefort',
'provolone',
'gruyere cheese',
'monterey jack',
'stilton cheese',
'gorgonzola',
'emmental cheese',
'ricotta',
'swiss cheese',
'colby cheese',
'parmesan cheese',
'muenster cheese',
'pecorino',
'manchego',
'edam',
'halloumi',
'havarti',
'pecorino romano',
'comte cheese',
'grana',
                   'asiago cheese',
                   'pepper jack cheese',
'mascarpone',
'limburger',
'American Cheese',
'processed cheese' ],
'potatoes':['potato',
'sweet potato',
'taro',
                'yam',
'idaho potato',
'russet potato',
'yukon gold',
'fingerlings' ],
'rice':[ 'brown rice',
'white rice',
'basmati',
'wild rice',
'jasmine rice',
'glutinous rice' ],
'breads':[ 'barley',
'millet',
'buckwheat',
'corn',
'oats',
'steel-cut oats',
'rolled oats',
'instant oats',
'quinoa',
'rye',
'granola',
'all-purpose flour',
'semolina',
'whole-wheat flour',
'enriched flour',
'cake flour',
'self-rising flour',
'sourdough',
'white bread',
'rye bread',
'pita',
'baguette',
'focaccia',
'naan',
'banana bread',
'bagel',
'pumpernickel',
'challah',
'croissant',
'english muffin',
'raisin bread',
'garlic bread',
'biscuit',
'bun',
'hot dog bun',
'hamburger bun' ],
'pastas':[ 'angel hair',
'linguine',
'fettuccine',
'orecchiette',
'orzo',
'rigatoni',
'spaghetti',
'gnocchi',
'fusilli',
'farfalle',
                'penne',
'tortellini',
'rotelle',
'lasagne',
'vermicelli',
'ramen',
'soba',
'udon',
'rice vermicelli',
'noodle' ],
'shrooms':[ 'shittake',
'morel',
'enokitake',
'oyster mushroom',
'white mushroom',
'white button',
'portobello' ],
'fruits':[ "apple",
"pineapple",
"grapefruit",
"banana",
"orange",
'blueberry',
"strawberry",
"grape",
'raisin',
'cranberry',
"lemon",
"cherry",
"pear",
"mango",
"avocado",
"peach",
"melon",
"apricot",
"plum",
"kiwi",
'watermelon',
'blackberry',
'papaya',
'cantaloupe',
'berry',
'tangerine',
'coconut',
'cranberry',
'lychee',
'date',
                'passion fruit',
'gooseberry',
'persimmon',
'lime',
"nectarine",
"fig",
"pomegranate" ],
'greens':[ 'spinach',
'kale',
'cabbage',
'broccoli',
'dandelion',
'leafy green',
'chard',
'lettuce',
'rapini',
'endive',
'napa cabbage',
'cauliflower',
'tomato',
'squash',
'cucumber',
'bell pepper',
'pumpkin',
'corn',
'maize',
'brussel sprout',
'artichoke',
'bell pepper',
'chili pepper',
'red pepper',
'arugula',
                'watercress',
'butternut squash',
                'eggplant',
'diced tomato',
'crushed tomato',
'tomato paste',
'jalapeno',
'radish',
'bok choy' ],
'legumes':[ 'bean',
'soybean',
'nut',
'lentil',
'pea',
'okra',
'green bean',
'kidney bean',
'navy bean',
'pinto bean',
'garbanzo bean',
'wax bean',
'mung bean',
'snow pea',
                 'lima pea',
'alfalfa',
'clover',
'snap pea',
'sugar snap pea',
'snow pea',
'peanut butter',
'almond butter',
'cashew butter',
'peanut',
'almond',
'walnut',
'cashew',
'pecan',
'pistachio',
'hazelnut',
'brazil nut',
'pine nut',
'macadamia',
'chestnut' ],
'roots':[ 'carrot',
'parsnip',
'turnip',
'rutabaga',
'radish',
'celery',
'daikon',
              'kohlrabi',
              'scallion',
'jicama',
'horseradish',
'onion',
'shallot',
'vidalia onion',
'red onion',
'pearl onion',
'leek',
'water chestnut',
'spring onion',
'yellow onion',
'white onion',
'asparagus',
'chicory',
'garlic' ],
'eggs':[ 'egg',
'chicken egg',
'duck egg',
'goose egg',
'quail egg' ],
'lamb':[ 'lamb',
'lamb chop',
'lamb loin chop',
'lamb rack',
'rack of lamb',
'lamb rib',
'ground lamb',
'lamb shank',
'lamb sirloin',
'boneless lamb leg',
'bone-in lamb leg' ],
'pork':[ 'pork',
'pork shoulder',
'pork butt',
'pork loin',
'pork chop',
'loin chop',
'sirloin chop',
'sirloin steak',
'baby back rib',
'riblet',
'rack of pork',
'pork loin half rib',
'pork tenderloin',
'sirloin roast',
'spare rib',
'pork sausage',
'ground pork',
'bacon',
'ham' ],
'beef':[ 'beef',
't-bone steak',
'strip steak',
'chuck steak',
'skirt steak',
'brisket',
'flank steak',
'short loin',
'flat iron steak',
'short ribs',
'rib eye steak',
'rib steak',
'round steak',
'sirloin steak',
'top sirloin',
'bottom sirloin',
'hanger steak',
'beef tenderloin',
'ground beef',
'beef sausage' ],
'chicken':[ 'chicken',
'chicken breast',
'chicken wing',
'chicken drum',
'chicken drumstick',
'chicken thigh',
'chicken leg',
'whole chicken',
'chicken quarter' ]
}
def edit_distance(str1, str2):
mat = [ [0 for i in range(len(str2) + 1)] for i in range(len(str1) + 1) ]
for i in range(1, len(str1)+1):
mat[i][0] = i
for j in range(1, len(str2)+1):
mat[0][j] = j
sub_cost = 0
for j in range(1, len(str2)+1):
for i in range(1, len(str1)+1):
if str1[i-1] == str2[j-1] :
sub_cost = 0
else:
sub_cost = 1
mat[i][j] = min(mat[i-1][j] + 1, mat[i][j-1] + 1, mat[i-1][j-1] + sub_cost)
#print mat
return mat[-1][-1]
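# Hedged sanity checks (illustrative strings) for the Levenshtein distance
# implemented above: 0 for identical words, 1 per single-letter edit.
assert edit_distance('salt', 'salt') == 0
assert edit_distance('salt', 'sale') == 1
assert edit_distance('pepper', 'paper') == 2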
def word_compare(ingrd_list_rec, ingrd_list_real, match_diff=2):
list_match = list()
for word_real in ingrd_list_real:
for word_rec in ingrd_list_rec:
if edit_distance(word_rec, word_real) < match_diff:
list_match.append(word_real)
"""
for word_real in ingrd_list_real:
for word_rec in ingrd_list_rec:
dist = edit_distance(word_rec, word_real)
list_match.append((word_real, dist))
print list_match
for word_match in list_match:
if word_match[1] > 3:
list_match = list()
break
"""
if len(list_match) > 0:
#print ingrd_list_real
#print ingrd_list_rec
#print list_match
#print
return list_match
return None
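# Hedged sanity check (hypothetical tokens): a one-letter typo falls within
# the default match_diff of 2, so both real words should be recovered.
assert word_compare(['chiken', 'breast'], ['chicken', 'breast']) == ['chicken', 'breast']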
def best_match(ingrd_rec, ingr_category):
ingrd_list_rec = ingrd_rec.split('(')[0].split(',')[0].split()
best_match_ingrds = list()
best_match_list = list()
best_match_diff = 10000
match_list = list()
for ingrd_real in ingr_category:
ingrd_list_real = ingrd_real.split()
match_list = word_compare(ingrd_list_rec, ingrd_list_real)
if match_list != None:
#print ingrd_rec
#print ingrd_list_real
#print match_list
#print
            match_goodness = abs(len(match_list) - len(ingrd_list_rec)) # Counts recipe tokens left unmatched (lower is better)
if match_goodness < best_match_diff:
best_match_diff = match_goodness
best_match_ingrds = [ingrd_real]
best_match_list = [ingrd_list_real]
elif (match_goodness == best_match_diff) and (match_goodness < 10000):
best_match_ingrds.append(ingrd_real)
best_match_list.append(ingrd_list_real)
else:
pass
"""
filtered_match_ingrds = list()
filtered_match_list = list()
if (len(best_match_ingrds) > 1):
print best_match_list
for i in range(len(best_match_list)):
accessory_words = list()
print best_match_list[i]
for word in best_match_list[i]:
print match_list
if word in match_list:
continue
else:
accessory_words.append(word)
acc_match_list = word_compare(ingrd_list_rec, accessory_words, 3)
if acc_match_list != None:
filtered_match_ingrds.append(best_match_ingrds[i])
filtered_match_list.append(best_match_list[i])
"""
#print ingrd_rec
#print best_match_ingrds
#print best_match_list
#print best_match_diff
#print
#return best_match_ingrd
if best_match_ingrds == []:
return None, None, None
else:
return best_match_ingrds, best_match_list, best_match_diff
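# Illustrative behaviour (hypothetical recipe line): for '2 cups chicken stock'
# against master_ingrd_dict['others'], the two-word entry 'chicken stock'
# should out-score single-word stock entries, because match_goodness counts
# recipe tokens left unmatched and prefers smaller values.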
pp = pprint.PrettyPrinter(indent=4)
with open("recipes.json", "rb") as jfile:
j = json.load(jfile)
ingrd_dict = dict()
for key, val in j.iteritems():
ingrd_dict[key] = val['ingredients']
cleaned_recipe = dict()
size = len(ingrd_dict.keys())
i = 0
for key, val in ingrd_dict.iteritems():
print "%d out of %d\n" % (i, size)
ingredients_match = dict()
for ingrd in val:
        match_ingrd = list()
        match_list = list()
match_goodness = 10000
category = list()
for cat, cat_list in master_ingrd_dict.iteritems():
            # scan every category for its best-matching ingredient
cat_best_match_ingrds, cat_best_match_list, cat_best_match_goodness = best_match(ingrd, cat_list)
if cat_best_match_ingrds != None:
if cat_best_match_goodness < match_goodness:
match_ingrd = [cat_best_match_ingrds]
match_list = cat_best_match_list
match_goodness = cat_best_match_goodness
category = [cat]
elif (cat_best_match_goodness == match_goodness) and (cat_best_match_goodness < 10000):
match_ingrd.append(cat_best_match_ingrds)
match_list.append(cat_best_match_list)
category.append(cat)
else:
pass
#print ingrd
#print match_ingrds
#print match_list
#print category
ingredients_match[ingrd] = { 'ingrd_real': match_ingrd,
'category' : category }
cleaned_recipe[key] = ingredients_match
i+=1
try:
    with open('ingredients_extract.json', 'wb') as jwrite:
json.dump(cleaned_recipe, jwrite, sort_keys=True, indent=4)
except Exception:
pp.pprint(cleaned_recipe)
#check poultry
#for s in spices:
# s_list = s.split()
# word_compare(ingrd_split, s_list)
#pp.pprint(ingrd_dict)
|
mingtaiha/n.ai
|
scripts/get_ingredient.py
|
Python
|
mit
| 17,631
|
[
"ESPResSo"
] |
5ffd13b05e4e1f31d2eff734286c68579f8e39c83fedc84e2a656dbf7375cc64
|
# (C) British Crown Copyright 2010 - 2018, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides testing capabilities and customisations specific to Iris.
.. note:: This module needs to control the matplotlib backend, so it
**must** be imported before ``matplotlib.pyplot``.
The primary class for this module is :class:`IrisTest`.
By default, this module sets the matplotlib backend to "agg". But when
this module is imported it checks ``sys.argv`` for the flag "-d". If
found, it is removed from ``sys.argv`` and the matplotlib backend is
switched to "tkagg" to allow the interactive visual inspection of
graphical test results.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import codecs
import collections
import contextlib
import datetime
import difflib
import filecmp
import functools
import gzip
import inspect
import json
import io
import math
import os
import os.path
import re
import shutil
import subprocess
import sys
import unittest
import threading
import warnings
import xml.dom.minidom
import zlib
try:
from unittest import mock
except ImportError:
import mock
import filelock
import numpy as np
import numpy.ma as ma
import requests
import iris.cube
import iris.config
import iris.util
# Test for availability of matplotlib.
# (And remove matplotlib as an iris.tests dependency.)
try:
import matplotlib
matplotlib.use('agg')
matplotlib.rcdefaults()
# Standardise the figure size across matplotlib versions.
# This permits matplotlib png image comparison.
matplotlib.rcParams['figure.figsize'] = [8.0, 6.0]
import matplotlib.testing.compare as mcompare
import matplotlib.pyplot as plt
except ImportError:
MPL_AVAILABLE = False
else:
MPL_AVAILABLE = True
try:
from osgeo import gdal
except ImportError:
GDAL_AVAILABLE = False
else:
GDAL_AVAILABLE = True
try:
from iris_grib.message import GribMessage
GRIB_AVAILABLE = True
except ImportError:
GRIB_AVAILABLE = False
try:
import iris_sample_data
except ImportError:
SAMPLE_DATA_AVAILABLE = False
else:
SAMPLE_DATA_AVAILABLE = True
try:
import nc_time_axis
NC_TIME_AXIS_AVAILABLE = True
except ImportError:
NC_TIME_AXIS_AVAILABLE = False
try:
requests.get('https://github.com/SciTools/iris')
INET_AVAILABLE = True
except requests.exceptions.ConnectionError:
INET_AVAILABLE = False
try:
import stratify
STRATIFY_AVAILABLE = True
except ImportError:
STRATIFY_AVAILABLE = False
#: Basepath for test results.
_RESULT_PATH = os.path.join(os.path.dirname(__file__), 'results')
#: Default perceptual hash size.
_HASH_SIZE = 16
#: Default maximum perceptual hash hamming distance.
_HAMMING_DISTANCE = 2
if '--data-files-used' in sys.argv:
sys.argv.remove('--data-files-used')
fname = '/var/tmp/all_iris_test_resource_paths.txt'
print('saving list of files used by tests to %s' % fname)
_EXPORT_DATAPATHS_FILE = open(fname, 'w')
else:
_EXPORT_DATAPATHS_FILE = None
if '--create-missing' in sys.argv:
sys.argv.remove('--create-missing')
print('Allowing creation of missing test results.')
os.environ['IRIS_TEST_CREATE_MISSING'] = 'true'
# Whether to display matplotlib output to the screen.
_DISPLAY_FIGURES = False
if (MPL_AVAILABLE and '-d' in sys.argv):
sys.argv.remove('-d')
plt.switch_backend('tkagg')
_DISPLAY_FIGURES = True
# Threading non re-entrant blocking lock to ensure thread-safe plotting.
_lock = threading.Lock()
def main():
"""A wrapper for unittest.main() which adds iris.test specific options to the help (-h) output."""
if '-h' in sys.argv or '--help' in sys.argv:
stdout = sys.stdout
buff = io.StringIO()
# NB. unittest.main() raises an exception after it's shown the help text
try:
sys.stdout = buff
unittest.main()
finally:
sys.stdout = stdout
lines = buff.getvalue().split('\n')
lines.insert(9, 'Iris-specific options:')
lines.insert(10, ' -d Display matplotlib figures (uses tkagg).')
lines.insert(11, ' NOTE: To compare results of failing tests, ')
lines.insert(12, ' use idiff.py instead')
lines.insert(13, ' --data-files-used Save a list of files used to a temporary file')
        lines.insert(
            14, '  --create-missing     Create missing test results')
print('\n'.join(lines))
else:
unittest.main()
def get_data_path(relative_path):
"""
Return the absolute path to a data file when given the relative path
as a string, or sequence of strings.
"""
if not isinstance(relative_path, six.string_types):
relative_path = os.path.join(*relative_path)
test_data_dir = iris.config.TEST_DATA_DIR
if test_data_dir is None:
test_data_dir = ''
data_path = os.path.join(test_data_dir, relative_path)
if _EXPORT_DATAPATHS_FILE is not None:
_EXPORT_DATAPATHS_FILE.write(data_path + '\n')
if isinstance(data_path, six.string_types) and not os.path.exists(data_path):
# if the file is gzipped, ungzip it and return the path of the ungzipped
# file.
gzipped_fname = data_path + '.gz'
if os.path.exists(gzipped_fname):
with gzip.open(gzipped_fname, 'rb') as gz_fh:
try:
with open(data_path, 'wb') as fh:
fh.writelines(gz_fh)
except IOError:
# Put ungzipped data file in a temporary path, since we
# can't write to the original path (maybe it is owned by
# the system.)
_, ext = os.path.splitext(data_path)
data_path = iris.util.create_temp_filename(suffix=ext)
with open(data_path, 'wb') as fh:
fh.writelines(gz_fh)
return data_path
class IrisTest_nometa(unittest.TestCase):
"""A subclass of unittest.TestCase which provides Iris specific testing functionality."""
_assertion_counts = collections.defaultdict(int)
@classmethod
def setUpClass(cls):
# Ensure that the CF profile if turned-off for testing.
iris.site_configuration['cf_profile'] = None
def _assert_str_same(self, reference_str, test_str, reference_filename, type_comparison_name='Strings'):
if reference_str != test_str:
diff = ''.join(difflib.unified_diff(reference_str.splitlines(1), test_str.splitlines(1),
'Reference', 'Test result', '', '', 0))
self.fail("%s do not match: %s\n%s" % (type_comparison_name, reference_filename, diff))
@staticmethod
def get_result_path(relative_path):
"""
Returns the absolute path to a result file when given the relative path
as a string, or sequence of strings.
"""
if not isinstance(relative_path, six.string_types):
relative_path = os.path.join(*relative_path)
return os.path.abspath(os.path.join(_RESULT_PATH, relative_path))
def assertStringEqual(self, reference_str, test_str,
type_comparison_name='strings'):
if reference_str != test_str:
diff = '\n'.join(difflib.unified_diff(reference_str.splitlines(),
test_str.splitlines(),
'Reference', 'Test result',
'', '', 0))
self.fail("{} do not match:\n{}".format(type_comparison_name,
diff))
def result_path(self, basename=None, ext=''):
"""
Return the full path to a test result, generated from the \
calling file, class and, optionally, method.
Optional kwargs :
* basename - File basename. If omitted, this is \
generated from the calling method.
* ext - Appended file extension.
"""
if ext and not ext.startswith('.'):
ext = '.' + ext
# Generate the folder name from the calling file name.
path = os.path.abspath(inspect.getfile(self.__class__))
path = os.path.splitext(path)[0]
sub_path = path.rsplit('iris', 1)[1].split('tests', 1)[1][1:]
# Generate the file name from the calling function name?
if basename is None:
stack = inspect.stack()
for frame in stack[1:]:
if 'test_' in frame[3]:
basename = frame[3].replace('test_', '')
break
filename = basename + ext
result = os.path.join(self.get_result_path(''),
sub_path.replace('test_', ''),
self.__class__.__name__.replace('Test_', ''),
filename)
return result
def assertCMLApproxData(self, cubes, reference_filename=None, **kwargs):
# passes args and kwargs on to approx equal
if isinstance(cubes, iris.cube.Cube):
cubes = [cubes]
if reference_filename is None:
reference_filename = self.result_path(None, 'cml')
reference_filename = [self.get_result_path(reference_filename)]
for i, cube in enumerate(cubes):
fname = list(reference_filename)
# don't want the ".cml" for the json stats file
if fname[-1].endswith(".cml"):
fname[-1] = fname[-1][:-4]
fname[-1] += '.data.%d.json' % i
self.assertDataAlmostEqual(cube.data, fname, **kwargs)
self.assertCML(cubes, reference_filename, checksum=False)
def assertCDL(self, netcdf_filename, reference_filename=None, flags='-h'):
"""
Test that the CDL for the given netCDF file matches the contents
of the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* netcdf_filename:
The path to the netCDF file.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
* flags:
Command-line flags for `ncdump`, as either a whitespace
separated string or an iterable. Defaults to '-h'.
"""
if reference_filename is None:
reference_path = self.result_path(None, 'cdl')
else:
reference_path = self.get_result_path(reference_filename)
# Convert the netCDF file to CDL file format.
cdl_filename = iris.util.create_temp_filename(suffix='.cdl')
if flags is None:
flags = []
elif isinstance(flags, six.string_types):
flags = flags.split()
else:
flags = list(map(str, flags))
with open(cdl_filename, 'w') as cdl_file:
subprocess.check_call(['ncdump'] + flags + [netcdf_filename],
stderr=cdl_file, stdout=cdl_file)
# Ingest the CDL for comparison, excluding first line.
with open(cdl_filename, 'r') as cdl_file:
lines = cdl_file.readlines()[1:]
# Sort the dimensions (except for the first, which can be unlimited).
# This gives consistent CDL across different platforms.
sort_key = lambda line: ('UNLIMITED' not in line, line)
dimension_lines = slice(lines.index('dimensions:\n') + 1,
lines.index('variables:\n'))
lines[dimension_lines] = sorted(lines[dimension_lines], key=sort_key)
cdl = ''.join(lines)
os.remove(cdl_filename)
self._check_same(cdl, reference_path, type_comparison_name='CDL')
def assertCML(self, cubes, reference_filename=None, checksum=True):
"""
Test that the CML for the given cubes matches the contents of
the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* cubes:
Either a Cube or a sequence of Cubes.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
* checksum:
When True, causes the CML to include a checksum for each
Cube's data. Defaults to True.
"""
if isinstance(cubes, iris.cube.Cube):
cubes = [cubes]
if reference_filename is None:
reference_filename = self.result_path(None, 'cml')
if isinstance(cubes, (list, tuple)):
xml = iris.cube.CubeList(cubes).xml(checksum=checksum, order=False,
byteorder=False)
else:
xml = cubes.xml(checksum=checksum, order=False, byteorder=False)
reference_path = self.get_result_path(reference_filename)
self._check_same(xml, reference_path)
def assertTextFile(self, source_filename, reference_filename, desc="text file"):
"""Check if two text files are the same, printing any diffs."""
with open(source_filename) as source_file:
source_text = source_file.readlines()
with open(reference_filename) as reference_file:
reference_text = reference_file.readlines()
if reference_text != source_text:
diff = ''.join(difflib.unified_diff(reference_text, source_text, 'Reference', 'Test result', '', '', 0))
self.fail("%s does not match reference file: %s\n%s" % (desc, reference_filename, diff))
def assertDataAlmostEqual(self, data, reference_filename, **kwargs):
reference_path = self.get_result_path(reference_filename)
if self._check_reference_file(reference_path):
kwargs.setdefault('err_msg', 'Reference file %s' % reference_path)
with open(reference_path, 'r') as reference_file:
stats = json.load(reference_file)
self.assertEqual(stats.get('shape', []), list(data.shape))
self.assertEqual(stats.get('masked', False),
ma.is_masked(data))
nstats = np.array((stats.get('mean', 0.), stats.get('std', 0.),
stats.get('max', 0.), stats.get('min', 0.)),
dtype=np.float_)
if math.isnan(stats.get('mean', 0.)):
self.assertTrue(math.isnan(data.mean()))
else:
data_stats = np.array((data.mean(), data.std(),
data.max(), data.min()),
dtype=np.float_)
self.assertArrayAllClose(nstats, data_stats, **kwargs)
else:
self._ensure_folder(reference_path)
stats = collections.OrderedDict([
('std', np.float_(data.std())),
('min', np.float_(data.min())),
('max', np.float_(data.max())),
('shape', data.shape),
('masked', ma.is_masked(data)),
('mean', np.float_(data.mean()))])
with open(reference_path, 'w') as reference_file:
reference_file.write(json.dumps(stats))
def assertFilesEqual(self, test_filename, reference_filename):
reference_path = self.get_result_path(reference_filename)
if self._check_reference_file(reference_path):
fmt = 'test file {!r} does not match reference {!r}.'
self.assertTrue(filecmp.cmp(test_filename, reference_path),
fmt.format(test_filename, reference_path))
else:
self._ensure_folder(reference_path)
shutil.copy(test_filename, reference_path)
def assertString(self, string, reference_filename=None):
"""
Test that `string` matches the contents of the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* string:
The string to check.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
"""
if reference_filename is None:
reference_path = self.result_path(None, 'txt')
else:
reference_path = self.get_result_path(reference_filename)
self._check_same(string, reference_path,
type_comparison_name='Strings')
def assertRepr(self, obj, reference_filename):
self.assertString(repr(obj), reference_filename)
def _check_same(self, item, reference_path, type_comparison_name='CML'):
if self._check_reference_file(reference_path):
with open(reference_path, 'rb') as reference_fh:
reference = ''.join(part.decode('utf-8')
for part in reference_fh.readlines())
self._assert_str_same(reference, item, reference_path,
type_comparison_name)
else:
self._ensure_folder(reference_path)
with open(reference_path, 'wb') as reference_fh:
reference_fh.writelines(
part.encode('utf-8')
for part in item)
def assertXMLElement(self, obj, reference_filename):
"""
Calls the xml_element method given obj and asserts the result is the same as the test file.
"""
doc = xml.dom.minidom.Document()
doc.appendChild(obj.xml_element(doc))
pretty_xml = doc.toprettyxml(indent=" ")
reference_path = self.get_result_path(reference_filename)
self._check_same(pretty_xml, reference_path,
type_comparison_name='XML')
def assertArrayEqual(self, a, b, err_msg=''):
np.testing.assert_array_equal(a, b, err_msg=err_msg)
def assertRaisesRegexp(self, *args, **kwargs):
"""
Emulate the old :meth:`unittest.TestCase.assertRaisesRegexp`.
Because the original function is now deprecated in Python 3.
Now calls :meth:`six.assertRaisesRegex()` (no final "p") instead.
It is the same, except for providing an additional 'msg' argument.
"""
# Note: invoke via parent class to avoid recursion as, in Python 2,
# "six.assertRaisesRegex" calls getattr(self, 'assertRaisesRegexp').
return six.assertRaisesRegex(super(IrisTest_nometa, self),
*args, **kwargs)
@contextlib.contextmanager
def _recordWarningMatches(self, expected_regexp=''):
# Record warnings raised matching a given expression.
matches = []
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
yield matches
messages = [str(warning.message) for warning in w]
expr = re.compile(expected_regexp)
matches.extend(message for message in messages
if expr.search(message))
@contextlib.contextmanager
def assertWarnsRegexp(self, expected_regexp=''):
# Check that a warning is raised matching a given expression.
with self._recordWarningMatches(expected_regexp) as matches:
yield
msg = "Warning matching '{}' not raised."
msg = msg.format(expected_regexp)
self.assertTrue(matches, msg)
@contextlib.contextmanager
def assertNoWarningsRegexp(self, expected_regexp=''):
# Check that no warning matching the given expression is raised.
with self._recordWarningMatches(expected_regexp) as matches:
yield
msg = "Unexpected warning(s) raised, matching '{}' : {!r}."
msg = msg.format(expected_regexp, matches)
self.assertFalse(matches, msg)
def _assertMaskedArray(self, assertion, a, b, strict, **kwargs):
# Define helper function to extract unmasked values as a 1d
# array.
def unmasked_data_as_1d_array(array):
array = ma.asarray(array)
if array.ndim == 0:
if array.mask:
data = np.array([])
else:
data = np.array([array.data])
else:
data = array.data[~ma.getmaskarray(array)]
return data
# Compare masks. This will also check that the array shapes
# match, which is not tested when comparing unmasked values if
# strict is False.
a_mask, b_mask = ma.getmaskarray(a), ma.getmaskarray(b)
np.testing.assert_array_equal(a_mask, b_mask)
if strict:
assertion(a.data, b.data, **kwargs)
else:
assertion(unmasked_data_as_1d_array(a),
unmasked_data_as_1d_array(b),
**kwargs)
def assertMaskedArrayEqual(self, a, b, strict=False):
"""
Check that masked arrays are equal. This requires the
unmasked values and masks to be identical.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* strict (bool):
If True, perform a complete mask and data array equality check.
If False (default), the data array equality considers only unmasked
elements.
"""
self._assertMaskedArray(np.testing.assert_array_equal, a, b, strict)
def assertArrayAlmostEqual(self, a, b, decimal=6):
np.testing.assert_array_almost_equal(a, b, decimal=decimal)
def assertMaskedArrayAlmostEqual(self, a, b, decimal=6, strict=False):
"""
Check that masked arrays are almost equal. This requires the
masks to be identical, and the unmasked values to be almost
equal.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* strict (bool):
If True, perform a complete mask and data array equality check.
If False (default), the data array equality considers only unmasked
elements.
* decimal (int):
Equality tolerance level for
:meth:`numpy.testing.assert_array_almost_equal`, with the meaning
'abs(desired-actual) < 0.5 * 10**(-decimal)'
"""
self._assertMaskedArray(np.testing.assert_array_almost_equal, a, b,
strict, decimal=decimal)
def assertArrayAllClose(self, a, b, rtol=1.0e-7, atol=0.0, **kwargs):
"""
Check arrays are equal, within given relative + absolute tolerances.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* rtol, atol (float):
Relative and absolute tolerances to apply.
Any additional kwargs are passed to numpy.testing.assert_allclose.
Performs pointwise toleranced comparison, and raises an assertion if
the two are not equal 'near enough'.
For full details see underlying routine numpy.testing.assert_allclose.
"""
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, **kwargs)
@contextlib.contextmanager
def temp_filename(self, suffix=''):
filename = iris.util.create_temp_filename(suffix)
try:
yield filename
finally:
os.remove(filename)
def file_checksum(self, file_path):
"""
Generate checksum from file.
"""
with open(file_path, "rb") as in_file:
return zlib.crc32(in_file.read())
def _unique_id(self):
"""
Returns the unique ID for the current assertion.
The ID is composed of two parts: a unique ID for the current test
(which is itself composed of the module, class, and test names), and
a sequential counter (specific to the current test) that is incremented
on each call.
For example, calls from a "test_tx" routine followed by a "test_ty"
routine might result in::
test_plot.TestContourf.test_tx.0
test_plot.TestContourf.test_tx.1
test_plot.TestContourf.test_tx.2
test_plot.TestContourf.test_ty.0
"""
# Obtain a consistent ID for the current test.
# NB. unittest.TestCase.id() returns different values depending on
# whether the test has been run explicitly, or via test discovery.
# For example:
# python tests/test_plot.py => '__main__.TestContourf.test_tx'
# ird -t => 'iris.tests.test_plot.TestContourf.test_tx'
bits = self.id().split('.')
if bits[0] == '__main__':
floc = sys.modules['__main__'].__file__
path, file_name = os.path.split(os.path.abspath(floc))
bits[0] = os.path.splitext(file_name)[0]
folder, location = os.path.split(path)
bits = [location] + bits
while location not in ['iris', 'example_tests']:
folder, location = os.path.split(folder)
bits = [location] + bits
test_id = '.'.join(bits)
# Derive the sequential assertion ID within the test
assertion_id = self._assertion_counts[test_id]
self._assertion_counts[test_id] += 1
return test_id + '.' + str(assertion_id)
def _check_reference_file(self, reference_path):
reference_exists = os.path.isfile(reference_path)
if not (reference_exists or
os.environ.get('IRIS_TEST_CREATE_MISSING')):
msg = 'Missing test result: {}'.format(reference_path)
raise AssertionError(msg)
return reference_exists
def _ensure_folder(self, path):
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def check_graphic(self):
"""
Check the hash of the current matplotlib figure matches the expected
image hash for the current graphic test.
To create missing image test results, set the IRIS_TEST_CREATE_MISSING
environment variable before running the tests. This will result in new
and appropriately "<hash>.png" image files being generated in the image
output directory, and the imagerepo.json file being updated.
"""
import imagehash
from PIL import Image
dev_mode = os.environ.get('IRIS_TEST_CREATE_MISSING')
unique_id = self._unique_id()
repo_fname = os.path.join(_RESULT_PATH, 'imagerepo.json')
with open(repo_fname, 'rb') as fi:
repo = json.load(codecs.getreader('utf-8')(fi))
try:
#: The path where the images generated by the tests should go.
image_output_directory = os.path.join(os.path.dirname(__file__),
'result_image_comparison')
if not os.access(image_output_directory, os.W_OK):
if not os.access(os.getcwd(), os.W_OK):
raise IOError('Write access to a local disk is required '
'to run image tests. Run the tests from a '
'current working directory you have write '
                                  'access to, to avoid this issue.')
else:
image_output_directory = os.path.join(
os.getcwd(), 'iris_image_test_output')
result_fname = os.path.join(image_output_directory,
'result-' + unique_id + '.png')
if not os.path.isdir(image_output_directory):
# Handle race-condition where the directories are
# created sometime between the check above and the
# creation attempt below.
try:
os.makedirs(image_output_directory)
except OSError as err:
# Don't care about "File exists"
if err.errno != 17:
raise
def _create_missing():
fname = '{}.png'.format(phash)
base_uri = ('https://scitools.github.io/test-iris-imagehash/'
'images/v4/{}')
uri = base_uri.format(fname)
hash_fname = os.path.join(image_output_directory, fname)
uris = repo.setdefault(unique_id, [])
uris.append(uri)
print('Creating image file: {}'.format(hash_fname))
figure.savefig(hash_fname)
msg = 'Creating imagerepo entry: {} -> {}'
print(msg.format(unique_id, uri))
lock = filelock.FileLock(os.path.join(_RESULT_PATH,
'imagerepo.lock'))
# The imagerepo.json file is a critical resource, so ensure
# thread safe read/write behaviour via platform independent
# file locking.
with lock.acquire(timeout=600):
with open(repo_fname, 'wb') as fo:
json.dump(repo, codecs.getwriter('utf-8')(fo),
indent=4, sort_keys=True)
# Calculate the test result perceptual image hash.
buffer = io.BytesIO()
figure = plt.gcf()
figure.savefig(buffer, format='png')
buffer.seek(0)
phash = imagehash.phash(Image.open(buffer), hash_size=_HASH_SIZE)
if unique_id not in repo:
if dev_mode:
_create_missing()
else:
figure.savefig(result_fname)
emsg = 'Missing image test result: {}.'
raise AssertionError(emsg.format(unique_id))
else:
uris = repo[unique_id]
# Extract the hex basename strings from the uris.
hexes = [os.path.splitext(os.path.basename(uri))[0]
for uri in uris]
# Create the expected perceptual image hashes from the uris.
to_hash = imagehash.hex_to_hash
expected = [to_hash(uri_hex) for uri_hex in hexes]
# Calculate hamming distance vector for the result hash.
distances = [e - phash for e in expected]
if np.all([hd > _HAMMING_DISTANCE for hd in distances]):
if dev_mode:
_create_missing()
else:
figure.savefig(result_fname)
msg = ('Bad phash {} with hamming distance {} '
'for test {}.')
msg = msg.format(phash, distances, unique_id)
if _DISPLAY_FIGURES:
emsg = 'Image comparison would have failed: {}'
print(emsg.format(msg))
else:
emsg = 'Image comparison failed: {}'
raise AssertionError(emsg.format(msg))
if _DISPLAY_FIGURES:
plt.show()
finally:
plt.close()
def _remove_testcase_patches(self):
"""Helper to remove per-testcase patches installed by :meth:`patch`."""
# Remove all patches made, ignoring errors.
for p in self.testcase_patches:
p.stop()
# Reset per-test patch control variable.
self.testcase_patches.clear()
def patch(self, *args, **kwargs):
"""
Install a mock.patch, to be removed after the current test.
The patch is created with mock.patch(*args, **kwargs).
Returns:
The substitute object returned by patch.start().
For example::
mock_call = self.patch('module.Class.call', return_value=1)
module_Class_instance.call(3, 4)
self.assertEqual(mock_call.call_args_list, [mock.call(3, 4)])
"""
# Make the new patch and start it.
patch = mock.patch(*args, **kwargs)
start_result = patch.start()
# Create the per-testcases control variable if it does not exist.
# NOTE: this mimics a setUp method, but continues to work when a
# subclass defines its own setUp.
if not hasattr(self, 'testcase_patches'):
self.testcase_patches = {}
# When installing the first patch, schedule remove-all at cleanup.
if not self.testcase_patches:
self.addCleanup(self._remove_testcase_patches)
# Record the new patch and start object for reference.
self.testcase_patches[patch] = start_result
# Return patch replacement object.
return start_result
def assertArrayShapeStats(self, result, shape, mean, std_dev, rtol=1e-6):
"""
Assert that the result, a cube, has the provided shape and that the
mean and standard deviation of the data array are also as provided.
Thus build confidence that a cube processing operation, such as a
cube.regrid, has maintained its behaviour.
"""
self.assertEqual(result.shape, shape)
self.assertArrayAllClose(result.data.mean(), mean, rtol=rtol)
self.assertArrayAllClose(result.data.std(), std_dev, rtol=rtol)
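        # Illustrative usage (hypothetical cube and numbers):
        #   result = big_cube.regrid(target_cube, scheme)
        #   self.assertArrayShapeStats(result, (12, 73, 96), 285.3, 15.7)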
# An environment variable controls whether test timings are output.
#
# NOTE: to run tests with timing output, nosetests cannot be used.
# At present, that includes not using "python setup.py test"
# The typically best way is like this :
# $ export IRIS_TEST_TIMINGS=1
# $ python -m unittest discover -s iris.tests
# and commonly adding ...
# | grep "TIMING TEST" >iris_test_output.txt
#
_PRINT_TEST_TIMINGS = bool(int(os.environ.get('IRIS_TEST_TIMINGS', 0)))
def _method_path(meth):
    # Python 2 unbound methods have 'im_class'; Python 3 plain functions do not.
    cls = getattr(meth, 'im_class', None)
    if cls is not None:
        return '.'.join([cls.__module__, cls.__name__, meth.__name__])
    return '.'.join([meth.__module__, getattr(meth, '__qualname__', meth.__name__)])
def _testfunction_timing_decorator(fn):
# Function decorator for making a testcase print its execution time.
@functools.wraps(fn)
def inner(*args, **kwargs):
start_time = datetime.datetime.now()
try:
result = fn(*args, **kwargs)
finally:
end_time = datetime.datetime.now()
elapsed_time = (end_time - start_time).total_seconds()
msg = '\n TEST TIMING -- "{}" took : {:12.6f} sec.'
name = _method_path(fn)
print(msg.format(name, elapsed_time))
return result
return inner
def iristest_timing_decorator(cls):
# Class decorator to make all "test_.." functions print execution timings.
if _PRINT_TEST_TIMINGS:
# NOTE: 'dir' scans *all* class properties, including inherited ones.
attr_names = dir(cls)
for attr_name in attr_names:
attr = getattr(cls, attr_name)
if callable(attr) and attr_name.startswith('test'):
attr = _testfunction_timing_decorator(attr)
setattr(cls, attr_name, attr)
return cls
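# Illustrative usage (requires IRIS_TEST_TIMINGS=1 in the environment):
#
#   @iristest_timing_decorator
#   class TestTimed(IrisTest_nometa):
#       def test_example(self):
#           self.assertTrue(True)   # a "TEST TIMING" line is printed afterwards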
class _TestTimingsMetaclass(type):
# An alternative metaclass for IrisTest subclasses, which makes
# them print execution timings for all the testcases.
# This is equivalent to applying the @iristest_timing_decorator to
# every test class that inherits from IrisTest.
# NOTE: however, it means you *cannot* specify a different metaclass for
# your test class inheriting from IrisTest.
# See below for how to solve that where needed.
def __new__(cls, clsname, base_classes, attrs):
result = type.__new__(cls, clsname, base_classes, attrs)
if _PRINT_TEST_TIMINGS:
result = iristest_timing_decorator(result)
return result
class IrisTest(six.with_metaclass(_TestTimingsMetaclass, IrisTest_nometa)):
# Derive the 'ordinary' IrisTest from IrisTest_nometa, but add the
# metaclass that enables test timings output.
# This means that all subclasses also get the timing behaviour.
# However, if a different metaclass is *wanted* for an IrisTest subclass,
# this would cause a metaclass conflict.
# Instead, you can inherit from IrisTest_nometa and apply the
# @iristest_timing_decorator explicitly to your new testclass.
pass
get_result_path = IrisTest.get_result_path
class GraphicsTestMixin(object):
# nose directive: dispatch tests concurrently.
_multiprocess_can_split_ = True
def setUp(self):
# Acquire threading non re-entrant blocking lock to ensure
# thread-safe plotting.
_lock.acquire()
# Make sure we have no unclosed plots from previous tests before
# generating this one.
if MPL_AVAILABLE:
plt.close('all')
def tearDown(self):
# If a plotting test bombs out it can leave the current figure
# in an odd state, so we make sure it's been disposed of.
if MPL_AVAILABLE:
plt.close('all')
# Release the non re-entrant blocking lock.
_lock.release()
class GraphicsTest(GraphicsTestMixin, IrisTest):
pass
class GraphicsTest_nometa(GraphicsTestMixin, IrisTest_nometa):
# Graphicstest without the metaclass providing test timings.
pass
class TestGribMessage(IrisTest):
def assertGribMessageContents(self, filename, contents):
"""
Evaluate whether all messages in a GRIB2 file contain the provided
contents.
* filename (string)
The path on disk of an existing GRIB file
* contents
An iterable of GRIB message keys and expected values.
"""
messages = GribMessage.messages_from_filename(filename)
for message in messages:
for element in contents:
section, key, val = element
self.assertEqual(message.sections[section][key], val)
def assertGribMessageDifference(self, filename1, filename2, diffs,
skip_keys=(), skip_sections=()):
"""
Evaluate that the two messages only differ in the ways specified.
* filename[0|1] (string)
The path on disk of existing GRIB files
* diffs
            A dictionary of GRIB message keys and expected diff values:
{key: (m1val, m2val),...} .
* skip_keys
An iterable of key names to ignore during comparison.
* skip_sections
An iterable of section numbers to ignore during comparison.
"""
messages1 = list(GribMessage.messages_from_filename(filename1))
messages2 = list(GribMessage.messages_from_filename(filename2))
self.assertEqual(len(messages1), len(messages2))
for m1, m2 in zip(messages1, messages2):
m1_sect = set(m1.sections.keys())
m2_sect = set(m2.sections.keys())
for missing_section in (m1_sect ^ m2_sect):
what = ('introduced'
if missing_section in m1_sect else 'removed')
# Assert that an introduced section is in the diffs.
self.assertIn(missing_section, skip_sections,
msg='Section {} {}'.format(missing_section,
what))
for section in (m1_sect & m2_sect):
# For each section, check that the differences are
# known diffs.
m1_keys = set(m1.sections[section]._keys)
m2_keys = set(m2.sections[section]._keys)
difference = m1_keys ^ m2_keys
unexpected_differences = difference - set(skip_keys)
if unexpected_differences:
self.fail("There were keys in section {} which \n"
"weren't in both messages and which weren't "
"skipped.\n{}"
"".format(section,
', '.join(unexpected_differences)))
keys_to_compare = m1_keys & m2_keys - set(skip_keys)
for key in keys_to_compare:
m1_value = m1.sections[section][key]
m2_value = m2.sections[section][key]
msg = '{} {} != {}'
if key not in diffs:
# We have a key which we expect to be the same for
# both messages.
if isinstance(m1_value, np.ndarray):
# A large tolerance appears to be required for
# gribapi 1.12, but not for 1.14.
self.assertArrayAlmostEqual(m1_value, m2_value,
decimal=2)
else:
self.assertEqual(m1_value, m2_value,
msg=msg.format(key, m1_value,
m2_value))
else:
# We have a key which we expect to be different
# for each message.
self.assertEqual(m1_value, diffs[key][0],
msg=msg.format(key, m1_value,
diffs[key][0]))
self.assertEqual(m2_value, diffs[key][1],
msg=msg.format(key, m2_value,
diffs[key][1]))
def skip_data(fn):
"""
Decorator to choose whether to run tests, based on the availability of
external data.
Example usage:
@skip_data
class MyDataTests(tests.IrisTest):
...
"""
no_data = (not iris.config.TEST_DATA_DIR
or not os.path.isdir(iris.config.TEST_DATA_DIR)
or os.environ.get('IRIS_TEST_NO_DATA'))
skip = unittest.skipIf(
condition=no_data,
reason='Test(s) require external data.')
return skip(fn)
def skip_gdal(fn):
"""
Decorator to choose whether to run tests, based on the availability of the
GDAL library.
Example usage:
@skip_gdal
class MyGeoTiffTests(test.IrisTest):
...
"""
skip = unittest.skipIf(
condition=not GDAL_AVAILABLE,
reason="Test requires 'gdal'.")
return skip(fn)
def skip_plot(fn):
"""
Decorator to choose whether to run tests, based on the availability of the
matplotlib library.
Example usage:
@skip_plot
class MyPlotTests(test.GraphicsTest):
...
"""
skip = unittest.skipIf(
condition=not MPL_AVAILABLE,
reason='Graphics tests require the matplotlib library.')
return skip(fn)
skip_grib = unittest.skipIf(not GRIB_AVAILABLE,
'Test(s) require "iris-grib" package, '
'which is not available.')
skip_sample_data = unittest.skipIf(not SAMPLE_DATA_AVAILABLE,
('Test(s) require "iris-sample-data", '
'which is not available.'))
skip_nc_time_axis = unittest.skipIf(
not NC_TIME_AXIS_AVAILABLE,
'Test(s) require "nc_time_axis", which is not available.')
skip_inet = unittest.skipIf(not INET_AVAILABLE,
('Test(s) require an "internet connection", '
'which is not available.'))
skip_stratify = unittest.skipIf(
not STRATIFY_AVAILABLE,
'Test(s) require "python-stratify", which is not available.')
def no_warnings(func):
"""
    Provides a decorator to ensure that no warnings are raised
    within the test; otherwise the test will fail.
"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
with mock.patch('warnings.warn') as warn:
result = func(self, *args, **kwargs)
self.assertEqual(0, warn.call_count,
('Got unexpected warnings.'
' \n{}'.format(warn.call_args_list)))
return result
return wrapped
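# Illustrative usage of no_warnings (hypothetical test method):
#
#   class TestQuiet(IrisTest):
#       @no_warnings
#       def test_silent(self):
#           run_something_expected_not_to_warn()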
|
duncanwp/iris
|
lib/iris/tests/__init__.py
|
Python
|
lgpl-3.0
| 45,786
|
[
"NetCDF"
] |
537b24751a7fab0c969f88f17f2a5a85f96129917282859d54e23a10648bc67d
|
"""
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
import logging
from alexa import build_speechlet_response, build_response
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
logging.basicConfig(level=logging.DEBUG)
logging.debug("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
if (event['session']['application']['applicationId'] !=
"amzn1.ask.skill.03df35c2-53f4-4120-9e96-30d5b05b9df4"):
raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
def on_session_started(session_started_request, session):
""" Called when the session starts """
logging.debug("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
logging.debug("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
logging.debug("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "StartFeed":
return start_feed(intent, session)
elif intent_name == "EndFeed":
return end_feed(intent, session)
else:
raise ValueError("Invalid intent")
def start_feed(intent, session):
""" Records the start of the feed and side, and builds return message"""
card_title = intent['name']
side = intent['slots']['BreastSide']['value']
speech_output = "Starting feeding on the {0} side. " \
"You are more than just a milk machine.".format(side)
should_end_session = False
return build_response({}, build_speechlet_response(card_title, speech_output, None,
should_end_session))
def end_feed(intent, session):
""" Ends the feed and builds the return message"""
card_title = intent['name']
speech_output = "Thank you for tracking you're nursing with breast easier. " \
"Nice jugs, Stephanie."
return build_response({}, build_speechlet_response(card_title, speech_output, None, True))
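# Illustrative payload (hypothetical values) showing the intent shape that
# start_feed expects from an Alexa request:
#
#   _example_intent = {'name': 'StartFeed',
#                      'slots': {'BreastSide': {'value': 'left'}}}
#   start_feed(_example_intent, session={})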
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = "Welcome"
speech_output = "Welcome to Breast Easier. " \
"Please start tracking a session by saying, " \
"Start nursing on my right or left side."
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "Please start tracking a session by saying, " \
"Start nursing on my right or left side."
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
logging.debug("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
def main():
pass
if __name__ == "__main__":
main()
|
dashnash/breast-easier-lambdafunc
|
src/main.py
|
Python
|
mit
| 4,948
|
[
"VisIt"
] |
2bcf9069f45db7d387e5a1f4898fab3aa7487264840b00dc41f69fb760b96f61
|
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import shutil
import sys
import utils
# NB: for this module to build successfully, ITK has to be built with
# ITK_USE_REVIEW=ON (until the itkFlatStructuringElement moves OUT of the
# review directory, that is)
BASENAME = "ItkVtkGlue"
dependencies = ['CMake', 'ITK', 'VTK', 'WrapITK', 'SWIG']
class ItkVtkGlue(InstallPackage):
def __init__(self):
self.source_dir = '' # will set in get()
self.build_dir = os.path.join(config.build_dir, '%s-build' %
(BASENAME,))
#self.inst_dir = os.path.join(config.inst_dir, BASENAME)
def get(self):
self.source_dir = os.path.join(
config.WRAPITK_SOURCE_DIR,
'ExternalProjects', 'ItkVtkGlue')
        if not os.path.exists(self.source_dir):
            utils.error("ItkVtkGlue source not available. Have you executed "
                        "the WrapITK InstallPackage?")
if False:
# make sure that ENABLE_TESTING() in the CMakeLists.txt has been
# deactivated
repls = [('ENABLE_TESTING\(\)', '')]
utils.re_sub_filter_file(
repls,
os.path.join(self.source_dir,'CMakeLists.txt'))
            # and also disable inclusion of the Wrapping/Python/Testing dir
# this will probably change in future versions of ItkVtkGlue
repls = [('SUBDIRS\(Tests\)', '')]
utils.re_sub_filter_file(
repls,
os.path.join(self.source_dir,
'Wrapping/Python/CMakeLists.txt'))
def unpack(self):
# no unpack step
pass
def configure(self):
if os.path.exists(
os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
utils.output("itkvtkglue build already configured.")
return
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
# we need the PATH types for VTK_DIR and for WrapITK_DIR, else
# these variables are NOT stored. That's just weird.
        # we also need to pass the same install prefix as for ITK, so
# that the external module can be put in the right place.
cmake_params = "-DBUILD_WRAPPERS=ON " \
"-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
"-DCMAKE_INSTALL_PREFIX=%s " \
"-DVTK_DIR:PATH=%s " \
"-DITK_DIR=%s " \
"-DITK_TEST_DRIVER=%s " \
"-DWrapITK_DIR=%s " \
"-DSWIG_DIR=%s " \
"-DSWIG_EXECUTABLE=%s " \
"-DPYTHON_EXECUTABLE=%s " \
"-DPYTHON_LIBRARY=%s " \
"-DPYTHON_INCLUDE_PATH=%s " \
% \
(config.WRAPITK_TOPLEVEL,
config.VTK_DIR,
config.ITK_DIR, config.ITK_TEST_DRIVER,
config.WRAPITK_DIR,
config.SWIG_DIR, config.SWIG_EXECUTABLE,
config.PYTHON_EXECUTABLE,
config.PYTHON_LIBRARY,
config.PYTHON_INCLUDE_PATH)
ret = utils.cmake_command(self.build_dir, self.source_dir,
cmake_params)
if ret != 0:
utils.error(
"Could not configure ItkVtkGlue (P1). Fix and try again.")
def build(self):
posix_file = os.path.join(self.build_dir, 'lib/_ItkVtkGluePython.so')
nt_file = os.path.join(self.build_dir, 'lib',
config.BUILD_TARGET,
'_ItkVtkGluePython' + config.PYE_EXT)
if utils.file_exists(posix_file, nt_file):
utils.output("ItkVtkGlue already built. Skipping build step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('ItkVtkGlue.sln')
if ret != 0:
utils.error("Could not build ItkVtkGlue. Fix and try again.")
def install(self):
# config.WRAPITK_LIB is something like:
# /inst/Insight/lib/InsightToolkit/WrapITK/lib
if os.path.exists(
os.path.join(config.WRAPITK_LIB,
'_ItkVtkGluePython' + config.PYE_EXT)):
utils.output("ItkVtkGlue already installed. Skipping step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('ItkVtkGlue.sln', install=True)
if ret != 0:
utils.error(
"Could not install ItkVtkGlue. Fix and try again.")
def clean_build(self):
# nuke the build dir, the source dir is pristine and there is
# no installation
utils.output("Removing build dir.")
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
def get_installed_version(self):
return "NA"
|
nagyistoce/devide.johannes
|
install_packages/ip_itkvtkglue.py
|
Python
|
bsd-3-clause
| 5,230
|
[
"VTK"
] |
be140ab318acb777b3674379d70ddd52a88828f0fefb0fdc6bdfdedc983a0719
|
#!/usr/bin/python
"""
Copyright (C) 2008 Andreas Engelbredt Dalsgaard <andreas.dalsgaard@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. """
import sys

from parser import *
from lexer import *
if len(sys.argv) == 1:
print "usage : ./compile.py inputfile"
raise SystemExit
if len(sys.argv) >= 2:
filename = sys.argv[1]
lexer = lex.lex()
p = Parser(open(filename).read(), lexer)
p.AST.visit()
# vim:ts=4:sw=4:expandtab
|
yzh89/pyuppaal
|
pyuppaal/ulp/compiler.py
|
Python
|
gpl-3.0
| 1,047
|
[
"VisIt"
] |
d6fbd1722c43e3646d19dc9ee5f0deaab3f5b0036946e1a14ac2782f7fb83243
|
#!/usr/bin/env python
#
# DNATool - A program for DNA sequence manipulation
# Copyright (C) 2012- Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Email: farrell.damien_at_gmail.com
import os
from Tkinter import *
import Pmw
class Preferences:
"""Manage personal preferences"""
def __init__(self,program,defaults):
"""Find and load the preferences file"""
filename = '.'+program+'_preferences'
dirs = self.get_dirs()
self.noprefs = False
try:
for ldir in dirs:
self.peatpath = os.path.join(ldir, '.peatdb')
fn=os.path.join(self.peatpath, filename)
if os.path.isfile(fn):
self.load_prefs(fn)
self.save_prefs()
return
                else:
                    self.noprefs = True
            if self.noprefs:
                raise IOError('no preferences file found')
except:
# If we didn't find a file then set to default and save
print 'Did not find preferences!'
self.prefs = defaults.copy()
print dirs
self.peatpath = os.path.join(dirs[0], '.peatdb')
self.pref_file = os.path.join(self.peatpath,filename)
self.prefs['_prefdir'] = self.peatpath
self.prefs['_preffile'] = self.pref_file
self.save_prefs()
# Can we set more variables?
# Defaults savedir?
if os.environ.has_key('HOMEPATH'):
self.prefs['datadir']=os.environ['HOMEPATH']
if os.environ.has_key('HOME'):
self.prefs['datadir']=os.environ['HOME']
# Use 'my documents' if available
        if self.prefs.has_key('datadir'):
mydocs=os.path.join(self.prefs['datadir'],'My Documents')
if os.path.isdir(mydocs):
self.prefs['datadir']=mydocs
# Always save
self.save_prefs()
return
def __del__(self):
# Make sure we save the file when killed
self.save_prefs()
return
def set(self,key,value):
# Set a key
self.prefs[key]=value
self.save_prefs()
return
def get(self,key):
# Get a value
if self.prefs.has_key(key):
return self.prefs[key]
else:
raise NameError,'No such key'
return
def has_key(self,key):
"""No we have this key"""
return self.prefs.has_key(key)
def delete(self,key):
if self.prefs.has_key(key):
del self.prefs[key]
else:
            raise KeyError(key)
self.save_prefs()
return
def get_dirs(self):
"""Compile a prioritised list of all dirs"""
dirs=[]
keys=['HOME','HOMEPATH','HOMEDRIVE']
import os, sys
for key in keys:
if os.environ.has_key(key):
dirs.append(os.environ[key])
#
if os.environ.has_key('HOMEPATH'):
# windows
dirs.append(os.environ['HOMEPATH'])
# Drives
possible_dirs=["C:\\","D:\\","/"]
for pdir in possible_dirs:
if os.path.isdir(pdir):
dirs.append(pdir)
# Check that all dirs are real
rdirs=[]
for dirname in dirs:
if os.path.isdir(dirname):
rdirs.append(dirname)
return rdirs
def load_prefs(self,filename):
# Load prefs
self.pref_file=filename
import pickle
try:
fd=open(filename)
self.prefs=pickle.load(fd)
fd.close()
except:
fd.close()
fd=open(filename,'rb')
self.prefs=pickle.load(fd)
fd.close()
return
def save_prefs(self):
# Save prefs
if not os.path.exists(self.peatpath):
os.mkdir(self.peatpath)
import pickle
fd=open(self.pref_file,'w')
pickle.dump(self.prefs,fd)
fd.close()
return
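# Illustrative usage (hypothetical program name and defaults):
#
#   prefs = Preferences('dnatool', {'thumbsize': '200'})
#   prefs.set('datadir', '/tmp')
#   print prefs.get('datadir')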
class preferences_dialog:
def __init__(self,parent,parentframe=None,subset='PEAT',callback=None):
"""Open the settings dialog"""
self.parent=parent
if parentframe!=None:
self.settings = Frame(master=parentframe,relief=RAISED)
self.settings.pack(fill=BOTH)
else:
self.settings=Toplevel()
self.settings.title('PEAT settings')
self.balloon=Pmw.Balloon(self.settings)
import os
home = os.path.expanduser("~")
blobdir = os.path.join(home, '.peatblob')
if subset=='PEAT':
variables=[['username','','textbox',''],
['password','','password',''],
['blobdir',blobdir,'textbox','blob directory for remote DBs, if using relstorage(mysql) this should be shared fs'],
['promptforlogcomments',True,'boolean','Prompt for log comments'],
['showDialogsinSidePane',True,'boolean','Show certain dialogs in sidepane by default'],
['thumbsize','200','textbox','Thumbnail size for external files'],
['molgraphApplication','pymol',['yasara','vmd','pymol','rasmol','other'],'Molecular graphics app'],
                   ['molgraphAppPath','','textbox','Path to your molecular graphics application']]
# Put lots of choices up
row=0
vars={}
big_choice={}
self.balloon = Pmw.Balloon(self.settings)
for varname,default,choices,helptxt in variables:
if not self.parent.preferences.prefs.has_key(varname):
self.parent.preferences.set(varname,default)
# Find out which type of preference we have
if type(choices)==type([]):
# List of choices
var_value=self.parent.preferences.get(varname)
vars[varname]=StringVar()
vars[varname].set(var_value)
big_choice[varname]={'type':'options','choices':[]}
#for choice in choices:
# big_choice[varname]['choices']=choice)
optmenu = Pmw.OptionMenu (self.settings,
labelpos = 'w',
label_text = varname,
menubutton_textvariable = vars[varname],
items = choices,
menubutton_width = 10 )
optmenu.grid(row=row,column=0,columnspan=2)
if helptxt!='':
self.balloon.bind(optmenu, helptxt)
elif choices=='boolean':
var_value=self.parent.preferences.get(varname)
vars[varname]=BooleanVar()
lbl = Label(self.settings,text=varname)
lbl.grid(row=row,column=0)
col=1
vars[varname].set(var_value)
Checkbutton(self.settings,variable=vars[varname]).grid(row=row,column=col)
col=col+1
big_choice[varname]={'type':'boolean'}
elif choices=='textbox' or choices=='password':
# Free text with a default value
var_value=self.parent.preferences.get(varname)
vars[varname]=StringVar()
vars[varname].set(var_value)
lbl = Label(self.settings,text=varname)
lbl.grid(row=row,column=0)
if choices == 'password': s='*'
else: s=None
Entry(self.settings,textvariable=vars[varname],
bg='white',width=15,show=s).grid(row=row,column=1,columnspan=4)
big_choice[varname]={'type':'textbox'}
# Make a dropdown list of previous choices
try:
self.parent.preferences.get(varname+'_previous')
except:
self.parent.preferences.set(varname+'_previous',[default])
prev_choices=self.parent.preferences.get(varname+'_previous')
self.mb = Menubutton (self.settings,text="->",relief=RAISED )
self.mb.grid(row=row,column=5)
self.status_menu=Menu(self.mb,tearoff=0)
                # Add the previously used values as radiobutton entries
for setting in prev_choices:
self.status_menu.add_radiobutton(label=setting,
variable=vars[varname],
value=setting,
indicatoron=1)
self.mb['menu']=self.status_menu
self.balloon.bind(self.mb,'Previous values')
row=row+1
if helptxt!='':
self.balloon.bind(lbl, helptxt)
# Functions for saving settings
def save_settings():
for varname in big_choice.keys():
if big_choice[varname]['type']=='options':
value=vars[varname].get()
self.parent.preferences.set(varname,value)
elif big_choice[varname]['type']=='boolean':
value=vars[varname].get()
self.parent.preferences.set(varname,value)
elif big_choice[varname]['type']=='textbox':
value=vars[varname].get()
self.parent.preferences.set(varname,value)
# Save the previous value
prev_vals=self.parent.preferences.get(varname+'_previous')
if not value in prev_vals:
prev_vals.append(value)
self.parent.preferences.set(varname+'_previous',prev_vals)
else:
raise Exception('Unknown preference type')
self.settings.destroy()
self.parent.preferences.save_prefs()
if callback != None:
callback()
return
def cancel():
self.settings.destroy()
return
# Buttons for saving or cancelling
bf = Frame(self.settings); bf.grid(row=row+1,column=0,
columnspan=4,padx=2,pady=2)
Button(bf,text='Save settings',command=save_settings).pack(side=RIGHT,fill=BOTH)
Button(bf,text='Close', command=cancel).pack(side=RIGHT,fill=BOTH)
return
|
dmnfarrell/peat
|
DNATool2/Prefs.py
|
Python
|
mit
| 11,106
|
[
"PyMOL",
"RasMol",
"VMD",
"YASARA"
] |
d59581cfade6395a2aa79926d514e3ba5a1376a78aabf764e8acfe43ae21dba3
|
#!/usr/bin/env python
## Copyright (C) 2005-2006 Graham I Cummins
## This program is free software; you can redistribute it and/or modify it under
## the terms of the GNU General Public License as published by the Free Software
## Foundation; either version 2 of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT ANY
## WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
## PARTICULAR PURPOSE. See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along with
## this program; if not, write to the Free Software Foundation, Inc., 59 Temple
## Place, Suite 330, Boston, MA 02111-1307 USA
##
from data import Float64,fromstring,reshape,zeros, array, newData
from sys import byteorder
from mien.nmpml.basic_tools import NmpmlObject
import base64
from mien.math.array import Float32
def sortByIndAttrib(a, b):
return cmp(a.attrib("Index"), b.attrib("Index"))
class Recording(NmpmlObject):
'''Subclass for representing a record of a particular variable in
a model. Intended to be a child element of an Experiment.
Each Recording can record any number of points in space,
but a separate Recording should be used for each varriable.
Points are stored as ElementReferences referencing Sections, with the
Data atribute indicating the relative location along the section.
If there is more than one such reference, they must also have
Index attributes.
If no points are specified, the "Variable" is recorded directly (useful
for global and object variables).
Samples are stored in a child Data element, which is
automatically created if needed.
Attributes:
	Variable: name of the variable to be recorded. At some point
these names should be standardized, but for now they
are the names used by the Neuron simulator
SamplesPerSecond: The recording rate in Hz.
DataType:Optional. If not specified, uses Float32. Useful for very
long/spatially complex recordings of low prescision or
discrete variables, in order to save space.
'''
_allowedChildren =["Comments", "Data","ElementReference"]
_requiredAttributes = ["Name","Variable","SamplesPerSecond"]
_specialAttributes = ["DataType"]
def __str__(self):
return "Recording: %s" % (self.attrib('Variable'))
def getData(self):
de = self.getElementOrRef("Data")
if de:
self.data = de
else:
print "Can't find a data element. Making an empty one"
attrs = {"Url":"auto://upath", "Name":"recordingdata","SamplesPerSecond":self.attrib("SamplesPerSecond"), 'SampleType':'timeseries'}
self.data = newData(None, attrs)
self.newElement(self.data)
return self.data.getData()
def setData(self, dat, col=None, tit=None):
#print dat.shape, dat.max(), dat.min()
self.getData()
fs=self.attrib("SamplesPerSecond")
if self.data.attrib("SamplesPerSecond")!=fs:
self.data.setAttrib("SamplesPerSecond", fs)
if col == None:
self.data.datinit(dat, self.data.header())
if tit:
if type(tit)!=list:
tit=[tit]
self.data.setAttrib('Labels', tit)
elif self.data.shape()[1]==0 or dat.shape[0]!=self.data.shape()[0]:
self.data.datinit(dat, self.data.header())
self.data.setAttrib('Labels', [tit])
elif col<self.data.shape()[1]:
self.data.setData(dat, [col])
if tit:
self.data.setChanName(tit, col)
else:
if tit and type(tit)!=list:
tit=[tit]
self.data.addChans(dat, tit)
def setAllData(self, dat, labels=None):
self.getData()
head=self.data.header()
head["SamplesPerSecond"]=self.attrib("SamplesPerSecond")
head["SampleType"]="timeseries"
if labels:
head["Labels"]=labels
print dat.shape
self.data.datinit(dat, head)
def getPoints(self):
prs=self.getTypeRef("Section")
pts=[]
prs.sort(sortByIndAttrib)
for pr in prs:
rel = float(pr.attrib("Data"))
sec = pr.target()
pts.append([sec, rel])
cells=self.getTypeRef("Cell")
cells.sort(sortByIndAttrib)
for c in cells:
c=c.target()
print c
for sec in c.branch():
sec = c.getSection(sec)
pts.append([sec, 0.0])
pts.append([sec, 1.0])
return pts
def clearValues(self):
try:
fs=self.data.fs()
self.data.datinit(None, self.data.header())
except:
self.getData()
def getCellData(self, path):
path=path.rstrip('/')
prs=self.getTypeRef("Section")
if prs:
return None
cells=self.getTypeRef("Cell")
poss=[]
for c in cells:
if c.attrib("Target").rstrip('/')==path.rstrip('/'):
poss.append(c)
if len(poss)!=1:
return None
cell=poss[0].target()
ncols=cell.get_drawing_coords(spheres=True).shape[0]/2
dat=self.getData()
		if dat is None:
print "no data"
return
out=zeros((dat.shape[0], ncols), Float32)
insat=0
for i, sec in enumerate(cell.branch()):
si=cell.getSection(sec)
sv=dat[:,2*i]
ev=dat[:,2*i+1]
if si.attrib("Spherical"):
npts=2
else:
npts=si.points.shape[0]
diff=(ev-sv)/npts
for j in range(1,npts):
na=sv+diff*j
na=na.astype(Float32)
out[:,insat]=na
insat+=1
return out
def timestep(self):
self.getData()
fs=self.data.fs()
return 1.0/fs
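# Illustrative sketch (attribute values hypothetical; assumes the Data child
# carries the same sample rate as the Recording):
#
#   rec.setAttrib('SamplesPerSecond', 10000)
#   dt = rec.timestep()   # -> 1.0 / 10000 seconds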
ELEMENTS={"Recording":Recording}
|
gic888/MIEN
|
nmpml/recording.py
|
Python
|
gpl-2.0
| 5,293
|
[
"NEURON"
] |
7317185af36ea85998e7018c41b9d0945c7b6c5f69fd997bcf38091c829c4415
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
# Copyright (C) 2019
# Max Planck Computing and Data Facility
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************************
espressopp.storage.DomainDecompositionAdress
********************************************
The DomainDecompositionAdress is the Domain Decomposition for AdResS and H-
AdResS simulations. It makes sure that tuples (i.e. a coarse-grained particle
and its corresponding atomistic particles) are always stored together on one CPU.
When setting DomainDecompositionAdress you have to provide the system as well as
the nodegrid and the cellgrid.
Example - setting DomainDecompositionAdress:
>>> system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
.. function:: espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid, halfCellInt)
:param system:
:param nodeGrid:
:param cellGrid:
:param halfCellInt: controls the use of half-cells (value 2), third-cells (value 3) or higher. Implicit value 1 for full cells (normal functionality).
:type system:
:type nodeGrid:
:type cellGrid:
:type halfCellInt: int
"""
from espressopp import pmi
from espressopp.esutil import cxxinit
from _espressopp import storage_DomainDecompositionAdress
from espressopp import toInt3DFromVector
from espressopp.tools import decomp
from espressopp import check
from espressopp.storage.Storage import *
class DomainDecompositionAdressLocal(StorageLocal,
storage_DomainDecompositionAdress):
def __init__(self, system, nodeGrid, cellGrid, halfCellInt):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, storage_DomainDecompositionAdress, system, nodeGrid, cellGrid, halfCellInt)
if pmi.isController:
class DomainDecompositionAdress(Storage):
pmiproxydefs = dict(
cls = 'espressopp.storage.DomainDecompositionAdressLocal',
pmicall = ['getCellGrid', 'cellAdjust']
)
def __init__(self, system, nodeGrid='auto', cellGrid='auto', halfCellInt='auto', nocheck=False):
if nocheck:
self.next_id = 0
self.pmiinit(system, nodeGrid, cellGrid, halfCellInt)
else:
if check.System(system, 'bc'):
if nodeGrid == 'auto':
nodeGrid = decomp.nodeGrid(system.comm.rank)
else:
nodeGrid = toInt3DFromVector(nodeGrid)
if cellGrid == 'auto':
raise Exception('Automatic cell size calculation not yet implemented')
else:
cellGrid = toInt3DFromVector(cellGrid)
if halfCellInt == 'auto':
halfCellInt = 1
                    for k in range(3):
if nodeGrid[k]*cellGrid[k] == 1:
print(("Warning! cellGrid[{}] has been "
"adjusted to 2 (was={})".format(k, cellGrid[k])))
cellGrid[k] = 2
self.next_id = 0
self.pmiinit(system, nodeGrid, cellGrid, halfCellInt)
else:
raise Exception('Error: could not create DomainDecomposition object')
|
govarguz/espressopp
|
src/storage/DomainDecompositionAdress.py
|
Python
|
gpl-3.0
| 4,205
|
[
"ESPResSo"
] |
07e86e706bcc2632f5ac54630ad856e253bb80e959879c711732e320ce05284b
|
'''
Example of a spike generator (only outputs spikes)
In this example spikes are generated and sent as UDP packets. At the end of the simulation a raster plot of the
spikes is created.
'''
from brian import *
import numpy
from brian_multiprocess_udp import BrianConnectUDP
number_of_neurons_total = 45
number_of_neurons_spiking = 30
def main_NeuronGroup(input_Neuron_Group, simulation_clock):
print "main_NeuronGroup!" #DEBUG!
simclock = simulation_clock
delta_t=5
random_list=numpy.random.randint(number_of_neurons_total,size=number_of_neurons_spiking)
random_list.sort()
spiketimes = [(i, delta_t*ms) for i in random_list]
SpikesOut = SpikeGeneratorGroup(number_of_neurons_total, spiketimes, period=300*ms, clock=simclock) # the maximum clock of the input spikes is limited here (period)
MSpkOut=SpikeMonitor(SpikesOut) # Spikes sent by UDP
return ([SpikesOut],[],[MSpkOut])
def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
"""
input_NG: the neuron group that receives the input spikes
simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)
This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
"""
figure()
raster_plot(simulation_MN[0])
title("Spikes Sent by UDP")
show(block=True)
if __name__=="__main__":
my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsOutput=number_of_neurons_total, post_simulation_function=post_simulation_function,
output_addresses=[("127.0.0.1", 10101), ("127.0.0.1", 12121)], simclock_dt=5, TotalSimulationTime=10000, brian_address=0)
|
ricardodeazambuja/BrianConnectUDP
|
examples/InputNeuronGroup_multiple_outputs.py
|
Python
|
cc0-1.0
| 1,929
|
[
"Brian",
"NEURON"
] |
5afb38fba155b2f53e3e76999963f6df77b21da74d770019de233587c42a1ac1
|
from numpy import c_, zeros, arange
from traits.api import HasStrictTraits, \
true, false, Instance
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi.sources.array_source import ArraySource
from mayavi.core.source import Source
from mayavi.core.trait_defs import ArrayOrNone
from tvtk.api import tvtk
############################################################################
# The DataSourceFactory class
############################################################################
class DataSourceFactory(HasStrictTraits):
""" Factory for creating data sources. The information about the
organisation of the data is given by setting the public traits.
"""
    # Whether the position is implicitly inferred from the array indices
position_implicit = false
# Whether the data is on an orthogonal grid
orthogonal_grid = false
# If the data is unstructured
unstructured = false
# If the factory should attempt to connect the data points
connected = true
# The position of the data points
position_x = ArrayOrNone
position_y = ArrayOrNone
position_z = ArrayOrNone
    # Connectivity array. If none, it is implicitly inferred from the array
# indices
connectivity_triangles = ArrayOrNone
    # Whether the data points should be connected by lines.
lines = false
# The scalar data array
scalar_data = ArrayOrNone
# Whether there is vector data
has_vector_data = false
# The vector components
vector_u = ArrayOrNone
vector_v = ArrayOrNone
vector_w = ArrayOrNone
#----------------------------------------------------------------------
# Private traits
#----------------------------------------------------------------------
_vtk_source = Instance(tvtk.DataSet)
_mayavi_source = Instance(Source)
#----------------------------------------------------------------------
# Private interface
#----------------------------------------------------------------------
def _add_scalar_data(self):
""" Adds the scalar data to the vtk source.
"""
if self.scalar_data is not None:
scalars = self.scalar_data.ravel()
self._vtk_source.point_data.scalars = scalars
def _add_vector_data(self):
""" Adds the vector data to the vtk source.
"""
if self.has_vector_data:
vectors = c_[self.vector_u.ravel(),
self.vector_v.ravel(),
self.vector_w.ravel(),
]
self._vtk_source.point_data.vectors = vectors
def _mk_polydata(self):
""" Creates a PolyData vtk data set using the factory's
attributes.
"""
points = c_[self.position_x.ravel(),
self.position_y.ravel(),
self.position_z.ravel(),
]
lines = None
if self.lines:
np = len(points) - 1
lines = zeros((np, 2), 'l')
lines[:, 0] = arange(0, np - 0.5, 1, 'l')
lines[:, 1] = arange(1, np + 0.5, 1, 'l')
self._vtk_source = tvtk.PolyData(points=points, lines=lines)
if (self.connectivity_triangles is not None and
self.connected):
assert self.connectivity_triangles.shape[1] == 3, \
"The connectivity list must be Nx3."
self._vtk_source.polys = self.connectivity_triangles
self._mayavi_source = VTKDataSource(data=self._vtk_source)
def _mk_image_data(self):
""" Creates an ImageData VTK data set and the associated ArraySource
using the factory's attributes.
"""
self._mayavi_source = ArraySource(transpose_input_array=True,
scalar_data=self.scalar_data,
origin=[0., 0., 0],
spacing=[1, 1, 1])
self._vtk_source = self._mayavi_source.image_data
def _mk_rectilinear_grid(self):
""" Creates a RectilinearGrid VTK data set using the factory's
attributes.
"""
rg = tvtk.RectilinearGrid()
x = self.position_x.squeeze()
if x.ndim == 3:
x = x[:, 0, 0]
y = self.position_y.squeeze()
if y.ndim == 3:
y = y[0, :, 0]
z = self.position_z.squeeze()
if z.ndim == 3:
z = z[0, 0, :]
# FIXME: We should check array size here.
rg.dimensions = (x.size, y.size, z.size)
rg.x_coordinates = x
rg.y_coordinates = y
rg.z_coordinates = z
self._vtk_source = rg
self._mayavi_source = VTKDataSource(data=self._vtk_source)
def _mk_structured_grid(self):
""" Creates a StructuredGrid VTK data set using the factory's
attributes.
"""
# FIXME: We need to figure out the dimensions of the data
# here, if any.
sg = tvtk.StructuredGrid(dimensions=self.scalar_data.shape)
sg.points = c_[self.position_x.ravel(),
self.position_y.ravel(),
self.position_z.ravel(),
]
self._vtk_source = sg
self._mayavi_source = VTKDataSource(data=self._vtk_source)
#----------------------------------------------------------------------
# Public interface
#----------------------------------------------------------------------
def build_data_source(self, **traits):
""" Uses all the information given by the user on his data
structure to figure out the right data structure.
"""
self.set(**traits)
if not self.lines:
if self.position_implicit:
self._mk_image_data()
elif self.orthogonal_grid:
self._mk_rectilinear_grid()
elif self.connectivity_triangles is None:
if self.unstructured:
self._mk_polydata()
else:
self._mk_structured_grid()
else:
self._mk_polydata()
else:
self._mk_polydata()
self._add_scalar_data()
self._add_vector_data()
return self._mayavi_source
def view(src):
""" Open up a mayavi scene and display the dataset in it.
"""
from mayavi import mlab
mayavi = mlab.get_engine()
fig = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0),)
mayavi.add_source(src)
mlab.pipeline.surface(src, opacity=0.1)
mlab.pipeline.surface(mlab.pipeline.extract_edges(src),
color=(0, 0, 0), )
def test_image_data():
from numpy import random
scalars = random.random((3, 3, 3))
factory = DataSourceFactory()
image_data = factory.build_data_source(scalar_data=scalars,
position_implicit=True,)
view(image_data)
def test_rectilinear_grid():
from numpy import random, mgrid
factory = DataSourceFactory()
scalars = random.random((3, 3, 3))
x = arange(3) ** 2
y = 0.5 * arange(3) ** 2
z = arange(3) ** 2
rectilinear_grid = factory.build_data_source(scalar_data=scalars,
position_implicit=False,
orthogonal_grid=True,
position_x=x,
position_y=y,
position_z=z)
view(rectilinear_grid)
def test_structured_grid():
from numpy import random, mgrid
factory = DataSourceFactory()
scalars = random.random((3, 3, 3))
x, y, z = mgrid[0:3, 0:3, 0:3]
x = x + 0.5 * random.random(x.shape)
y = y + 0.5 * random.random(y.shape)
z = z + 0.5 * random.random(z.shape)
structured_grid = factory.build_data_source(scalar_data=scalars,
position_x=x,
position_y=y,
position_z=z)
view(structured_grid)
if __name__ == '__main__':
from pyface.api import GUI
test_image_data()
test_rectilinear_grid()
test_structured_grid()
GUI().start_event_loop()
|
dmsurti/mayavi
|
mayavi/tools/data_wizards/data_source_factory.py
|
Python
|
bsd-3-clause
| 8,298
|
[
"Mayavi",
"VTK"
] |
681117a93fb24671ee119148ae2c98c22912d07599db33244c3808136e57dfa8
|
from future import standard_library
standard_library.install_aliases()
import logging
import pycurl
import io
import re
import os
import hashlib
from biomaj.utils import Utils
from biomaj.download.ftp import FTPDownload
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
class HTTPDownload(FTPDownload):
'''
    Class to download files over HTTP
Makes use of http.parse.dir.line etc.. regexps to extract page information
protocol=http
server=ftp.ncbi.nih.gov
remote.dir=/blast/db/FASTA/
remote.files=^alu.*\\.gz$
'''
def __init__(self, protocol, host, rootdir, config):
FTPDownload.__init__(self, protocol, host, rootdir)
self.config = config
def list(self, directory=''):
'''
List FTP directory
:return: tuple of file and dirs in current directory with details
'''
logging.debug('Download:List:'+self.url+self.rootdir+directory)
#self.crl.setopt(pycurl.URL, self.url+self.rootdir+directory)
try:
self.crl.setopt(pycurl.URL, self.url+self.rootdir+directory)
except Exception as a:
self.crl.setopt(pycurl.URL, (self.url+self.rootdir+directory).encode('ascii', 'ignore'))
if self.proxy is not None:
self.crl.setopt(pycurl.PROXY, self.proxy)
if self.proxy_auth is not None:
self.crl.setopt(pycurl.PROXYUSERPWD, self.proxy_auth)
if self.credentials is not None:
self.crl.setopt(pycurl.USERPWD, self.credentials)
output = BytesIO()
# lets assign this buffer to pycurl object
self.crl.setopt(pycurl.WRITEFUNCTION, output.write)
self.crl.setopt(pycurl.HEADERFUNCTION, self.header_function)
self.crl.perform()
# Figure out what encoding was sent with the response, if any.
# Check against lowercased header name.
encoding = None
if 'content-type' in self.headers:
content_type = self.headers['content-type'].lower()
match = re.search('charset=(\S+)', content_type)
if match:
encoding = match.group(1)
if encoding is None:
# Default encoding for HTML is iso-8859-1.
# Other content types may have different default encoding,
# or in case of binary data, may have no encoding at all.
encoding = 'iso-8859-1'
# lets get the output in a string
result = output.getvalue().decode(encoding)
'''
'http.parse.dir.line': r'<a[\s]+href="([\S]+)/".*alt="\[DIR\]">.*([\d]{2}-[\w\d]{2,5}-[\d]{4}\s[\d]{2}:[\d]{2})',
'http.parse.file.line': r'<a[\s]+href="([\S]+)".*([\d]{2}-[\w\d]{2,5}-[\d]{4}\s[\d]{2}:[\d]{2})[\s]+([\d\.]+[MKG]{0,1})',
'http.group.dir.name': 1,
'http.group.dir.date': 2,
'http.group.file.name': 1,
'http.group.file.date': 2,
'http.group.file.size': 3,
'''
rfiles = []
rdirs = []
dirs = re.findall(self.config.get('http.parse.dir.line'), result)
if dirs is not None and len(dirs) > 0:
for founddir in dirs:
rfile = {}
rfile['permissions'] = ''
rfile['group'] = ''
rfile['user'] = ''
rfile['size'] = '0'
date = founddir[int(self.config.get('http.group.dir.date'))-1]
dirdate = date.split()
parts = dirdate[0].split('-')
#19-Jul-2014 13:02
rfile['month'] = Utils.month_to_num(parts[1])
rfile['day'] = parts[0]
rfile['year'] = parts[2]
rfile['name'] = founddir[int(self.config.get('http.group.dir.name'))-1]
rdirs.append(rfile)
files = re.findall(self.config.get('http.parse.file.line'), result)
if files is not None and len(files)>0:
for foundfile in files:
rfile = {}
rfile['permissions'] = ''
rfile['group'] = ''
rfile['user'] = ''
rfile['size'] = foundfile[int(self.config.get('http.group.file.size'))-1]
date = foundfile[int(self.config.get('http.group.file.date'))-1]
dirdate = date.split()
parts = dirdate[0].split('-')
#19-Jul-2014 13:02
rfile['month'] = Utils.month_to_num(parts[1])
rfile['day'] = parts[0]
rfile['year'] = parts[2]
rfile['name'] = foundfile[int(self.config.get('http.group.file.name'))-1]
filehash = (rfile['name']+str(date)+str(rfile['size'])).encode('utf-8')
rfile['hash'] = hashlib.md5(filehash).hexdigest()
rfiles.append(rfile)
return (rfiles, rdirs)
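# Illustrative usage (hypothetical config object providing the http.parse.*
# and http.group.* options referenced above):
#
#   downloader = HTTPDownload('http', 'ftp.ncbi.nih.gov', '/blast/db/FASTA/', config)
#   rfiles, rdirs = downloader.list()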
|
horkko/biomaj-postgres
|
biomaj/download/http.py
|
Python
|
agpl-3.0
| 4,880
|
[
"BLAST"
] |
0a698a51cbf25e94d7acd1eb91f125dbb5d8dd7433e946ebe3605ea96cffae0c
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2009 Brian G. Matherly
# Copyright (C) 2009 Peter G. Landgren
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Some independent constants/functions that can be safely imported
without any translation happening yet. Do _not_ add imports that will
perform a translation on import, eg Gtk.
"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import platform
import sys
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from const import WINDOWS, MACOS, LINUX
#-------------------------------------------------------------------------
#
# Public Functions
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# Platform determination functions
#
#-------------------------------------------------------------------------
def lin():
"""
Return True if a linux system
    Note: code normally treats Linux as the default, i.e. the else branch of a platform check.
"""
if platform.system() in LINUX:
return True
return False
def mac():
"""
Return True if a Macintosh system
"""
if platform.system() in MACOS:
return True
return False
def win():
"""
Return True if a windows system
"""
if platform.system() in WINDOWS:
return True
return False
## The following functions do import gtk, but only when called. They
## should only be called after translation system has been
## initialized!
def is_quartz():
"""
Tests to see if Python is currently running with gtk and
    the windowing system is Mac OS X's "quartz".
"""
if mac():
try:
from gi.repository import Gtk
from gi.repository import Gdk
except:
return False
return Gdk.Display.get_default().__class__.__name__.endswith("QuartzDisplay")
return False
def has_display():
"""
Tests to see if Python is currently running with gtk
"""
# FIXME: currently, Gtk.init_check() requires all strings
# in argv, and we might have unicode.
temp, sys.argv = sys.argv, sys.argv[:1]
try:
from gi.repository import Gtk
except:
return False
try:
test = Gtk.init_check(temp)
sys.argv = temp
if test:
return True
else:
return False
except:
sys.argv = temp
return False
# A couple of places add menu accelerators using <alt>, which doesn't
# work with Gtk-quartz. <Meta> is usually the correct replacement, but
# in one case the key is a number, and <meta>number is used by Spaces
# (a mac feature), so we'll use control instead.
def mod_key():
"""
Returns a string to pass to an accelerator map.
"""
if is_quartz():
return "<ctrl>"
return "<alt>"
|
arunkgupta/gramps
|
gramps/gen/constfunc.py
|
Python
|
gpl-2.0
| 3,813
|
[
"Brian"
] |
2e37917801d40f25e6c1fcaee469fca697cdbe624e4ea973beb2e25c2fd456df
|
"""End-To-End Memory Networks.
The implementation is based on http://arxiv.org/abs/1503.08895 [1]
"""
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
import numpy as np
from six.moves import range
import code
def position_encoding(sentence_size, embedding_size):
"""
Position Encoding described in section 4.1 [1]
"""
encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)
ls = sentence_size+1
le = embedding_size+1
for i in range(1, le):
for j in range(1, ls):
encoding[i-1, j-1] = (i - (le-1)/2) * (j - (ls-1)/2)
encoding = 1 + 4 * encoding / embedding_size / sentence_size
return np.transpose(encoding)
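# Quick illustrative check of the encoding above (shape only):
#
#   pe = position_encoding(sentence_size=6, embedding_size=20)
#   assert pe.shape == (6, 20)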
def zero_nil_slot(t, name=None):
"""
Overwrites the nil_slot (first row) of the input Tensor with zeros.
The nil_slot is a dummy slot and should not be trained and influence
the training algorithm.
"""
with tf.op_scope([t], name, "zero_nil_slot") as name:
t = tf.convert_to_tensor(t, name="t")
s = tf.shape(t)[1]
z = tf.zeros(tf.pack([1, s]))
return tf.concat(0, [z, tf.slice(t, [1, 0], [-1, -1])], name=name)
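# e.g. (illustrative): for a (vocab_size, embedding_size) gradient g,
# zero_nil_slot(g) equals g with its first row zeroed, so the nil-word
# embedding never receives gradient updates.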
def add_gradient_noise(t, stddev=1e-3, name=None):
"""
Adds gradient noise as described in http://arxiv.org/abs/1511.06807 [2].
The input Tensor `t` should be a gradient.
The output will be `t` + gaussian noise.
0.001 was said to be a good fixed value for memory networks [2].
"""
with tf.op_scope([t, stddev], name, "add_gradient_noise") as name:
t = tf.convert_to_tensor(t, name="t")
gn = tf.random_normal(tf.shape(t), stddev=stddev)
return tf.add(t, gn, name=name)
class MemN2N(object):
"""End-To-End Memory Network."""
def __init__(self, batch_size, vocab_size, sentence_size, memory_size, embedding_size,
hops=3,
max_grad_norm=40.0,
nonlin=None,
initializer=tf.random_normal_initializer(stddev=0.1),
optimizer=tf.train.AdamOptimizer(learning_rate=1e-2),
encoding=position_encoding,
session=tf.Session(),
name='MemN2N'):
"""Creates an End-To-End Memory Network
Args:
batch_size: The size of the batch.
vocab_size: The size of the vocabulary (should include the nil word). The nil word
one-hot encoding should be 0.
sentence_size: The max size of a sentence in the data. All sentences should be padded
to this length. If padding is required it should be done with nil one-hot encoding (0).
memory_size: The max size of the memory. Since Tensorflow currently does not support jagged arrays
all memories must be padded to this length. If padding is required, the extra memories should be
empty memories; memories filled with the nil word ([0, 0, 0, ......, 0]).
memory_size is min(memory_size, max_story_size),
and max_story_size is the maximum number of sentences in a story
            embedding_size: The size of the word embedding.
hops: The number of hops. A hop consists of reading and addressing a memory slot.
Defaults to `3`.
max_grad_norm: Maximum L2 norm clipping value. Defaults to `40.0`.
nonlin: Non-linearity. Defaults to `None`.
initializer: Weight initializer. Defaults to `tf.random_normal_initializer(stddev=0.1)`.
optimizer: Optimizer algorithm used for SGD. Defaults to `tf.train.AdamOptimizer(learning_rate=1e-2)`.
encoding: A function returning a 2D Tensor (sentence_size, embedding_size). Defaults to `position_encoding`.
session: Tensorflow Session the model is run with. Defaults to `tf.Session()`.
name: Name of the End-To-End Memory Network. Defaults to `MemN2N`.
"""
self._batch_size = batch_size
self._vocab_size = vocab_size
self._sentence_size = sentence_size
self._memory_size = memory_size
self._embedding_size = embedding_size
self._hops = hops
self._max_grad_norm = max_grad_norm
self._nonlin = nonlin
self._init = initializer
self._opt = optimizer
self._name = name
self._build_inputs()
self._build_vars()
self._encoding = tf.constant(encoding(self._sentence_size, self._embedding_size), name="encoding")
# cross entropy
# to convert back from tensor to numpy array, use .eval() on the transformed tensor
logits = self._inference(self._stories, self._queries) # (batch_size, vocab_size)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, tf.cast(self._answers, tf.float32), name="cross_entropy")
cross_entropy_sum = tf.reduce_sum(cross_entropy, name="cross_entropy_sum")
# loss op
loss_op = cross_entropy_sum
# gradient pipeline
grads_and_vars = self._opt.compute_gradients(loss_op)
grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in grads_and_vars]
grads_and_vars = [(add_gradient_noise(g), v) for g,v in grads_and_vars]
nil_grads_and_vars = []
for g, v in grads_and_vars:
if v.name in self._nil_vars:
nil_grads_and_vars.append((zero_nil_slot(g), v))
else:
nil_grads_and_vars.append((g, v))
train_op = self._opt.apply_gradients(nil_grads_and_vars, name="train_op")
# predict ops
# the predict op is to get the maximum at the first dimension
# now we want to restrict the predicted words by the words appearing in the context
#logits_np = logits.eval()
#stories_np = self._stories.eval()
#queries_np = self._queries.eval()
#logits2 = tf.reshape(logits, [-1])
#self._stories2 = tf.reshape(self._stories, [-1, sentence_size * memory_size])
#logits2 = tf.gather(logits2, self._stories2)
predict_op = tf.argmax(logits, 1, name="predict_op")
# softmax for the probability distribution
predict_proba_op = tf.nn.softmax(logits, name="predict_proba_op")
# log of the probability distribution
predict_log_proba_op = tf.log(predict_proba_op, name="predict_log_proba_op")
# assign ops
self.loss_op = loss_op
self.predict_op = predict_op
self.predict_proba_op = predict_proba_op
self.predict_log_proba_op = predict_log_proba_op
self.train_op = train_op
init_op = tf.initialize_all_variables()
self._sess = session
self._sess.run(init_op)
def _build_inputs(self):
# number of stories (variable), number of memory units, number of words in a sentence
self._stories = tf.placeholder(tf.int32, [None, self._memory_size, self._sentence_size], name="stories")
# number of queries (variable), number of words in a sentence
self._queries = tf.placeholder(tf.int32, [None, self._sentence_size], name="queries")
# should be a probability distribution of vocabulary
self._answers = tf.placeholder(tf.int32, [None, self._vocab_size], name="answers")
def _build_vars(self):
with tf.variable_scope(self._name):
nil_word_slot = tf.zeros([1, self._embedding_size])
# A is an embedding matrix
A = tf.concat(0, [ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
# B is another embedding matrix
B = tf.concat(0, [ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
self.A = tf.Variable(A, name="A")
self.B = tf.Variable(B, name="B")
# TA is the temporal matrix with shape memory_size * embedding_size
self.TA = tf.Variable(self._init([self._memory_size, self._embedding_size]), name='TA')
self.H = tf.Variable(self._init([self._embedding_size, self._embedding_size]), name="H")
# W is the output matrix
self.W = tf.Variable(self._init([self._embedding_size, self._vocab_size]), name="W")
self._nil_vars = set([self.A.name, self.B.name])
# get the predicted answer
def _inference(self, stories, queries):
with tf.variable_scope(self._name):
# look up the queries in the embedding matrix
# the queries are transformed into the embedding format instead of the indices
q_emb = tf.nn.embedding_lookup(self.B, queries)
# position encoding
u_0 = tf.reduce_sum(q_emb * self._encoding, 1)
u = [u_0]
for _ in range(self._hops):
# get the embeddings of the stories
m_emb = tf.nn.embedding_lookup(self.A, stories)
# m_i = sum(A x_ij) + T_A(i)
# reduce the sum along the second dimension
m = tf.reduce_sum(m_emb * self._encoding, 2) + self.TA
# hack to get around no reduce_dot
u_temp = tf.transpose(tf.expand_dims(u[-1], -1), [0, 2, 1])
dotted = tf.reduce_sum(m * u_temp, 2)
# Calculate probabilities
probs = tf.nn.softmax(dotted)
probs_temp = tf.transpose(tf.expand_dims(probs, -1), [0, 2, 1])
c_temp = tf.transpose(m, [0, 2, 1])
o_k = tf.reduce_sum(c_temp * probs_temp, 2)
u_k = tf.matmul(u[-1], self.H) + o_k
# nonlinearity
if self._nonlin:
u_k = self._nonlin(u_k)
u.append(u_k)
res = tf.matmul(u_k, self.W)
return res
def batch_fit(self, stories, queries, answers):
"""Runs the training algorithm over the passed batch
Args:
stories: Tensor (None, memory_size, sentence_size)
queries: Tensor (None, sentence_size)
answers: Tensor (None, vocab_size)
Returns:
loss: floating-point number, the loss computed for the batch
"""
feed_dict = {self._stories: stories, self._queries: queries, self._answers: answers}
loss, _ = self._sess.run([self.loss_op, self.train_op], feed_dict=feed_dict)
return loss
def predict(self, stories, queries):
"""Predicts answers as one-hot encoding.
Args:
stories: Tensor (None, memory_size, sentence_size)
queries: Tensor (None, sentence_size)
Returns:
answers: Tensor (None, vocab_size)
"""
feed_dict = {self._stories: stories, self._queries: queries}
# predict_op = tf.argmax(logits, 1, name="predict_op")
res = self._sess.run(self.predict_op, feed_dict=feed_dict)
return res
def test_predict(self, stories, queries, context_vocab):
feed_dict = {self._stories: stories, self._queries: queries}
logits = self._inference(self._stories, self._queries) # (batch_size, vocab_size)
logits = tf.add(logits, context_vocab)
test_predict_op = tf.argmax(logits, 1, name="predict_op")
res = self._sess.run(test_predict_op, feed_dict=feed_dict)
return res
def predict_proba(self, stories, queries):
"""Predicts probabilities of answers.
Args:
stories: Tensor (None, memory_size, sentence_size)
queries: Tensor (None, sentence_size)
Returns:
answers: Tensor (None, vocab_size)
"""
feed_dict = {self._stories: stories, self._queries: queries}
return self._sess.run(self.predict_proba_op, feed_dict=feed_dict)
def predict_log_proba(self, stories, queries):
"""Predicts log probabilities of answers.
Args:
stories: Tensor (None, memory_size, sentence_size)
queries: Tensor (None, sentence_size)
Returns:
answers: Tensor (None, vocab_size)
"""
feed_dict = {self._stories: stories, self._queries: queries}
return self._sess.run(self.predict_log_proba_op, feed_dict=feed_dict)
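# --- Hedged usage sketch (not part of the original file). Assumes the class
# above is named MemN2N as in the repository, and that stories/queries/answers
# are already vectorized to the shapes documented in batch_fit; all sizes and
# values below are arbitrary placeholders. ---
def _example_memn2n_usage():
    import numpy as np
    model = MemN2N(batch_size=4, vocab_size=40, sentence_size=10,
                   memory_size=50, embedding_size=20, hops=3)
    stories = np.zeros((4, 50, 10), dtype=np.int32)    # nil-padded word indices
    queries = np.zeros((4, 10), dtype=np.int32)
    answers = np.zeros((4, 40), dtype=np.int32)
    answers[:, 1] = 1                                  # one-hot answer labels
    loss = model.batch_fit(stories, queries, answers)  # one SGD step, returns batch loss
    preds = model.predict(stories, queries)            # argmax word indices
    return loss, preds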
|
ZeweiChu/memn2n
|
memn2n/memn2n.py
|
Python
|
mit
| 12,164
|
[
"Gaussian"
] |
88d2999379a2ec882d4150a861af822c24d01cd9982f0063085c42bc1c81ccba
|
# -*- coding=utf-8 -*-
"""functional testing for bioweb application"""
import sys
import os
import time
import unittest
import re
import subprocess
from splinter import Browser
from selenium.common.exceptions import NoAlertPresentException, NoSuchElementException
##find_by_type
def findByType(browser, ident, type):
if type not in ('css', 'xpath', 'tag', 'id', 'text', 'name', 'href'):
raise ValueError("Improper search method " + str(type))
if(type == 'css'):
return browser.find_by_css(ident)
elif(type == 'xpath'):
return browser.find_by_xpath(ident)
elif(type == 'tag'):
return browser.find_by_tag(ident)
elif(type == 'text'):
return browser.find_by_text(ident)
elif(type == 'id'):
return browser.find_by_id(ident)
elif(type == 'name'):
return browser.find_by_name(ident)
elif(type == 'href'):
return browser.find_link_by_href(ident)
return
## @brief test-cases
class TestFunctionalBioweb(unittest.TestCase):
## Browser used for testing - default Google Chrome
browser = ''
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(self):
pass
def setUp(self):
pass
def tearDown(self):
pass
def clickMenuLink(self, ident, interval=0.1, maxTime=1.0, type='css'):
"""Searches for an identifier and clicks it. Search method is provided by 'type' argument
- either css, xpath, tag, text, id, href or name."""
counter = 0.0
link = None
while counter < maxTime and link is None:
try:
link = findByType(self.browser, ident, type)
except:
time.sleep(interval)
counter += interval
self.assertIsNotNone(link, "Cannot find link with ident='{css}' in {brow}".format(css=ident, brow='self.browser'))
link.first.click()
def findElement(self, ident, interval=0.1, maxTime=1, type='css'):
"""Searches for an identifier and returns it. Search method is provided by 'type' argument
- either css, xpath, tag, text, id, href or name."""
counter = 0.0
link = None
while counter < maxTime and link is None:
try:
link = findByType(self.browser, ident, type)
except:
time.sleep(interval)
counter += interval
self.assertIsNotNone(link, "Cannot find link with ident='{css}' in {brow}".format(css=ident, brow='self.browser'))
return link.first
def test01AnyAnswer(self):
"""tests if the application is loaded"""
self.assertTrue(len(self.browser.html) > 0)
def test02ProperTitleAndLogo(self):
"""tests if the web page title and logo is correct"""
title = self.browser.title
if not isinstance(title, str):
title = title.decode()
self.assertEqual(title, u'MyApp')
def test03TabTranslations(self):
"""test if translations works"""
self.clickMenuLink('#a_lang_en')
self.assertEqual(self.findElement('server_time', type='id').text[:len('server time:')], u'server time:')
self.assertEqual(self.findElement('server_version', type='id').text[:len('server version:')], u'server version:')
self.assertEqual(self.findElement('db_version', type='id').text[:len('db version:')], u'db version:')
self.assertEqual(self.findElement('client_version', type='id').text[:len('client version:')], u'client version:')
self.assertEqual(self.findElement('cpp_get_number', type='id').text[:len('C++ getNumber() result:')], u'C++ getNumber() result:')
self.clickMenuLink('#a_lang_pl')
self.assertEqual(self.findElement('server_time', type='id').text[:len('czas serwera:')], u'czas serwera:')
self.assertEqual(self.findElement('server_version', type='id').text[:len('wersja serwera:')], u'wersja serwera:')
self.assertEqual(self.findElement('db_version', type='id').text[:len('wersja bazy danych:')], u'wersja bazy danych:')
self.assertEqual(self.findElement('client_version', type='id').text[:len('wersja klienta:')], u'wersja klienta:')
self.assertEqual(self.findElement('cpp_get_number', type='id').text[:len('C++ getNumber() result:')], u'C++ getNumber() result:')
def test04About(self):
"""test 'about' page"""
server_time = self.browser.find_by_id('server_time_val').first.text
self.assertTrue(len(server_time) > 0)
self.assertTrue(len(self.findElement('server_version_val', type='id').text) > 0)
self.assertTrue(len(self.findElement('db_version_val', type='id').text) > 0)
self.assertTrue(len(self.findElement('client_version_val', type='id').text) > 0)
server_time_after = server_time
counter = 0
while server_time_after == server_time and counter < 10:
server_time_after = self.browser.find_by_id('server_time_val').first.text
time.sleep(1)
counter += 1
self.assertNotEqual(server_time, server_time_after)
def test05CppCommands(self):
"""test new command creation"""
self.assertEqual(self.findElement('cpp_commands_number_val', type='id').text, "0")
self.clickMenuLink('cpp_new_command_button', type='id')
time.sleep(1)
self.assertEqual(self.findElement('cpp_commands_number_val', type='id').text, "1")
self.assertEqual(self.findElement('cpp_command_id', type='id').text, "1")
self.clickMenuLink('cpp_new_command_button', type='id')
time.sleep(1)
self.assertEqual(self.findElement('cpp_commands_number_val', type='id').text, "2")
if __name__ == "__main__":
## Browser used in the tests
www_browser = 'chrome'
## Webpage address
www_addr = '127.0.0.1'
## Port used
www_port = '9000'
## Test mode - f for localhost, g for demo server
mode = ''
if len(sys.argv) == 4:
www_browser = sys.argv[1]
www_addr = sys.argv[2]
www_port = sys.argv[3]
if www_browser == 'google-chrome' or www_browser == 'google-chrome-stable':
www_browser = 'chrome' # Drivers only recognize 'chrome' as a name
browser = Browser(www_browser)
browser.driver.maximize_window()
address = 'http://' + www_addr + ':' + www_port
browser.visit(address)
# setting up the class
TestFunctionalBioweb.browser = browser
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestFunctionalBioweb))
try:
unittest.TextTestRunner(verbosity=3).run(suite)
finally:
pass
browser.quit()
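# Hedged usage note (not part of the original file): the script takes exactly
# three positional arguments, mapping to www_browser, www_addr and www_port, e.g.
#   python functional_tests.py chrome 127.0.0.1 9000
# With a different number of arguments, the defaults defined above are used.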
|
mingless/bayesian_webclass
|
bioweb/functional_tests.py
|
Python
|
mit
| 6,935
|
[
"VisIt"
] |
a5c90e970cbbda23cd8f4aff2f339cbb2e3ada5667a89375a37930953c2091bf
|
# Copyright (C) 2017
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
espressopp.interaction.ConstrainRG
**********************************
This class calculates forces acting on constrained radii of gyration of subchains [Zhang_2014]_.
Subchains are defined as a tuple list.
.. math:: U = k_{rg} \left(R_{g}^2 - {R_{g}^{ideal}}^2\right)^2
where :math:`R_{g}^{ideal}` stands for the desired radius of gyration of subchain.
This class sets two conditions on the tuple list defining subchains.
1. The length of all tuples must be the same.
2. int(key particle id / the length of a tuple) must not be redundant, where the key particle id is the smallest particle id in a tuple.
.. function:: espressopp.interaction.ConstrainRG(k_rg)
:param k_rg: (default: 100.)
:type k_rg: real
.. function:: espressopp.interaction.FixedLocalTupleListConstrainRG(system, tuplelist, potential)
:param system:
:param tuplelist:
:param potential:
:type system:
:type tuplelist:
:type potential:
.. function:: espressopp.interaction.FixedLocalTupleListConstrainRG.getPotential()
:rtype:
.. function:: espressopp.interaction.FixedLocalTupleListConstrainRG.setRG(particlelist)
:param particlelist:
:type particlelist: python::list
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_ConstrainRG, interaction_FixedLocalTupleListConstrainRG
class ConstrainRGLocal(PotentialLocal, interaction_ConstrainRG):
def __init__(self, k_rg=100.):
"""Initialize the local ConstrainRG."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_ConstrainRG, k_rg)
class FixedLocalTupleListConstrainRGLocal(InteractionLocal, interaction_FixedLocalTupleListConstrainRG):
def __init__(self, system, fixedtuplelist, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedLocalTupleListConstrainRG, system, fixedtuplelist, potential)
def getPotential(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self)
def setRG(self, particleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
for id, particle in enumerate(particleList):
rg = particle[1]**2
self.cxxclass.setRG(self, id, rg)
if pmi.isController:
class ConstrainRG(Potential):
pmiproxydefs = dict(
cls = 'espressopp.interaction.ConstrainRGLocal',
pmiproperty = ['k_rg'],
)
class FixedLocalTupleListConstrainRG(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedLocalTupleListConstrainRGLocal',
pmicall = ['getPotential', 'setRG']
)
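# --- Hedged usage sketch (not part of the original file). Assumes `system` is a
# configured espressopp System and `tuples` a FixedLocalTupleList whose tuples
# satisfy the two conditions stated in the module docstring; the particle ids
# and target radii below are arbitrary placeholders. ---
def _example_constrain_rg(system, tuples):
    pot = ConstrainRG(k_rg=100.)
    interaction = FixedLocalTupleListConstrainRG(system, tuples, pot)
    # one (key particle id, desired radius of gyration) pair per subchain;
    # setRG squares the second entry internally
    interaction.setRG([(1, 1.5), (11, 1.5)])
    system.addInteraction(interaction)
    return interaction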
|
espressopp/espressopp
|
src/interaction/ConstrainRG.py
|
Python
|
gpl-3.0
| 4,044
|
[
"ESPResSo"
] |
edd0ddaee2d5ecbf0367c801bc55be630f63b1e04f03bd746a098e1b20a81ce5
|
import tensorflow as tf
import numpy as np
import math
class MySimpleModel(object):
def __init__(self, resize, label_size):
# session init
self.sess = tf.InteractiveSession()
# variable
self.x = tf.placeholder(tf.float32, shape=[None, resize*resize])
self.y_ = tf.placeholder(tf.float32, shape=[None, label_size])
self.W = tf.Variable(tf.zeros([resize*resize, label_size]))
self.b = tf.Variable(tf.zeros([label_size]))
# output
self.y = tf.nn.softmax(tf.matmul(self.x, self.W) + self.b)
# train value
self.cross_entropy = -tf.reduce_sum(self.y_ * tf.log(self.y))
self.train_step = tf.train.GradientDescentOptimizer(0.1).minimize(self.cross_entropy)
self.correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, "float"))
# variable initialize
self.sess.run(tf.initialize_all_variables())
def __del__(self):
self.sess.close()
def simple_train(self, pre_bat, bat):
for i in range(4):
self.train_step.run({self.x: pre_bat[0], self.y_: pre_bat[1]})
print self.accuracy.eval(feed_dict={self.x: bat[0], self.y_: bat[1]})
res = self.y.eval(feed_dict={self.x: bat[0]})
maxidx = self.sess.run(tf.argmax(res, 1)[0])
print res, maxidx
return maxidx, res[0, maxidx]
def backpropa_train(self, bat):
for i in range(4):
self.train_step.run({self.x: bat[0], self.y_: bat[1]})
def feedforward(self, tfimage):
res = self.y.eval(feed_dict={self.x: tfimage})
maxidx = self.sess.run(tf.argmax(res, 1)[0])
print '%f %%' % res[0, maxidx]
return maxidx, res[0, maxidx]
class MyTfModel(object):
def __init__(self, resize, label_size, conv):
# session
self.sess = tf.InteractiveSession()
# variable
self.x = tf.placeholder(tf.float32, shape=[None, resize*resize])
self.y_ = tf.placeholder(tf.float32, shape=[None, label_size])
self.W = tf.Variable(tf.zeros([resize*resize, label_size]))
self.b = tf.Variable(tf.zeros([label_size]))
self.x_img = tf.reshape(self.x, [-1, resize, resize, 1])
# convolution layer, 32 output
self.W_conv = self.weight_var([conv, conv, 1, 32], resize)
self.b_conv = self.bias_var([32])
# hidden layer
self.h_conv = tf.nn.relu(self.conv2d(self.x_img, self.W_conv) + self.b_conv)
self.h_pool = self.max_pool_2x2(self.h_conv)
# fully-connected layer, 1024 neuron
self.W_fc = self.weight_var([resize*resize/4 * 32, 1024], resize)
self.b_fc = self.bias_var([1024])
self.h_pool_flat = tf.reshape(self.h_pool, [-1, resize*resize/4 * 32])
self.h_fc = tf.nn.relu(tf.matmul(self.h_pool_flat, self.W_fc) + self.b_fc)
# readout layer
self.W_ro = self.weight_var([1024, label_size], resize)
self.b_ro = self.bias_var([label_size])
# output
self.y_conv = tf.nn.softmax(tf.matmul(self.h_fc, self.W_ro) + self.b_ro)
# training
self.cross_entropy = -tf.reduce_sum(self.y_ * tf.log(self.y_conv))
self.train_step= tf.train.GradientDescentOptimizer(0.1).minimize(self.cross_entropy)
self.correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
# graph initialize
self.sess.run(tf.initialize_all_variables())
def __del__(self):
self.sess.close()
def weight_var(self, shape, resize):
initial = tf.truncated_normal(shape, stddev=(1.0 / float(resize*resize)))
return tf.Variable(initial)
def bias_var(self, shape):
initial = tf.constant(0.001, shape=shape)
return tf.Variable(initial)
def conv2d(self, x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(self, x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def tf_train(self, pre_bat, bat):
for i in range(4):
self.train_step.run(feed_dict={self.x: pre_bat[0], self.y_:pre_bat[1]})
print self.accuracy.eval(feed_dict={self.x: bat[0], self.y_: bat[1]})
res = self.y_conv.eval(feed_dict={self.x: bat[0]})
maxidx = self.sess.run(tf.argmax(res, 1)[0])
print res, maxidx
return maxidx, res[0, maxidx]
def backpropa_train(self, bat):
for i in range(3):
self.train_step.run({self.x: bat[0], self.y_: bat[1]})
def feedforward(self, tfimage):
res = self.y_conv.eval(feed_dict={self.x: tfimage})
maxidx = self.sess.run(tf.argmax(res, 1)[0])
print '%f %%' % res[0, maxidx]
return maxidx, res[0, maxidx]
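# --- Hedged usage sketch (not part of the original file). Batches follow the
# (images, one-hot labels) convention used by simple_train/tf_train above;
# the sizes and values here are arbitrary placeholders. ---
def _example_simple_model():
    import numpy as np
    model = MySimpleModel(resize=28, label_size=10)
    images = np.random.rand(4, 28 * 28).astype(np.float32)
    labels = np.eye(10, dtype=np.float32)[[0, 1, 2, 3]]
    model.backpropa_train((images, labels))   # a few gradient steps
    return model.feedforward(images[:1])      # (predicted index, probability)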
|
lastone9182/flask
|
static/uploads/backup/mymodel.py
|
Python
|
mit
| 5,075
|
[
"NEURON"
] |
c00352aee226f3065e868f81ae94b1bc4eee6692ee5a45cab315e64b4d57cd7e
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-get-wn
# Author : Philippe Charpentier
########################################################################
"""
Get WNs for a selection of jobs
"""
import datetime
from functools import cmp_to_key
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
site = "BOINC.World.org"
status = ["Running"]
minorStatus = None
workerNodes = None
since = None
date = "today"
full = False
until = None
batchIDs = None
Script.registerSwitch("", "Site=", " Select site (default: %s)" % site)
Script.registerSwitch("", "Status=", " Select status (default: %s)" % status)
Script.registerSwitch("", "MinorStatus=", " Select minor status")
Script.registerSwitch("", "WorkerNode=", " Select WN")
Script.registerSwitch("", "BatchID=", " Select batch jobID")
Script.registerSwitch("", "Since=", " Date since when to select jobs, or number of days (default: today)")
Script.registerSwitch("", "Date=", " Specify the date (check for a full day)")
Script.registerSwitch("", "Full", " Printout full list of job (default: False except if --WorkerNode)")
Script.parseCommandLine()
from DIRAC import gLogger
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import JobMonitoringClient
switches = Script.getUnprocessedSwitches()
for switch in switches:
if switch[0] == "Site":
site = switch[1]
elif switch[0] == "MinorStatus":
minorStatus = switch[1]
elif switch[0] == "Status":
if switch[1].lower() == "all":
status = [None]
else:
status = switch[1].split(",")
elif switch[0] == "WorkerNode":
workerNodes = switch[1].split(",")
elif switch[0] == "BatchID":
try:
batchIDs = [int(id) for id in switch[1].split(",")]
except Exception:
gLogger.error("Invalid jobID", switch[1])
DIRAC.exit(1)
elif switch[0] == "Full":
full = True
elif switch[0] == "Date":
since = switch[1].split()[0]
until = str(datetime.datetime.strptime(since, "%Y-%m-%d") + datetime.timedelta(days=1)).split()[0]
elif switch[0] == "Since":
date = switch[1].lower()
if date == "today":
since = None
elif date == "yesterday":
since = 1
elif date == "ever":
since = 2 * 365
elif date.isdigit():
since = int(date)
date += " days"
else:
since = date
if isinstance(since, int):
since = str(datetime.datetime.now() - datetime.timedelta(days=since)).split()[0]
if workerNodes or batchIDs:
# status = [None]
full = True
monitoring = JobMonitoringClient()
dirac = Dirac()
# Get jobs according to selection
jobs = set()
for stat in status:
res = dirac.selectJobs(site=site, date=since, status=stat, minorStatus=minorStatus)
if not res["OK"]:
gLogger.error("Error selecting jobs", res["Message"])
DIRAC.exit(1)
allJobs = set(int(job) for job in res["Value"])
if until:
res = dirac.selectJobs(site=site, date=until, status=stat)
if not res["OK"]:
gLogger.error("Error selecting jobs", res["Message"])
DIRAC.exit(1)
allJobs -= set(int(job) for job in res["Value"])
jobs.update(allJobs)
if not jobs:
gLogger.always("No jobs found...")
DIRAC.exit(0)
# res = monitoring.getJobsSummary( jobs )
# print eval( res['Value'] )[jobs[0]]
allJobs = set()
result = {}
wnJobs = {}
gLogger.always("%d jobs found" % len(jobs))
# Get host name
for job in jobs:
res = monitoring.getJobParameter(job, "HostName")
node = res.get("Value", {}).get("HostName", "Unknown")
res = monitoring.getJobParameter(job, "LocalJobID")
batchID = res.get("Value", {}).get("LocalJobID", "Unknown")
if workerNodes:
if not [wn for wn in workerNodes if node.startswith(wn)]:
continue
allJobs.add(job)
if batchIDs:
if batchID not in batchIDs:
continue
allJobs.add(job)
if full or status == [None]:
allJobs.add(job)
result.setdefault(job, {})["Status"] = status
result[job]["Node"] = node
result[job]["LocalJobID"] = batchID
wnJobs[node] = wnJobs.setdefault(node, 0) + 1
# If necessary get jobs' status
statusCounters = {}
if allJobs:
allJobs = sorted(allJobs, reverse=True)
res = monitoring.getJobsStates(allJobs)
if not res["OK"]:
gLogger.error("Error getting job parameter", res["Message"])
else:
jobStates = res["Value"]
for job in allJobs:
stat = (
jobStates.get(job, {}).get("Status", "Unknown")
+ "; "
+ jobStates.get(job, {}).get("MinorStatus", "Unknown")
+ "; "
+ jobStates.get(job, {}).get("ApplicationStatus", "Unknown")
)
result[job]["Status"] = stat
statusCounters[stat] = statusCounters.setdefault(stat, 0) + 1
elif not workerNodes and not batchIDs:
allJobs = sorted(jobs, reverse=True)
# Print out result
if workerNodes or batchIDs:
gLogger.always("Found %d jobs at %s, WN %s (since %s):" % (len(allJobs), site, workerNodes, date))
if allJobs:
gLogger.always("List of jobs:", ",".join([str(job) for job in allJobs]))
else:
if status == [None]:
gLogger.always("Found %d jobs at %s (since %s):" % (len(allJobs), site, date))
for stat in sorted(statusCounters):
gLogger.always("%d jobs %s" % (statusCounters[stat], stat))
else:
gLogger.always("Found %d jobs %s at %s (since %s):" % (len(allJobs), status, site, date))
gLogger.always(
"List of WNs:",
",".join(
[
"%s (%d)" % (node, wnJobs[node])
for node in sorted(wnJobs, key=cmp_to_key(lambda n1, n2: (wnJobs[n2] - wnJobs[n1])))
]
),
)
if full:
if workerNodes or batchIDs:
nodeJobs = {}
for job in allJobs:
status = result[job]["Status"]
node = result[job]["Node"].split(".")[0]
jobID = result[job].get("LocalJobID")
nodeJobs.setdefault(node, []).append((jobID, job, status))
if not workerNodes:
workerNodes = sorted(nodeJobs)
for node in workerNodes:
for job in nodeJobs.get(node.split(".")[0], []):
gLogger.always("%s " % node + "(%s): %s - %s" % job)
else:
for job in allJobs:
status = result[job]["Status"]
node = result[job]["Node"]
jobID = result[job].get("LocalJobID")
gLogger.always("%s (%s): %s - %s" % (node, jobID, job, status))
if __name__ == "__main__":
main()
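# Hedged usage examples (option names come from the switches registered above;
# the site names, WN name and dates are placeholders):
#   dirac-wms-get-wn --Site=BOINC.World.org --Status=Running
#   dirac-wms-get-wn --Site=LCG.Example.org --Since=7 --Full
#   dirac-wms-get-wn --WorkerNode=wn001 --Date=2021-06-01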
|
ic-hep/DIRAC
|
src/DIRAC/WorkloadManagementSystem/scripts/dirac_wms_get_wn.py
|
Python
|
gpl-3.0
| 7,583
|
[
"DIRAC"
] |
784e4c4af2ae4d41f110cde836dcbc2b36c41df5749e1f0f845fbf27dd074d01
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Core scripts for the OrcaNet package.
"""
import os
import toml
import warnings
import time
from datetime import timedelta
import keras as ks
import orcanet.backend as backend
from orcanet.utilities.visualization import update_summary_plot
from orcanet.in_out import IOHandler
from orcanet.history import HistoryHandler
from orcanet.utilities.nn_utilities import load_zero_center_data, get_auto_label_modifier
import orcanet.logging as olog
class Organizer:
"""
Core class for working with networks in OrcaNet.
Attributes
----------
cfg : Configuration
Contains all configurable options.
io : orcanet.in_out.IOHandler
Utility functions for accessing the info in cfg.
history : orcanet.in_out.HistoryHandler
For reading and plotting data from the log files created
during training.
"""
def __init__(self, output_folder,
list_file=None,
config_file=None,
tf_log_level=None):
"""
Set the attributes of the Configuration object.
Instead of using a config_file, the attributes of orga.cfg can
also be changed directly, e.g. by calling orga.cfg.batchsize.
Parameters
----------
output_folder : str
Name of the folder of this model in which everything will be saved,
e.g., the summary.txt log file is located in here.
Will be used to load saved files or to save new ones.
list_file : str, optional
Path to a toml list file with pathes to all the h5 files that should
be used for training and validation.
Will be used to extract samples and labels.
config_file : str, optional
Path to a toml config file with settings that are used instead of
the default ones.
tf_log_level : int/str
Sets the TensorFlow CPP_MIN_LOG_LEVEL environment variable.
0 = all messages are logged (default behavior).
1 = INFO messages are not printed.
2 = INFO and WARNING messages are not printed.
3 = INFO, WARNING, and ERROR messages are not printed.
"""
if tf_log_level is not None:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(tf_log_level)
self.cfg = Configuration(output_folder, list_file, config_file)
self.io = IOHandler(self.cfg)
self.history = HistoryHandler(output_folder)
self.xs_mean = None
self._auto_label_modifier = None
self._stored_model = None
def train_and_validate(self, model=None, epochs=None):
"""
Train a model and validate according to schedule.
The various settings of this process can be controlled with the
attributes of orca.cfg.
The model will be trained on the given data, saved and validated.
Logfiles of the training are saved in the output folder.
Plots showing the training and validation history, as well as
the weights and activations of the network are generated in
the plots subfolder after every validation.
The training can be resumed by executing this function again.
Parameters
----------
model : ks.models.Model or str, optional
Compiled keras model to use for training. Required for the first
epoch (the start of training).
Can also be the path to a saved keras model, which will be loaded.
If model is None, the most recent saved model will be
loaded automatically to continue the training.
epochs : int, optional
How many epochs should be trained by running this function.
None for infinite.
Returns
-------
model : ks.models.Model
The trained keras model.
"""
latest_epoch = self.io.get_latest_epoch()
model = self._get_model(model, logging=False)
self._stored_model = model
# check if the validation is missing for the latest fileno
if latest_epoch is not None:
state = self.history.get_state()[-1]
if state["is_validated"] is False and self.val_is_due(latest_epoch):
self.validate()
next_epoch = self.io.get_next_epoch(latest_epoch)
n_train_files = self.io.get_no_of_files("train")
trained_epochs = 0
while epochs is None or trained_epochs < epochs:
# Train on remaining files
for file_no in range(next_epoch[1], n_train_files + 1):
curr_epoch = (next_epoch[0], file_no)
self.train(model)
if self.val_is_due(curr_epoch):
self.validate()
next_epoch = (next_epoch[0] + 1, 1)
trained_epochs += 1
self._stored_model = None
return model
def train(self, model=None):
"""
Trains a model on the next file.
The progress of the training is also logged and plotted.
Parameters
----------
model : ks.models.Model or str, optional
Compiled keras model to use for training. Required for the first
epoch (the start of training).
Can also be the path to a saved keras model, which will be loaded.
If model is None, the most recent saved model will be
loaded automatically to continue the training.
Returns
-------
history : dict
The history of the training on this file. A record of training
loss values and metrics values.
"""
# Create folder structure
self.io.get_subfolder(create=True)
latest_epoch = self.io.get_latest_epoch()
model = self._get_model(model, logging=True)
self._set_up(model, logging=True)
# epoch about to be trained
next_epoch = self.io.get_next_epoch(latest_epoch)
next_epoch_float = self.io.get_epoch_float(*next_epoch)
if latest_epoch is None:
self.io.check_connections(model)
olog.log_start_training(self)
model_path = self.io.get_model_path(*next_epoch)
model_path_local = self.io.get_model_path(*next_epoch, local=True)
if os.path.isfile(model_path):
raise FileExistsError(
"Can not train model in epoch {} file {}, this model has "
"already been saved!".format(*next_epoch))
smry_logger = olog.SummaryLogger(self, model)
lr = self.io.get_learning_rate(next_epoch)
ks.backend.set_value(model.optimizer.lr, lr)
files_dict = self.io.get_file("train", next_epoch[1])
line = "Training in epoch {} on file {}/{}".format(
next_epoch[0], next_epoch[1], self.io.get_no_of_files("train"))
self.io.print_log(line)
self.io.print_log("-" * len(line))
self.io.print_log("Learning rate is at {}".format(
ks.backend.get_value(model.optimizer.lr)))
self.io.print_log('Inputs and files:')
for input_name, input_file in files_dict.items():
self.io.print_log(" {}: \t{}".format(input_name,
os.path.basename(
input_file)))
start_time = time.time()
history = backend.train_model(self, model, next_epoch, batch_logger=True)
elapsed_s = int(time.time() - start_time)
model.save(model_path)
smry_logger.write_line(next_epoch_float, lr, history_train=history)
self.io.print_log('Training results:')
for metric_name, loss in history.items():
self.io.print_log(" {}: \t{}".format(metric_name, loss))
self.io.print_log("Elapsed time: {}".format(timedelta(seconds=elapsed_s)))
self.io.print_log("Saved model to: {}\n".format(model_path_local))
update_summary_plot(self)
if self.cfg.cleanup_models:
self.cleanup_models()
return history
def validate(self, make_weight_plots=True):
"""
Validate the most recent saved model on all validation files.
Will also log the progress, as well as update the summary plot and
plot weights and activations of the model.
Parameters
----------
make_weight_plots : bool
If true, generate and save plots of the activations and weights of
the network to the 'plots/' subfolder.
Returns
-------
history : dict
The history of the validation on all files. A record of validation
loss values and metrics values.
"""
latest_epoch = self.io.get_latest_epoch()
if latest_epoch is None:
raise ValueError("Can not validate: No saved model found")
if self.history.get_state()[-1]["is_validated"] is True:
raise ValueError("Can not validate in epoch {} file {}: "
"Has already been validated".format(*latest_epoch))
if self._stored_model is None:
model = self.load_saved_model(*latest_epoch)
else:
model = self._stored_model
self._set_up(model, logging=True)
epoch_float = self.io.get_epoch_float(*latest_epoch)
smry_logger = olog.SummaryLogger(self, model)
olog.log_start_validation(self)
start_time = time.time()
history = backend.validate_model(self, model)
elapsed_s = int(time.time() - start_time)
self.io.print_log('Validation results:')
for metric_name, loss in history.items():
self.io.print_log(f" {metric_name}: \t{loss}")
self.io.print_log(f"Elapsed time: {timedelta(seconds=elapsed_s)}\n")
smry_logger.write_line(epoch_float, "n/a", history_val=history)
update_summary_plot(self)
if make_weight_plots:
backend.save_actv_wghts_plot(
self, model, latest_epoch, samples=self.cfg.batchsize)
if self.cfg.cleanup_models:
self.cleanup_models()
return history
def predict(self, epoch=None, fileno=None, concatenate=False):
"""
Make a prediction if it does not exist yet, and return its filepath.
Load the model with the lowest validation loss, let it predict on
all samples of the validation set
in the toml list, and save this prediction together with all the
y_values as a h5 file in the predictions subfolder.
Parameters
----------
epoch : int, optional
Epoch of a model to load.
fileno : int, optional
File number of a model to load.
concatenate : bool
Whether the prediction files should also be concatenated.
Returns
-------
pred_filename : List
List to the paths of all created prediction files.
If concatenate = True, the list always only contains the
path to the concatenated prediction file.
"""
if fileno is None and epoch is None:
epoch, fileno = self.history.get_best_epoch_fileno()
print(f"Automatically set epoch to epoch {epoch} file {fileno}.")
elif fileno is None or epoch is None:
raise ValueError(
"Either both or none of epoch and fileno must be None")
is_pred_done = self._check_if_pred_already_done(epoch, fileno)
if is_pred_done:
print("Prediction has already been done.")
pred_filepaths = self.io.get_pred_files_list(epoch, fileno)
else:
model = self.load_saved_model(epoch, fileno, logging=False)
self._set_up(model)
start_time = time.time()
backend.make_model_prediction(self, model, epoch, fileno)
elapsed_s = int(time.time() - start_time)
print('Finished predicting on all validation files.')
print("Elapsed time: {}\n".format(timedelta(seconds=elapsed_s)))
pred_filepaths = self.io.get_pred_files_list(epoch, fileno)
# concatenate all prediction files if wished
concatenated_folder = self.io.get_subfolder("predictions") + '/concatenated'
n_val_files = self.io.get_no_of_files("val")
if concatenate is True and n_val_files > 1:
if not os.path.isdir(concatenated_folder):
print('Concatenating all prediction files to a single one.')
pred_filename_conc = self.io.concatenate_pred_files(concatenated_folder)
pred_filepaths = [pred_filename_conc]
else:
# omit directories if there are any in the concatenated folder
fname_conc_file_list = list(file for file in os.listdir(concatenated_folder)
if os.path.isfile(os.path.join(concatenated_folder,
file)))
pred_filepaths = [concatenated_folder + '/' + fname_conc_file_list[0]]
return pred_filepaths
def inference(self, epoch=None, fileno=None):
"""
Make an inference and return the filepaths.
Load the model with the lowest validation loss, let
it predict on all samples of the inference set
in the toml list, and save this prediction as a h5 file in the
predictions subfolder. y values will only be added if they are in
the input file, so this can be used on un-labelled data as well.
Parameters
----------
epoch : int, optional
Epoch of a model to load.
fileno : int, optional
File number of a model to load.
Returns
-------
filenames : list
List to the paths of all created output files.
"""
if fileno is None and epoch is None:
epoch, fileno = self.history.get_best_epoch_fileno()
print("Automatically set epoch to epoch {} file {}.".format(epoch, fileno))
elif fileno is None or epoch is None:
raise ValueError(
"Either both or none of epoch and fileno must be None")
model = self.load_saved_model(epoch, fileno, logging=False)
self._set_up(model)
filenames = []
for files_dict in self.io.yield_files("inference"):
# output filename is based on name of file in first input
first_filename = os.path.basename(list(files_dict.values())[0])
output_filename = "model_epoch_{}_file_{}_on_{}".format(
epoch, fileno, first_filename)
output_path = os.path.join(self.io.get_subfolder("predictions"),
output_filename)
filenames.append(output_path)
if os.path.exists(output_path):
warnings.warn("Warning: {} exists already, skipping "
"file".format(output_filename))
continue
start_time = time.time()
backend.h5_inference(
self, model, files_dict, output_path, use_def_label=False)
elapsed_s = int(time.time() - start_time)
print('Finished on file {} in {}'.format(
first_filename, timedelta(seconds=elapsed_s)))
return filenames
def cleanup_models(self):
"""
Delete all models except for the most recent one (to continue
training), and the ones with the highest and lowest loss/metrics.
"""
all_epochs = self.io.get_all_epochs()
epochs_to_keep = {self.io.get_latest_epoch(), }
try:
for metric in self.history.get_metrics():
epochs_to_keep.add(
self.history.get_best_epoch_fileno(
metric=f"val_{metric}", mini=True))
epochs_to_keep.add(
self.history.get_best_epoch_fileno(
metric=f"val_{metric}", mini=False))
except ValueError:
# no best epoch exists
pass
for epoch in epochs_to_keep:
if epoch not in all_epochs:
warnings.warn(
f"ERROR: keeping_epoch {epoch} not in available epochs {all_epochs}, "
f"skipping clean-up of models!")
return
print("\nClean-up saved models:")
for epoch in all_epochs:
model_path = self.io.get_model_path(epoch[0], epoch[1])
model_name = os.path.basename(model_path)
if epoch in epochs_to_keep:
print("Keeping model {}".format(model_name))
else:
print("Deleting model {}".format(model_name))
os.remove(model_path)
def _check_if_pred_already_done(self, epoch, fileno):
"""
Checks if the prediction has already been done before.
(-> predicted on all validation files)
Returns
-------
pred_done : bool
Boolean flag to specify if the prediction has
already been fully done or not.
"""
latest_pred_file_no = self.io.get_latest_prediction_file_no(epoch, fileno)
total_no_of_val_files = self.io.get_no_of_files('val')
pred_done = (latest_pred_file_no == total_no_of_val_files)
return pred_done
def get_xs_mean(self, logging=False):
"""
Set and return the zero center image for each list input.
Requires the cfg.zero_center_folder to be set. If no existing
image for the given input files is found in the folder, it will
be calculated and saved by averaging over all samples in the
train dataset.
Parameters
----------
logging : bool
If true, the execution of this function will be logged into the
full summary in the output folder if called for the first time.
Returns
-------
dict
Dict of numpy arrays that contains the mean_image of the x dataset
(1 array per list input).
Example format:
{ "input_A" : ndarray, "input_B" : ndarray }
"""
if self.xs_mean is None:
if self.cfg.zero_center_folder is None:
raise ValueError("Can not calculate zero center: "
"No zero center folder given")
self.xs_mean = load_zero_center_data(self, logging=logging)
return self.xs_mean
def load_saved_model(self, epoch, fileno, logging=False):
"""
Load a saved model.
Parameters
----------
epoch : int
Epoch of the saved model.
fileno : int
Fileno of the saved model.
logging : bool
If True, will log this function call into the log.txt file.
Returns
-------
model : keras model
"""
path_of_model = self.io.get_model_path(epoch, fileno)
path_loc = self.io.get_model_path(epoch, fileno, local=True)
self.io.print_log("Loading saved model: " + path_loc, logging=logging)
model = ks.models.load_model(
path_of_model, custom_objects=self.cfg.custom_objects)
return model
def _get_model(self, model, logging=False):
""" Load most recent saved model or use user model. """
latest_epoch = self.io.get_latest_epoch()
if latest_epoch is None:
# new training, log info about model
if model is None:
raise ValueError("You need to provide a compiled keras model "
"for the start of the training! (You gave None)")
elif isinstance(model, str):
# path to a saved model
self.io.print_log("Loading model from " + model, logging=logging)
model = ks.models.load_model(model)
if logging:
self._save_as_json(model)
model.summary(print_fn=self.io.print_log)
try:
plots_folder = self.io.get_subfolder("plots", create=True)
ks.utils.plot_model(model, plots_folder + "/model_plot.png")
except OSError as e:
warnings.warn("Can not plot model: " + str(e))
else:
# resuming training, load model if it is not given
if model is None:
model = self.load_saved_model(*latest_epoch, logging=logging)
elif isinstance(model, str):
# path to a saved model
self.io.print_log("Loading model from " + model, logging=logging)
model = ks.models.load_model(model)
return model
def _save_as_json(self, model):
""" Save the architecture of a model as json to fixed path. """
json_filename = "model_arch.json"
json_string = model.to_json(indent=1)
model_folder = self.io.get_subfolder("saved_models", create=True)
with open(os.path.join(model_folder, json_filename), "w") as f:
f.write(json_string)
def _set_up(self, model, logging=False):
""" Necessary setup for training, validating and predicting. """
if self.cfg.get_list_file() is None:
raise ValueError("No files specified. Need to load a toml "
"list file with pathes to h5 files first.")
if self.cfg.label_modifier is None:
self._auto_label_modifier = get_auto_label_modifier(model)
if self.cfg.zero_center_folder is not None:
self.get_xs_mean(logging)
def val_is_due(self, epoch=None):
"""
True if validation is due on given epoch according to schedule.
Does not check if it has been done already.
"""
if epoch is None:
epoch = self.io.get_latest_epoch()
n_train_files = self.io.get_no_of_files("train")
val_sched = (epoch[1] == n_train_files) or \
(self.cfg.validate_interval is not None and
epoch[1] % self.cfg.validate_interval == 0)
return val_sched
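# --- Hedged usage sketch (not part of the original file). The paths are
# placeholders; `model` must be a compiled keras model whose input/output
# names match the h5 files listed in the toml list file. ---
def _example_organizer_run(model):
    orga = Organizer("output/example_model",
                     list_file="example_list.toml",
                     config_file="example_config.toml")
    orga.cfg.batchsize = 32
    orga.train_and_validate(model, epochs=2)   # train and validate on schedule
    return orga.predict(concatenate=True)      # predict with the best model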
class Configuration(object):
"""
Contains all the configurable options in the OrcaNet scripts.
All of these public attributes (the ones without a
leading underscore) can be changed either directly or with a
.toml config file via the method update_config().
Attributes
----------
batchsize : int
Batchsize that will be used for the training and validation of
the network.
callback_train : keras callback or list or None
Callback or list of callbacks to use during training.
class_weight : dict or None
class_weight argument of fit_generator:
Optional dictionary mapping class indices (integers) to a weight
(float) value, used for weighting the loss function (during
training only). This can be useful to tell the model to
"pay more attention" to samples from an under-represented class.
cleanup_models : bool
If true, will only keep the best (in terms of val loss) and the most
recent from all saved models in order to save disk space.
custom_objects : dict or None
Optional dictionary mapping names (strings) to custom classes or
functions to be considered by keras during deserialization of models.
dataset_modifier : function or None
For orga.predict: Function that determines which datasets get created
in the resulting h5 file. If none, every output layer will get one
dataset each for both the label and the prediction, and one dataset
containing the y_values from the validation files.
key_x_values : str
The name of the datagroup in the h5 input files which contains
the samples for the network.
key_y_values : str
The name of the datagroup in the h5 input files which contains
the info for the labels.
label_modifier : function or None
Operation to be performed on batches of y_values read from the input
files before they are fed into the model as labels. If None is given,
all y_values with the same name as the output layers will be passed
to the model as a dict, with the keys being the dtype names.
learning_rate : float, tuple, function or str
The learning rate for the training.
If it is a float: The learning rate will be constantly this value.
If it is a tuple of two floats: The first float gives the learning rate
in epoch 1 file 1, and the second float gives the decrease of the
learning rate per file (e.g. 0.1 for 10% decrease per file).
If it is a function: Takes as an input the epoch and the
file number (in this order), and returns the learning rate.
If it is a str: Path to a csv file inside the main folder, containing
3 columns with the epoch, fileno, and the value the lr will be set
to when reaching this epoch/fileno.
max_queue_size : int
max_queue_size option of the keras training and evaluation generator
methods. How many batches get preloaded
from the generator.
n_events : None or int
For testing purposes. If not the whole .h5 file should be used for
training, define the number of samples.
sample_modifier : function or None
Operation to be performed on batches of x_values read from the input
files before they are fed into the model as samples.
shuffle_train : bool
If true, the order in which batches are read out from the files during
training is randomized each time they are read out.
train_logger_display : int
How many batches should be averaged for one line in the training log files.
train_logger_flush : int
After how many lines the training log file should be flushed (updated on
the disk). -1 for flush at the end of the file only.
output_folder : str
Name of the folder of this model in which everything will be saved,
e.g., the summary.txt log file is located in here.
use_scratch_ssd : bool
Only working at HPC Erlangen: Declares if the input files should be
copied to the node-local SSD scratch space.
validate_interval : int or None
Validate the model after this many training files have been trained on
in an epoch. There will always be a validation at the end of an epoch.
None for only validate at the end of an epoch.
Example: validate_interval=3 --> Validate after file 3, 6, 9, ...
verbose_train : int
verbose option of keras.model.fit_generator.
0 = silent, 1 = progress bar, 2 = one line per epoch.
verbose_val : int
verbose option of evaluate_generator.
0 = silent, 1 = progress bar.
zero_center_folder : None or str
Path to a folder in which zero centering images are stored.
If this path is set, zero centering images for the given dataset will
either be calculated and saved automatically at the start of the
training, or loaded if they have been saved before.
"""
# TODO add a clobber script that properly deletes models + logfiles
def __init__(self, output_folder, list_file=None, config_file=None, **kwargs):
"""
Set the attributes of the Configuration object.
Values are loaded from the given files, if provided. Otherwise, default
values are used.
Parameters
----------
output_folder : str
Name of the folder of this model in which everything will be saved,
e.g., the summary.txt log file is located in here.
list_file : str or None
Path to a toml list file with pathes to all the h5 files that should
be used for training and validation.
config_file : str or None
Path to a toml config file with attributes that are used instead of
the default ones.
kwargs
Overwrites the values given in the config file.
"""
self.batchsize = 64
self.learning_rate = 0.001
self.zero_center_folder = None
self.validate_interval = None
self.cleanup_models = False
self.class_weight = None
self.sample_modifier = None
self.dataset_modifier = None
self.label_modifier = None
self.key_x_values = "x"
self.key_y_values = "y"
self.custom_objects = None
self.shuffle_train = False
self.callback_train = None
self.use_scratch_ssd = False
self.verbose_train = 1
self.verbose_val = 0
self.n_events = None
self.max_queue_size = 10
self.train_logger_display = 100
self.train_logger_flush = -1
self._default_values = dict(self.__dict__)
# Main folder:
if output_folder[-1] == "/":
self.output_folder = output_folder
else:
self.output_folder = output_folder+"/"
# Private attributes:
self._files_dict = {
"train": None,
"val": None,
"inference": None,
}
self._list_file = None
# Load the optionally given list and config files.
if list_file is not None:
self.import_list_file(list_file)
if config_file is not None:
self.update_config(config_file)
# set given kwargs:
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
else:
raise AttributeError(
"Unknown attribute {}".format(key))
def import_list_file(self, list_file):
"""
Import the filepaths of the h5 files from a toml list file.
Parameters
----------
list_file : str
Path to the toml list file.
"""
if self._list_file is not None:
raise ValueError("Can not load list file: Has already been loaded! "
"({})".format(self._list_file))
file_content = toml.load(list_file)
name_mapping = {
"train_files": "train",
"validation_files": "val",
"inference_files": "inference",
}
for toml_name, files_dict_name in name_mapping.items():
files = self._extract_filepaths(file_content, toml_name)
self._files_dict[files_dict_name] = files or None
self._list_file = list_file
@staticmethod
def _extract_filepaths(file_content, which):
"""
Get train/val/inf filepaths for different inputs from a toml readout.
Makes sure that all input have the same number of files.
"""
allowed_which = ["train_files", "validation_files", "inference_files"]
assert which in allowed_which
files = {}
n_files = []
for input_key, input_values in file_content.items():
for key in input_values.keys():
if key not in allowed_which:
raise NameError(
f"Unknown argument '{key}' in toml file: "
f"Must be either of {allowed_which}")
if which in input_values:
files_input = tuple(input_values[which])
files[input_key] = files_input
n_files.append(len(files_input))
if n_files and n_files.count(n_files[0]) != len(n_files):
raise ValueError(
"Input with different number of {} in toml list".format(which))
return files
def update_config(self, config_file):
"""
Update the default cfg parameters with values from a toml config file.
Parameters
----------
config_file : str
Path to a toml config file.
"""
user_values = toml.load(config_file)["config"]
for key in user_values:
if hasattr(self, key):
setattr(self, key, user_values[key])
else:
raise AttributeError(
"Unknown attribute {} in config file ".format(key))
def get_list_file(self):
"""
Returns the path to the list file that was used to set the training
and validation files. None if no list file has been used.
"""
return self._list_file
def get_files(self, which):
"""
Get the training or validation file paths for each list input set.
Parameters
----------
which : str
Either "train", "val" or "inference".
Returns
-------
dict
A dict containing the paths to the training or validation files on
which the model will be trained on. Example for the format for
two input sets with two files each:
{
"input_A" : ('path/to/set_A_file_1.h5', 'path/to/set_A_file_2.h5'),
"input_B" : ('path/to/set_B_file_1.h5', 'path/to/set_B_file_2.h5'),
}
"""
if which not in self._files_dict.keys():
raise NameError("Unknown fileset name ", which)
if self._files_dict[which] is None:
raise AttributeError("No {} files have been specified!".format(which))
return self._files_dict[which]
@property
def default_values(self):
""" The default values for all settings. """
return self._default_values
@property
def key_samples(self):
""" Backward compatibility """
return self.key_x_values
@property
def key_labels(self):
""" Backward compatibility """
return self.key_y_values
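# Hedged example of the toml list file layout that import_list_file expects
# (all file and input names are placeholders); every input section must list
# the same number of files per category:
#
# [input_A]
# train_files = ["set_A_train_1.h5", "set_A_train_2.h5"]
# validation_files = ["set_A_val_1.h5"]
#
# [input_B]
# train_files = ["set_B_train_1.h5", "set_B_train_2.h5"]
# validation_files = ["set_B_val_1.h5"]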
|
ViaFerrata/DL_pipeline_TauAppearance
|
orcanet/core.py
|
Python
|
agpl-3.0
| 34,137
|
[
"ORCA"
] |
d4da5b3cf3440a23835e3a6c32fd4ecb9173de742019957c0db61d0b6f8bf75b
|
from math import *
import random
# don't change the noise parameters
steering_noise = 0.1
distance_noise = 0.03
measurement_noise = 0.3
class plan:
# --------
# init:
# creates an empty plan
#
def __init__(self, grid, init, goal, cost=1):
self.cost = cost
self.grid = grid
self.init = init
self.goal = goal
self.make_heuristic(grid, goal, self.cost)
self.path = []
self.spath = []
# --------
#
# make heuristic function for a grid
def make_heuristic(self, grid, goal, cost):
self.heuristic = [[0 for row in range(len(grid[0]))]
for col in range(len(grid))]
for i in range(len(self.grid)):
for j in range(len(self.grid[0])):
self.heuristic[i][j] = abs(i - self.goal[0]) + \
abs(j - self.goal[1])
# ------------------------------------------------
#
# A* for searching a path to the goal
#
#
def astar(self):
if self.heuristic == []:
raise ValueError, "Heuristic must be defined to run A*"
# internal motion parameters
delta = [[-1, 0], # go up
[0, -1], # go left
[1, 0], # go down
[0, 1]] # go right
# open list elements are of the type: [f, g, h, x, y]
closed = [[0 for row in range(len(self.grid[0]))]
for col in range(len(self.grid))]
action = [[0 for row in range(len(self.grid[0]))]
for col in range(len(self.grid))]
closed[self.init[0]][self.init[1]] = 1
x = self.init[0]
y = self.init[1]
h = self.heuristic[x][y]
g = 0
f = g + h
open = [[f, g, h, x, y]]
found = False # flag that is set when search complete
resign = False # flag set if we can't find expand
count = 0
while not found and not resign:
# check if we still have elements on the open list
if len(open) == 0:
resign = True
print '###### Search terminated without success'
else:
# remove node from list
open.sort()
open.reverse()
next = open.pop()
x = next[3]
y = next[4]
g = next[1]
# check if we are done
if x == self.goal[0] and y == self.goal[1]:
found = True
# print '###### A* search successful'
else:
# expand winning element and add to new open list
for i in range(len(delta)):
x2 = x + delta[i][0]
y2 = y + delta[i][1]
if x2 >= 0 and x2 < len(self.grid) and y2 >= 0 \
and y2 < len(self.grid[0]):
if closed[x2][y2] == 0 and self.grid[x2][y2] == 0:
g2 = g + self.cost
h2 = self.heuristic[x2][y2]
f2 = g2 + h2
open.append([f2, g2, h2, x2, y2])
closed[x2][y2] = 1
action[x2][y2] = i
count += 1
# extract the path
invpath = []
x = self.goal[0]
y = self.goal[1]
invpath.append([x, y])
while x != self.init[0] or y != self.init[1]:
x2 = x - delta[action[x][y]][0]
y2 = y - delta[action[x][y]][1]
x = x2
y = y2
invpath.append([x, y])
self.path = []
for i in range(len(invpath)):
self.path.append(invpath[len(invpath) - 1 - i])
# ------------------------------------------------
#
# this is the smoothing function
#
def smooth(self, weight_data=0.1, weight_smooth=0.1,
tolerance=0.000001):
if self.path == []:
raise ValueError, "Run A* first before smoothing path"
self.spath = [[0 for row in range(len(self.path[0]))] \
for col in range(len(self.path))]
for i in range(len(self.path)):
for j in range(len(self.path[0])):
self.spath[i][j] = self.path[i][j]
change = tolerance
while change >= tolerance:
change = 0.0
for i in range(1, len(self.path) - 1):
for j in range(len(self.path[0])):
aux = self.spath[i][j]
self.spath[i][j] += weight_data * \
(self.path[i][j] - self.spath[i][j])
self.spath[i][j] += weight_smooth * \
(self.spath[i - 1][j] + self.spath[i + 1][j]
- (2.0 * self.spath[i][j]))
if i >= 2:
self.spath[i][j] += 0.5 * weight_smooth * \
(2.0 * self.spath[i - 1][j] - self.spath[i - 2][j]
- self.spath[i][j])
if i <= len(self.path) - 3:
self.spath[i][j] += 0.5 * weight_smooth * \
(2.0 * self.spath[i + 1][j] - self.spath[i + 2][j]
- self.spath[i][j])
change += abs(aux - self.spath[i][j])
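# --- Hedged usage sketch (not part of the original file). Grid cells use the
# convention read by astar(): 0 = free, 1 = obstacle; values are placeholders. ---
def _example_plan():
    grid = [[0, 1, 0],
            [0, 1, 0],
            [0, 0, 0]]
    p = plan(grid, [0, 0], [0, 2])
    p.astar()                                     # fills p.path
    p.smooth(weight_data=0.1, weight_smooth=0.2)  # fills p.spath
    return p.spath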
# ------------------------------------------------
#
# this is the robot class
#
class robot:
# --------
# init:
# creates robot and initializes location/orientation to 0, 0, 0
#
def __init__(self, length=0.5):
self.x = 0.0
self.y = 0.0
self.orientation = 0.0
self.length = length
self.steering_noise = 0.0
self.distance_noise = 0.0
self.measurement_noise = 0.0
self.num_collisions = 0
self.num_steps = 0
# --------
# set:
# sets a robot coordinate
#
def set(self, new_x, new_y, new_orientation):
self.x = float(new_x)
self.y = float(new_y)
self.orientation = float(new_orientation) % (2.0 * pi)
# --------
# set_noise:
# sets the noise parameters
#
def set_noise(self, new_s_noise, new_d_noise, new_m_noise):
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.steering_noise = float(new_s_noise)
self.distance_noise = float(new_d_noise)
self.measurement_noise = float(new_m_noise)
# --------
# check:
# checks if the robot pose collides with an obstacle, or
# is too far outside the plane
def check_collision(self, grid):
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 1:
dist = sqrt((self.x - float(i)) ** 2 +
(self.y - float(j)) ** 2)
if dist < 0.5:
self.num_collisions += 1
return False
return True
def check_goal(self, goal, threshold=1.0):
dist = sqrt((float(goal[0]) - self.x) ** 2 + (float(goal[1]) - self.y) ** 2)
return dist < threshold
# --------
# move:
# steering = front wheel steering angle, limited by max_steering_angle
# distance = total distance driven, must be non-negative
def move(self, grid, steering, distance,
tolerance=0.001, max_steering_angle=pi / 4.0):
if steering > max_steering_angle:
steering = max_steering_angle
if steering < -max_steering_angle:
steering = -max_steering_angle
if distance < 0.0:
distance = 0.0
# make a new copy
res = robot()
res.length = self.length
res.steering_noise = self.steering_noise
res.distance_noise = self.distance_noise
res.measurement_noise = self.measurement_noise
res.num_collisions = self.num_collisions
res.num_steps = self.num_steps + 1
# apply noise
steering2 = random.gauss(steering, self.steering_noise)
distance2 = random.gauss(distance, self.distance_noise)
# Execute motion
turn = tan(steering2) * distance2 / res.length
if abs(turn) < tolerance:
# approximate by straight line motion
res.x = self.x + (distance2 * cos(self.orientation))
res.y = self.y + (distance2 * sin(self.orientation))
res.orientation = (self.orientation + turn) % (2.0 * pi)
else:
# approximate bicycle model for motion
radius = distance2 / turn
cx = self.x - (sin(self.orientation) * radius)
cy = self.y + (cos(self.orientation) * radius)
res.orientation = (self.orientation + turn) % (2.0 * pi)
res.x = cx + (sin(res.orientation) * radius)
res.y = cy - (cos(res.orientation) * radius)
# check for collision
# res.check_collision(grid)
return res
# --------
# sense:
#
def sense(self):
return [random.gauss(self.x, self.measurement_noise),
random.gauss(self.y, self.measurement_noise)]
# --------
# measurement_prob
# computes the probability of a measurement
#
def measurement_prob(self, measurement):
# compute errors
error_x = measurement[0] - self.x
error_y = measurement[1] - self.y
# calculate Gaussian
error = exp(- (error_x ** 2) / (self.measurement_noise ** 2) / 2.0) \
/ sqrt(2.0 * pi * (self.measurement_noise ** 2))
error *= exp(- (error_y ** 2) / (self.measurement_noise ** 2) / 2.0) \
/ sqrt(2.0 * pi * (self.measurement_noise ** 2))
return error
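    # i.e. the likelihood is the product of two independent 1-D Gaussians,
    #   p(z) = prod_d exp(-(z_d - mu_d)**2 / (2 * sigma**2)) / sqrt(2 * pi * sigma**2)
    # with mu = (x, y) and sigma = measurement_noise.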
def __repr__(self):
# return '[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation)
return '[%.5f, %.5f]' % (self.x, self.y)
# ------------------------------------------------
#
# this is the particle filter class
#
class particles:
# --------
# init:
# creates particle set with given initial position
#
def __init__(self, x, y, theta,
steering_noise, distance_noise, measurement_noise, N=100):
self.N = N
self.steering_noise = steering_noise
self.distance_noise = distance_noise
self.measurement_noise = measurement_noise
self.data = []
for i in range(self.N):
r = robot()
r.set(x, y, theta)
r.set_noise(steering_noise, distance_noise, measurement_noise)
self.data.append(r)
# --------
#
# extract position from a particle set
#
def get_position(self):
x = 0.0
y = 0.0
orientation = 0.0
for i in range(self.N):
x += self.data[i].x
y += self.data[i].y
# orientation is tricky because it is cyclic. By normalizing
# around the first particle we are somewhat more robust to
# the 0=2pi problem
orientation += (((self.data[i].orientation
- self.data[0].orientation + pi) % (2.0 * pi))
+ self.data[0].orientation - pi)
return [x / self.N, y / self.N, orientation / self.N]
# --------
#
# motion of the particles
#
def move(self, grid, steer, speed):
newdata = []
for i in range(self.N):
r = self.data[i].move(grid, steer, speed)
newdata.append(r)
self.data = newdata
# --------
#
# sensing and resampling
#
def sense(self, Z):
w = []
for i in range(self.N):
w.append(self.data[i].measurement_prob(Z))
# resampling (careful, this is using shallow copy)
p3 = []
index = int(random.random() * self.N)
beta = 0.0
mw = max(w)
for i in range(self.N):
beta += random.random() * 2.0 * mw
while beta > w[index]:
beta -= w[index]
index = (index + 1) % self.N
p3.append(self.data[index])
self.data = p3
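    # The loop above is the classic "resampling wheel": beta advances by a
    # uniform step in [0, 2 * max(w)) and index walks forward until the wheel
    # lands inside a particle, so each particle is drawn with probability
    # proportional to its importance weight.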
# --------
#
# run: runs control program for the robot
#
def run(grid, goal, spath, params, printflag=False, speed=0.1, timeout=1000):
myrobot = robot()
myrobot.set(0., 0., 0.)
myrobot.set_noise(steering_noise, distance_noise, measurement_noise)
filter = particles(myrobot.x, myrobot.y, myrobot.orientation,
steering_noise, distance_noise, measurement_noise)
cte = 0.0
err = 0.0
N = 0
index = 0 # index into the path
while not myrobot.check_goal(goal) and N < timeout:
diff_cte = - cte
# ----------------------------------------
# compute the CTE
# start with the present robot estimate
estimate = filter.get_position()
# some basic vector calculations
dx = spath[index + 1][0] - spath[index][0]
dy = spath[index + 1][1] - spath[index][1]
drx = estimate[0] - spath[index][0]
dry = estimate[1] - spath[index][1]
        # u is the robot estimate projected onto the path segment
u = (drx * dx + dry * dy) / (dx * dx + dy * dy)
# the cte is the estimate projected onto the normal of the path segment
cte = (dry * dx - drx * dy) / (dx * dx + dy * dy)
# pick the next path segment
        if u > 1.0 and index < len(spath) - 2:  # keep spath[index + 1] in range
index += 1
# ----------------------------------------
diff_cte += cte
steer = - params[0] * cte - params[1] * diff_cte
myrobot = myrobot.move(grid, steer, speed)
filter.move(grid, steer, speed)
Z = myrobot.sense()
filter.sense(Z)
if not myrobot.check_collision(grid):
print '##### Collision ####'
err += (cte ** 2)
N += 1
if printflag:
print myrobot, cte, index, u
return [myrobot.check_goal(goal), myrobot.num_collisions, myrobot.num_steps]
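# Note: the steering law inside run() is a PD controller on the cross track
# error, steer = -p_gain * cte - d_gain * (cte - cte_prev); diff_cte
# accumulates exactly (cte - cte_prev) via the two updates surrounding the
# CTE computation.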
# ------------------------------------------------
#
# this is our main routine
#
def main(grid, init, goal, steering_noise, distance_noise, measurement_noise,
weight_data, weight_smooth, p_gain, d_gain):
path = plan(grid, init, goal)
path.astar()
path.smooth(weight_data, weight_smooth)
return run(grid, goal, path.spath, [p_gain, d_gain])
# ------------------------------------------------
#
# input data and parameters
#
# grid format:
# 0 = navigable space
# 1 = occupied space
grid = [[0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 0]]
init = [0, 0]
goal = [len(grid) - 1, len(grid[0]) - 1]
steering_noise = 0.1
distance_noise = 0.03
measurement_noise = 0.3
weight_data = 0.09
weight_smooth = 0.2
p_gain = 1.9
d_gain = 6.0
print main(grid, init, goal, steering_noise, distance_noise, measurement_noise,
weight_data, weight_smooth, p_gain, d_gain)
|
AKS1996/VOCOWA
|
segmented_cte.py
|
Python
|
mit
| 15,242
|
[
"Gaussian"
] |
777cc560054b71be200cddd8a230032e81de8f3734323482844f8ba9bcd8f6ed
|
import cStringIO
import logging
import operator as op
from antlr3.tree import CommonTree as AST
from lib.typecheck import *
import lib.const as C
import lib.visit as v
from .. import util
from . import field_nonce, register_field
import expression as exp
import clazz
class Field(v.BaseNode):
def __init__(self, **kwargs):
self._id = field_nonce()
self._clazz = kwargs.get("clazz", None) # for Java-to-C translation
self._annos = kwargs.get("annos", [])
self._mods = kwargs.get("mods", [])
self._typ = kwargs.get("typ", None)
self._name = kwargs.get("name", None)
self._init = kwargs.get("init", None)
register_field(self)
@property
def id(self):
return self._id
@property
def clazz(self):
return self._clazz
@clazz.setter
def clazz(self, v):
self._clazz = v
@property
def annos(self):
return self._annos
@property
def mods(self):
return self._mods
@property
def is_private(self):
return C.mod.PR in self._mods
@property
def is_static(self):
return C.mod.ST in self._mods
@property
def is_final(self):
return C.mod.FN in self._mods
@property
def is_aliasing(self):
if not self._init: return False
if not self.is_static or not self.is_final: return False
fld_a = None
if self._init.kind == C.E.DOT:
rcv_ty = exp.typ_of_e(None, self._init.le)
fld_a = clazz.find_fld(rcv_ty, self._init.re.id)
elif self._init.kind == C.E.ID:
fld_a = clazz.find_fld(self._clazz.name, self._init.id)
    return fld_a is not None
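  # (is_aliasing holds for a static final field whose initializer merely
  # refers to another field, i.e. an alias constant)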
@property
def typ(self):
return self._typ
@typ.setter
def typ(self, v):
self._typ = v
@property
def name(self):
return self._name
@name.setter
def name(self, v):
self._name = v
@property
def init(self):
return self._init
@init.setter
def init(self, v):
self._init = v
def __repr__(self):
return u"{}_{}".format(self._name, util.sanitize_ty(self._clazz.name))
def __str__(self):
buf = cStringIO.StringIO()
if self._mods: buf.write(' '.join(self._mods) + ' ')
buf.write(' '.join([self._typ, self._name]))
if self._init: buf.write(" = " + str(self._init))
buf.write(';')
return buf.getvalue()
def __eq__(self, other):
return repr(self) == repr(other)
def accept(self, visitor):
visitor.visit(self)
if self._init: self._init = self._init.accept(visitor)
def jsonify(self):
m = {}
if self._mods: m["mods"] = self._mods
m["type"] = self._typ
m["name"] = self._name
return m
  # merge a field definition from another template
def merge(self, other):
# double-check it refers to the same field
assert self._name == other.name
assert self._typ == other.typ
# adopt init expression if exists
if not self._init and other.init:
logging.debug("merging: {} -> {}".format(other.init, repr(self)))
self._init = other.init
# (DECL (ANNOTATION ...)* modifier* ((FIELD|METHOD) ...))
# (FIELD (TYPE Id) (NAME Id (= (E... ))?))
@takes("Clazz", AST, list_of("Anno"), list_of(unicode))
@returns(nothing)
def parse(cls, node, annos, mods):
_node = node.getChildren()[-1]
typ = util.implode_id(_node.getChild(0))
name = _node.getChild(1)
fid = name.getChild(0).getText()
if name.getChildCount() > 1:
init = exp.parse_e(name.getChild(1).getChild(0), cls)
else: init = None
fld = Field(clazz=cls, annos=annos, mods=mods, typ=typ, name=fid, init=init)
cls.flds.append(fld)
|
plum-umd/pasket
|
pasket/meta/field.py
|
Python
|
mit
| 3,489
|
[
"VisIt"
] |
d094501731bb4ef581730a247bef9939d166bd8fba46f4ccca1ab9b31c49006e
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import time
from functools import reduce
import numpy
from pyscf import symm
from pyscf import lib
from pyscf.tdscf import uhf
from pyscf.scf import uhf_symm
from pyscf.scf import _response_functions
from pyscf.data import nist
from pyscf import __config__
# Low excitation filter to avoid numerical instability
POSTIVE_EIG_THRESHOLD = getattr(__config__, 'tdscf_rhf_TDDFT_positive_eig_threshold', 1e-3)
class TDA(uhf.TDA):
def nuc_grad_method(self):
from pyscf.grad import tduks
return tduks.Gradients(self)
class TDDFT(uhf.TDHF):
def nuc_grad_method(self):
from pyscf.grad import tduks
return tduks.Gradients(self)
RPA = TDUKS = TDDFT
class TDDFTNoHybrid(TDA):
''' Solve (A-B)(A+B)(X+Y) = (X+Y)w^2
'''
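    # Note: for functionals without exact exchange, A - B is diagonal with
    # elements e_ia = e_a - e_i, so the non-Hermitian TDDFT equations reduce
    # to the Hermitian eigenproblem
    #     (A-B)^1/2 (A+B) (A-B)^1/2 Z = w^2 Z,   Z = (A-B)^-1/2 (X+Y).
    # In get_vind below, d_ia = sqrt(e_ia) plays the role of (A-B)^1/2 and
    # hdiag = e_ia^2 is the diagonal used to precondition the Davidson solver.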
def get_vind(self, mf):
wfnsym = self.wfnsym
singlet = self.singlet
mol = mf.mol
mo_coeff = mf.mo_coeff
assert(mo_coeff[0].dtype == numpy.double)
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
nao, nmo = mo_coeff[0].shape
occidxa = numpy.where(mo_occ[0]>0)[0]
occidxb = numpy.where(mo_occ[1]>0)[0]
viridxa = numpy.where(mo_occ[0]==0)[0]
viridxb = numpy.where(mo_occ[1]==0)[0]
nocca = len(occidxa)
noccb = len(occidxb)
nvira = len(viridxa)
nvirb = len(viridxb)
orboa = mo_coeff[0][:,occidxa]
orbob = mo_coeff[1][:,occidxb]
orbva = mo_coeff[0][:,viridxa]
orbvb = mo_coeff[1][:,viridxb]
if wfnsym is not None and mol.symmetry:
if isinstance(wfnsym, str):
wfnsym = symm.irrep_name2id(mol.groupname, wfnsym)
orbsyma, orbsymb = uhf_symm.get_orbsym(mol, mo_coeff)
wfnsym = wfnsym % 10 # convert to D2h subgroup
orbsyma = orbsyma % 10
orbsymb = orbsymb % 10
sym_forbida = (orbsyma[occidxa,None] ^ orbsyma[viridxa]) != wfnsym
sym_forbidb = (orbsymb[occidxb,None] ^ orbsymb[viridxb]) != wfnsym
sym_forbid = numpy.hstack((sym_forbida.ravel(), sym_forbidb.ravel()))
e_ia_a = (mo_energy[0][viridxa,None] - mo_energy[0][occidxa]).T
e_ia_b = (mo_energy[1][viridxb,None] - mo_energy[1][occidxb]).T
e_ia = numpy.hstack((e_ia_a.reshape(-1), e_ia_b.reshape(-1)))
if wfnsym is not None and mol.symmetry:
e_ia[sym_forbid] = 0
d_ia = numpy.sqrt(e_ia).ravel()
ed_ia = e_ia.ravel() * d_ia
hdiag = e_ia.ravel() ** 2
vresp = mf.gen_response(mo_coeff, mo_occ, hermi=1)
def vind(zs):
nz = len(zs)
zs = numpy.asarray(zs).reshape(nz,-1)
if wfnsym is not None and mol.symmetry:
zs = numpy.copy(zs)
zs[:,sym_forbid] = 0
dmsa = (zs[:,:nocca*nvira] * d_ia[:nocca*nvira]).reshape(nz,nocca,nvira)
dmsb = (zs[:,nocca*nvira:] * d_ia[nocca*nvira:]).reshape(nz,noccb,nvirb)
dmsa = lib.einsum('xov,po,qv->xpq', dmsa, orboa, orbva.conj())
dmsb = lib.einsum('xov,po,qv->xpq', dmsb, orbob, orbvb.conj())
dmsa = dmsa + dmsa.conj().transpose(0,2,1)
dmsb = dmsb + dmsb.conj().transpose(0,2,1)
v1ao = vresp(numpy.asarray((dmsa,dmsb)))
v1a = lib.einsum('xpq,po,qv->xov', v1ao[0], orboa.conj(), orbva)
v1b = lib.einsum('xpq,po,qv->xov', v1ao[1], orbob.conj(), orbvb)
hx = numpy.hstack((v1a.reshape(nz,-1), v1b.reshape(nz,-1)))
hx += ed_ia * zs
hx *= d_ia
return hx
return vind, hdiag
def kernel(self, x0=None, nstates=None):
'''TDDFT diagonalization solver
'''
cpu0 = (time.clock(), time.time())
mf = self._scf
if mf._numint.libxc.is_hybrid_xc(mf.xc):
raise RuntimeError('%s cannot be used with hybrid functional'
% self.__class__)
self.check_sanity()
self.dump_flags()
if nstates is None:
nstates = self.nstates
else:
self.nstates = nstates
log = lib.logger.Logger(self.stdout, self.verbose)
vind, hdiag = self.get_vind(self._scf)
precond = self.get_precond(hdiag)
if x0 is None:
x0 = self.init_guess(self._scf, self.nstates)
def pickeig(w, v, nroots, envs):
idx = numpy.where(w > POSTIVE_EIG_THRESHOLD**2)[0]
return w[idx], v[:,idx], idx
self.converged, w2, x1 = \
lib.davidson1(vind, x0, precond,
tol=self.conv_tol,
nroots=nstates, lindep=self.lindep,
max_space=self.max_space, pick=pickeig,
verbose=log)
mo_energy = self._scf.mo_energy
mo_occ = self._scf.mo_occ
occidxa = numpy.where(mo_occ[0]>0)[0]
occidxb = numpy.where(mo_occ[1]>0)[0]
viridxa = numpy.where(mo_occ[0]==0)[0]
viridxb = numpy.where(mo_occ[1]==0)[0]
nocca = len(occidxa)
noccb = len(occidxb)
nvira = len(viridxa)
nvirb = len(viridxb)
e_ia_a = (mo_energy[0][viridxa,None] - mo_energy[0][occidxa]).T
e_ia_b = (mo_energy[1][viridxb,None] - mo_energy[1][occidxb]).T
e_ia = numpy.hstack((e_ia_a.reshape(-1), e_ia_b.reshape(-1)))
e_ia = numpy.sqrt(e_ia)
e = []
xy = []
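        # Recover X and Y from each reduced eigenvector z:
        #   X + Y = (A-B)^1/2 z   and   X - Y = w (A-B)^-1/2 z,
        # then rescale so that the usual normalization |X|^2 - |Y|^2 = 1 holds.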
for i, z in enumerate(x1):
if w2[i] < POSTIVE_EIG_THRESHOLD**2:
continue
w = numpy.sqrt(w2[i])
zp = e_ia * z
zm = w/e_ia * z
x = (zp + zm) * .5
y = (zp - zm) * .5
norm = lib.norm(x)**2 - lib.norm(y)**2
if norm > 0:
norm = 1/numpy.sqrt(norm)
e.append(w)
xy.append(((x[:nocca*nvira].reshape(nocca,nvira) * norm, # X_alpha
x[nocca*nvira:].reshape(noccb,nvirb) * norm), # X_beta
(y[:nocca*nvira].reshape(nocca,nvira) * norm, # Y_alpha
y[nocca*nvira:].reshape(noccb,nvirb) * norm)))# Y_beta
self.e = numpy.array(e)
self.xy = xy
if self.chkfile:
lib.chkfile.save(self.chkfile, 'tddft/e', self.e)
lib.chkfile.save(self.chkfile, 'tddft/xy', self.xy)
log.timer('TDDFT', *cpu0)
log.note('Excited State energies (eV)\n%s', self.e * nist.HARTREE2EV)
return self.e, self.xy
def nuc_grad_method(self):
from pyscf.grad import tduks
return tduks.Gradients(self)
class dRPA(TDDFTNoHybrid):
def __init__(self, mf):
from pyscf import scf
from pyscf.dft.rks import KohnShamDFT
if not isinstance(mf, KohnShamDFT):
raise RuntimeError("direct RPA can only be applied with DFT; for HF+dRPA, use .xc='hf'")
mf = scf.addons.convert_to_uhf(mf)
mf.xc = ''
TDDFTNoHybrid.__init__(self, mf)
TDH = dRPA
class dTDA(TDA):
def __init__(self, mf):
from pyscf import scf
from pyscf.dft.rks import KohnShamDFT
if not isinstance(mf, KohnShamDFT):
raise RuntimeError("direct TDA can only be applied with DFT; for HF+dTDA, use .xc='hf'")
mf = scf.addons.convert_to_uhf(mf)
mf.xc = ''
TDA.__init__(self, mf)
def tddft(mf):
'''Driver to create TDDFT or TDDFTNoHybrid object'''
if mf._numint.libxc.is_hybrid_xc(mf.xc):
return TDDFT(mf)
else:
return TDDFTNoHybrid(mf)
from pyscf import dft
dft.uks.UKS.TDA = dft.uks_symm.UKS.TDA = lib.class_as_method(TDA)
dft.uks.UKS.TDHF = dft.uks_symm.UKS.TDHF = None
#dft.uks.UKS.TDDFT = dft.uks_symm.UKS.TDDFT = lib.class_as_method(TDDFT)
dft.uks.UKS.TDDFTNoHybrid = dft.uks_symm.UKS.TDDFTNoHybrid = lib.class_as_method(TDDFTNoHybrid)
dft.uks.UKS.TDDFT = dft.uks_symm.UKS.TDDFT = tddft
dft.uks.UKS.dTDA = dft.uks_symm.UKS.dTDA = lib.class_as_method(dTDA)
dft.uks.UKS.dRPA = dft.uks_symm.UKS.dRPA = lib.class_as_method(dRPA)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
['H' , (0. , 0. , .917)],
['F' , (0. , 0. , 0.)], ]
mol.basis = '631g'
mol.build()
mf = dft.UKS(mol)
mf.xc = 'lda, vwn_rpa'
mf.scf()
td = mf.TDDFTNoHybrid()
#td.verbose = 5
td.nstates = 5
print(td.kernel()[0] * 27.2114)
# [ 9.08754011 9.08754011 9.7422721 9.7422721 12.48375928]
mf = dft.UKS(mol)
mf.xc = 'b88,p86'
mf.scf()
td = mf.TDDFT()
td.nstates = 5
#td.verbose = 5
print(td.kernel()[0] * 27.2114)
# [ 9.09321047 9.09321047 9.82203065 9.82203065 12.29842071]
mf = dft.UKS(mol)
mf.xc = 'lda,vwn'
mf.scf()
td = mf.TDA()
td.nstates = 5
print(td.kernel()[0] * 27.2114)
# [ 9.01393088 9.01393088 9.68872733 9.68872733 12.42444633]
mol.spin = 2
mf = dft.UKS(mol)
mf.xc = 'lda, vwn_rpa'
mf.scf()
td = TDDFTNoHybrid(mf)
#td.verbose = 5
td.nstates = 5
print(td.kernel()[0] * 27.2114)
# [ 0.0765857 3.16823079 15.20150204 18.40379107 21.11477253]
mf = dft.UKS(mol)
mf.xc = 'b88,p86'
mf.scf()
td = TDDFT(mf)
td.nstates = 5
#td.verbose = 5
print(td.kernel()[0] * 27.2114)
# [ 0.05161674 3.57883843 15.0960023 18.33537454 20.76914967]
mf = dft.UKS(mol)
mf.xc = 'lda,vwn'
mf.scf()
td = TDA(mf)
td.nstates = 5
print(td.kernel()[0] * 27.2114)
# [ 0.16142061 3.22811366 14.98443928 18.29273507 21.18410081]
|
gkc1000/pyscf
|
pyscf/tdscf/uks.py
|
Python
|
apache-2.0
| 10,463
|
[
"PySCF"
] |
0557bd3c63d7bab1887edc1880c5df3210bc85aef77b417bf1bce3d64447e0de
|
"""
IPOL SIFT
"""
from lib import base_app, build, http
from lib.misc import app_expose, ctime
from lib.base_app import init_app
from cherrypy import TimeoutError
import cherrypy
import os.path
import shutil
######## WARNING OVERLOADING EMPTY_APP #########
######## WARNING OVERLOADING EMPTY_APP #########
######## WARNING OVERLOADING EMPTY_APP #########
import config_json
######## WARNING OVERLOADING EMPTY_APP #########
######## WARNING OVERLOADING EMPTY_APP #########
######## WARNING OVERLOADING EMPTY_APP #########
from .lib_demo_sift import draw_keys, draw_keys_oriented, \
draw_matches, find_nearest_keypoint,\
illustrate_pair, draw_one_match, Image
class app(base_app):
""" demo app """
title = "Anatomy of the SIFT Method"
input_nb = 2
input_max_pixels = None
input_max_method = 'zoom'
input_dtype = '1x8i'
input_ext = '.png'
is_test = False
xlink_article = "http://www.ipol.im/pub/pre/82/"
xlink_src = "http://www.ipol.im/pub/pre/82/sift_anatomy_20140314.zip"
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
def init_cfg(self):
"""
reinitialize the config dictionary between 2 page calls
"""
# read the config dict
self.cfg = config_json.cfg_open(self.work_dir)
# default three sections
self.cfg.setdefault('param', {})
self.cfg.setdefault('info', {})
self.cfg.setdefault('meta', {})
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
    ######## WARNING OVERLOADING EMPTY_APP #########
def __init__(self):
"""
app setup
"""
# setup the parent class
base_dir = os.path.dirname(os.path.abspath(__file__))
base_app.__init__(self, base_dir)
# select the base_app steps to expose
app_expose(base_app.index)
app_expose(base_app.input_select)
app_expose(base_app.input_upload)
app_expose(base_app.params)
def build(self):
"""
program build/update
"""
zip_filename = 'sift_anatomy_20140314.zip'
src_dir_name = 'sift_anatomy_20140314/'
prog_filename = 'sift_cli'
prog_filename2 = 'match_cli'
prog_filename3 = 'demo_extract_patch'
# store common file path in variables
tgz_file = self.dl_dir + zip_filename
prog_file = self.bin_dir + prog_filename
log_file = self.base_dir + "build.log"
# get the latest source archive
build.download(app.xlink_src, tgz_file)
# test if the dest file is missing, or too old
if (os.path.isfile(prog_file)
and ctime(tgz_file) < ctime(prog_file)):
            cherrypy.log("no rebuild needed",
context='BUILD', traceback=False)
else:
# extract the archive
build.extract(tgz_file, self.src_dir)
# build the program
build.run("make -j4 -C %s" % (self.src_dir + src_dir_name),
stdout=log_file)
# save into bin dir
if os.path.isdir(self.bin_dir):
shutil.rmtree(self.bin_dir)
os.mkdir(self.bin_dir)
# copy all the programs to the bin dir
shutil.copy(self.src_dir +
os.path.join(os.path.join(src_dir_name,'bin'),
prog_filename), os.path.join(self.bin_dir,
prog_filename) )
shutil.copy(self.src_dir +
os.path.join(os.path.join(src_dir_name,'bin'),
prog_filename2), os.path.join(self.bin_dir,
prog_filename2) )
shutil.copy(self.src_dir +
os.path.join(os.path.join(src_dir_name,'bin'),
prog_filename3), os.path.join(self.bin_dir,
prog_filename3) )
# cleanup the source dir
shutil.rmtree(self.src_dir)
return
@cherrypy.expose
@init_app
def wait(self, **kwargs):
"""
run redirection
"""
# Initialize default values
self.cfg['param']['newrun'] = False
self.cfg['param']['action'] = 'std_sift_matching'
self.cfg['param']['show'] = 'result'
self.cfg['param']['x'] = '-1'
self.cfg['param']['y'] = '-1'
VALID_KEYS = [
'newrun',
'action',
'show',
'x',
'y',
'n_oct',
'n_spo',
'sigma_min',
'delta_min',
'sigma_in',
'C_DoG',
'C_edge',
'n_bins',
'lambda_ori',
't',
'n_hist',
'n_ori',
'lambda_descr',
'flag_match',
'C_match']
        # keep previously stored custom parameters if present,
        # otherwise fall back to the defaults
        if 'paradic' not in self.cfg['param']:
            self.load_standard_parameters()
# PROCESS ALL THE INPUTS
for prp in kwargs.keys():
if( prp in VALID_KEYS ):
if (prp == 'newrun'):
self.cfg['param']['newrun'] = kwargs[prp]
elif (prp == 'action'):
self.cfg['param']['action'] = kwargs[prp]
elif (prp == 'show'):
self.cfg['param']['show'] = kwargs[prp]
elif (prp == 'x'):
self.cfg['param']['x'] = kwargs[prp]
elif (prp == 'y'):
self.cfg['param']['y'] = kwargs[prp]
else:
self.cfg['param']['paradic'][prp] = kwargs[prp]
self.cfg.save()
http.refresh(self.base_url + 'run?key=%s' % self.key)
return self.tmpl_out("wait.html")
@cherrypy.expose
@init_app
def result(self, **kwargs):
"""
display the algo results
"""
VALID_KEYS = ['show']
for prp in kwargs.keys():
if( prp in VALID_KEYS ):
if (prp == 'show'):
self.cfg['param']['show'] = kwargs[prp]
self.cfg.save()
show = self.cfg['param']['show']
if (show == 'antmy_detect'):
return self.tmpl_out("antmy_detect.html")
elif (show == 'antmy_descr_match'):
return self.tmpl_out("antmy_descr_match.html")
elif (show == 'antmy_gauss_scsp'):
return self.tmpl_out("antmy_gauss_scsp.html")
else: # show == basic
return self.tmpl_out("result.html")
def load_standard_parameters(self):
"""
Load default parameters of the method
"""
paradic = {'x':'0',
'y':'0',
'n_oct':'8',
'n_spo':'3',
'sigma_min':'0.8',
'delta_min':'0.5',
'sigma_in':'0.5',
'C_DoG':'0.015',
'C_edge':'10',
'n_bins':'36',
'lambda_ori':'1.5',
't':'0.8',
'n_hist':'4',
'n_ori':'8',
'lambda_descr':'6',
'flag_match':'1',
'C_match':'0.6'}
self.cfg['param']['paradic'] = paradic
self.cfg.save()
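    # Rough meaning of the parameters above (the IPOL article is the
    # authoritative reference): n_oct and n_spo set the number of octaves and
    # scales per octave of the Gaussian scale-space; sigma_min, delta_min and
    # sigma_in control the initial blur and sampling; C_DoG and C_edge are the
    # DoG contrast and edge-response thresholds; n_bins, lambda_ori and t
    # govern the orientation histogram; n_hist, n_ori and lambda_descr shape
    # the descriptor grid; flag_match and C_match select the matching method
    # and its threshold.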
@cherrypy.expose
@init_app
def run(self):
"""
Accepted value for 'ACTION' flag
std_sift_matching : run SIFT and MATCHING with default settings,
cust_sift_matching : run SIFT and MATCHING with customized settings
cust_matching : run MATCHING with customized settings.
Each action also runs the appropriate illustration routines.
Accepted value for 'SHOW' flag
result : standard results,
antmy_detect : Anatomy of SIFT, keypoint detection,
antmy_descr_match : Anatomy of SIFT, description and matching,
antmy_gauss_scsp : Anatomy of SIFT, Gaussian scale-space.
"""
# read (x,y) - Set SIFT parameters
action = self.cfg['param']['action']
x = float(self.cfg['param']['x'])
        # Expressed in the PIL coordinate system (y vertical, x horizontal)
y = float(self.cfg['param']['y'])
# read image size and store in 'param' dict to control html rendering
work_dir = self.work_dir
[w1, h1] = Image.open(work_dir+'input_0.orig.png').size
[w2, h2] = Image.open(work_dir+'input_1.orig.png').size
wdth = max(w1, w2)
hght = max(h1, h2)
wpair = int(w1+w2+max(w1, w2)/10)
self.cfg['param']['hght'] = hght
self.cfg['param']['wdth'] = wdth
self.cfg.save()
# Convert x y provided by the form <input type=image ..; >
        # We assume that the width of the html body is 920px
x = x*wpair/920
y = y*wpair/920
self.cfg['param']['x'] = x
self.cfg['param']['y'] = y
self.cfg.save()
if (action == 'std_sift_matching'):
try:
self.load_standard_parameters()
self.run_std_sift()
self.run_matching()
self.illustrate_std_sift()
self.illustrate_matching()
except TimeoutError:
return self.error(errcode='timeout',
errmsg="Try again with simpler images.")
except RuntimeError:
return self.error(errcode='runtime',
errmsg="Runtime error in std_sift_matching.")
elif (action == "cust_sift_matching"):
try:
self.run_sift()
print "after run_sift()"
self.run_matching()
print "after run_matching()"
self.illustrate_sift()
print "after illustrate_sift()"
self.illustrate_matching()
self.detail_matching()
except TimeoutError:
return self.error(errcode='timeout',
errmsg="Try with simpler images.")
except RuntimeError:
return self.error(errcode='runtime',
errmsg="Runtime error in cust_sift_matching.")
elif (action == "cust_matching"):
try:
self.run_matching()
self.illustrate_matching()
except TimeoutError:
return self.error(errcode='timeout',
errmsg="Try with simpler images.")
except RuntimeError:
return self.error(errcode='runtime',
errmsg="Runtime error in cust_matching.")
else:
try:
self.detail_matching()
except TimeoutError:
return self.error(errcode='timeout',
errmsg="Try with simpler images.")
except RuntimeError:
return self.error(errcode='runtime',
errmsg="Runtime error in else (you know).")
## archive
if self.cfg['meta']['original']:
ar = self.make_archive()
ar.add_file("input_0.png", info="first input image")
ar.add_file("input_1.png", info="second input image")
ar.add_file("input_0.orig.png", info="first uploaded image")
ar.add_file("input_1.orig.png", info="second uploaded image")
ar.add_file("OUTmatches.png", info="matches")
ar.add_file("keys_im0.txt", compress=True)
ar.add_file("keys_im1.txt", compress=True)
ar.add_file("OUTmatches.txt", compress=True)
ar.save()
self.cfg.save()
http.redir_303(self.base_url + 'result?key=%s' % self.key)
return self.tmpl_out("run.html")
def run_std_sift(self):
"""
Run the SIFT algorithm on each of the two images
with standard parameters
"""
for i in range(2):
image = 'input_'+str(i)+'.png'
label = 'im'+str(i)
f = open(self.work_dir+'keys_'+label+'.txt','w')
sift = self.run_proc(['sift_cli', image], stdout=f)
self.wait_proc(sift, timeout=self.timeout)
return 1
def run_sift(self):
"""
Run the SIFT algorithm on each of the two images
with custom parameters
"""
paradic = self.cfg['param']['paradic']
for i in range(2):
image = 'input_'+str(i)+'.png'
label = 'im'+str(i)
f = open(self.work_dir+'keys_'+label+'.txt','w')
sift = self.run_proc(['sift_cli', image, label,
str(paradic['n_oct']),
str(paradic['n_spo']),
str(paradic['sigma_min']),
str(paradic['delta_min']),
str(paradic['sigma_in']),
str(paradic['C_DoG']),
str(paradic['C_edge']),
str(paradic['n_bins']),
str(paradic['lambda_ori']),
str(paradic['t']),
str(paradic['n_hist']),
str(paradic['n_ori']),
str(paradic['lambda_descr'])],
stdout=f)
self.wait_proc(sift, timeout=self.timeout)
return 1
def run_matching(self):
"""
        Run the matching algorithm on a pair of keypoint sets.
        input : keys_im0.txt
                keys_im1.txt
        argument : flag_match , method flag
                   C_match , threshold
        extra arguments :
                  n_hist, n_ori   to read the feature vector and
                                  to save the keypoints' feature
                                  vectors in ASCII files,
                  n_bins          to save the keypoints' orientation
                                  histograms in ASCII files
"""
paradic = self.cfg['param']['paradic']
print 'in run_matching() n_bins = ' +str(paradic['n_bins'])
f = open(self.work_dir+'matches.txt','w')
matching = self.run_proc(['match_cli', 'keys_im0.txt',
'keys_im1.txt',
str(paradic['flag_match']),
str(paradic['C_match']),
str(paradic['n_hist']),
str(paradic['n_ori']),
str(paradic['n_bins'])],
stdout=f)
self.wait_proc(matching, timeout=self.timeout)
return 1
def illustrate_matching(self):
"""
Draw matching keypoints in each image.
Draw matches on the pair of images.
"""
work_dir = self.work_dir
draw_keys_oriented(work_dir+'matching_keys_im0.txt',
work_dir+'input_0.orig.png',
work_dir+'matching_keys_im0.png')
draw_keys_oriented(work_dir+'matching_keys_im1.txt',
work_dir+'input_1.orig.png',
work_dir+'matching_keys_im1.png')
draw_matches(work_dir+'matches.txt',
work_dir+'input_0.orig.png',
work_dir+'input_1.orig.png',
work_dir+'OUTmatches.png')
return 1
def illustrate_std_sift(self):
"""
Draw detected keypoints in each image.
"""
work_dir = self.work_dir
draw_keys_oriented(work_dir+'keys_im0.txt',
work_dir+'input_0.orig.png',
work_dir+'keys_im0.png')
draw_keys_oriented(work_dir+'keys_im1.txt',
work_dir+'input_1.orig.png',
work_dir+'keys_im1.png')
return 1
def illustrate_sift(self):
"""
Draw keypoints at each stage on each image
"""
        print 'entering illustrate_sift()'
work_dir = self.work_dir
draw_keys_oriented(work_dir+'keys_im0.txt',
work_dir+'input_0.orig.png',
work_dir+'keys_im0.png')
draw_keys_oriented(work_dir+'keys_im1.txt',
work_dir+'input_1.orig.png',
work_dir+'keys_im1.png')
        print 'illustrate_sift(): keypoints drawn'
for im in ['0','1']:
for kypts in ['NES', 'DoGSoftThresh', 'ExtrInterp',
'ExtrInterpREJ', 'DoGThresh', 'OnEdgeResp',
'OnEdgeRespREJ']:
draw_keys(work_dir+'extra_'+kypts+'_im'+im+'.txt',
work_dir+'input_'+im+'.orig.png',
work_dir+'extra_'+kypts+'_im'+im+'.png')
draw_keys_oriented(work_dir+'extra_OriAssignedMULT_im'+im+'.txt',
work_dir+'input_'+im+'.orig.png',
work_dir+'extra_OriAssignedMULT_im'+im+'.png')
return 1
def detail_matching(self):
"""
        Illustrate the match closest to the selected pixel:
        draw the keypoint pair and extract detail patches.
"""
paradic = self.cfg['param']['paradic']
work_dir = self.work_dir
x = float(self.cfg['param']['x']) # selected pixel in the first image
y = float(self.cfg['param']['y'])
# sift parameters
# number of bins in the orientation histogram
n_bins = int(paradic['n_bins'])
n_hist = int(paradic['n_hist'])
        # descriptor made of n_hist x n_hist weighted histograms of n_ori bins
n_ori = int(paradic['n_ori'])
delta_min = float(paradic['delta_min'])
sigma_min = float(paradic['sigma_min'])
sigma_in = float(paradic['sigma_in'])
lambda_ori = float(paradic['lambda_ori'])
lambda_descr = float(paradic['lambda_descr'])
#threshold defining reference orientations
n_spo = int(paradic['n_spo'])
# Read feature vectors from output files
if (os.path.getsize(work_dir+'OUTmatches.txt') > 0 ):
pairdata = find_nearest_keypoint(work_dir+'OUTmatches.txt', y, x)
illustrate_pair(pairdata, n_bins, n_hist, n_ori, work_dir)
# Read keys coordinates.
d = 6+n_bins+n_hist*n_hist*n_ori # size of keydata inside pairdata
v = n_hist*n_hist*n_ori
[x1, y1, sigma1, theta1] = [float(x) for x in pairdata[0:4]]
[o1, s1] = [float(x) for x in pairdata[4+v:4+v+2]]
[x2a, y2a, sigma2a, theta2a] = [float(x) for x in pairdata[d:d+4]]
[o2a, s2a] = [float(x) for x in pairdata[d+4+v:d+4+v+2]]
[x2b, y2b, sigma2b, theta2b] = \
[float(x) for x in pairdata[2*d:2*d+4]]
[o2b, s2b] = [float(x) for x in pairdata[2*d+4+v:2*d+4+v+2]]
draw_one_match(pairdata,
work_dir+'input_0.png',
work_dir+'input_1.png',
d,
lambda_ori,
lambda_descr,
n_hist,
work_dir+'OUTonepair.png')
# Extract thumbnails.
# keypoint 1 (image 1)
print ' '.join(['demo_extract_patch', work_dir+'input_0.png',
str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),
str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),
str(lambda_ori), str(lambda_descr), str(n_hist),
work_dir+"detail_im1"])
proc = self.run_proc(['demo_extract_patch', work_dir+'input_0.png',
str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),
str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),
str(lambda_ori), str(lambda_descr), str(n_hist),
work_dir+"detail_im1"])
self.wait_proc(proc, timeout=self.timeout)
# keypoint 2a (nearest neighbor in image 2)
print ' '.join(['demo_extract_patch', work_dir+'input_1.png',
str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),
str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),
str(lambda_ori), str(lambda_descr), str(n_hist),
work_dir+"detail_im2a"])
proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',
str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),
str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),
str(lambda_ori), str(lambda_descr), str(n_hist),
work_dir+"detail_im2a"])
self.wait_proc(proc, timeout=self.timeout)
# keypoint 2b (second nearest neighbor in image 2)
proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',
str(x2b), str(y2b), str(sigma2b), str(theta2b), str(o2b), str(s2b),
str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),
str(lambda_ori), str(lambda_descr), str(n_hist),
work_dir+"detail_im2b"])
self.wait_proc(proc, timeout=self.timeout)
return 1
|
juan-cardelino/matlab_demos
|
ipol_demo-light-1025b85/app_available/82/app.py
|
Python
|
gpl-2.0
| 22,321
|
[
"Gaussian"
] |
182b3a8ee5e0fd03c4af716097bbdd9f69d2974df440c20a195aac7490885ec5
|
# this is a temporary python file that will help me to understand the likelihood and times of the minimisation
import numpy
def return_likelihood_list(path):
"""
the function returns the log-likelihood of the given file,
the function does not check before/after and does not do anything smart with all the images
other functions will do this
"""
f = file(path, "r")
lines = f.read().splitlines()
like_list = []
like_album_list = []
like_image_list = []
for line in lines:
like_list.append(float(line.split(" ")[-1]))
if "album" in line:
like_album_list.append(float(line.split(" ")[-1]))
if "image" in line:
like_image_list.append(float(line.split(" ")[-1]))
like_list = numpy.array(like_list)
like_album_list = numpy.array(like_album_list)
like_image_list = numpy.array(like_image_list)
return like_list, like_album_list, like_image_list
def return_likelihood_per_iter(path):
"""
    the function returns the likelihood after aggregation per iteration
    it returns two lists of before and after in order to plot them in different colors
"""
f = file(path, "r")
lines = f.read().splitlines()
iter_num = []
album_before = []
album_after = []
iter_val = 0
for line in lines:
if "album before" in line:
iter_val += 1
album_before.append(float(line.split(" ")[-1]))
iter_num.append(iter_val)
elif "album after" in line:
album_after.append(float(line.split(" ")[-1]))
iter_num = numpy.array(iter_num)
album_before = numpy.array(album_before)
album_after = numpy.array(album_after)
return iter_num, album_before, album_after
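# A minimal plotting sketch (not part of the original workflow; assumes
# matplotlib is available, and the helper name is arbitrary):
def _plot_likelihood_per_iter(path):
    import matplotlib.pyplot as plt
    iter_num, album_before, album_after = return_likelihood_per_iter(path)
    plt.plot(iter_num, album_before, "r.", label="album before")
    plt.plot(iter_num, album_after, "b.", label="album after")
    plt.xlabel("iteration")
    plt.ylabel("log-likelihood")
    plt.legend()
    plt.show()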
def return_likelihood_per_iter_and_gaus_num(path):
"""
    the function returns the likelihood before and after the minimisation as a function of iteration and Gaussian number
"""
f = file(path, "r")
lines = f.read().splitlines()
iter_num = []
gaus_num = []
album_before = []
album_after = []
iter_val = 0
for line in lines:
if ("Phase I," in line) and ("album before" in line):
iter_val += 1
album_before.append(float(line.split(" ")[-1]))
gaus_num.append(3)
iter_num.append(iter_val)
elif ("Phase I," in line) and ("album after" in line):
album_after.append(float(line.split(" ")[-1]))
elif ("Phase II," in line) and ("album before" in line):
iter_val += 1
album_before.append(float(line.split(" ")[-1]))
gaus_num.append(5)
iter_num.append(iter_val)
elif ("Phase II," in line) and ("album after" in line):
album_after.append(float(line.split(" ")[-1]))
elif ("Phase III," in line) and ("album before" in line):
iter_val += 1
album_before.append(float(line.split(" ")[-1]))
gaus_num.append(7)
iter_num.append(iter_val)
elif ("Phase III," in line) and ("album after" in line):
album_after.append(float(line.split(" ")[-1]))
iter_num = numpy.array(iter_num)
gaus_num = numpy.array(gaus_num)
album_before = numpy.array(album_before)
album_after = numpy.array(album_after)
return iter_num, gaus_num, album_before, album_after
|
davidwhogg/DeprojectAllGalaxies
|
scripts/times_and_likelihood.py
|
Python
|
mit
| 2,979
|
[
"Gaussian"
] |
ddd7c96eace592c2a26acc4fef6d58db51c91c55df783f92eec1021d0dd05795
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Library for creating different architectures for policies.
Each policy \Pi: S -> A is a mapping from the set of states to the set of
actions. Each policy provides a method that takes as an input state s and
outputs action a recommended by the policy.
"""
import abc
import copy
import math
import numpy as np
import scipy
class Policy(abc.ABC):
r"""Abstract class for different policies \Pi: S -> A.
Class is responsible for creating different policies and provides an interface
for computing actions recommended by policies in different input states.
In particular, this class provides an interface that accepts compressed
vectorized form of the policy and decompresses it.
Standard procedure for improving the parameters of the policy with an
interface given by the class:
policy = policies.ParticularClassThatInheritsFromBaseClass(...)
vectorized_network = policy.get_initial()
while(...):
new_vectorized_network = SomeTransformationOf(vectorized_network)
policy.update(new_vectorized_network)
and SomeTransformationOf is a single step of some optimization procedure such
as gradient descent that sees the policy in the vectorized form.
"""
@abc.abstractmethod
def update(self, vectorized_parameters):
"""Updates the policy using new parameters from <vectorized_parameters>.
Updates the parameters of the policy using new parameters encoded by
<vectorized_parameters>. The size of the vector <vectorized_parameters>
should be the number of all biases and weights of the neural network.
We use the convention where parameters encoding matrices of connections of
the neural network come in <vectorized_parameters> before parameters
encoding biases and furthermore the order in <vectorized_parameters> of
parameters encoding weights for different matrices/biases-vectors is
inherited from the order of these matrices/biases-vectors in the
decompressed neural network. Details regarding compression depend on
different neural network architectures used (such as: structured and
unstructured) and are given in the implementations of that abstract method
in specific classes that inherit from Policy.
Args:
vectorized_parameters: parameters of the neural network in the vectorized
form.
Returns:
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_action(self, state):
"""Returns the action proposed by a policy in a given state.
Returns an action proposed by the policy in <state>.
Args:
state: input state
Returns:
Action proposed by the policy represented by an object of the class in a
given state.
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_initial(self):
"""Returns the default parameters of the policy in the vectorized form.
Initial parameters of the policy are output in the vectorized form.
Args:
Returns:
Numpy array encoding in the vectorized form initial parameters of the
policy.
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_total_num_parameters(self):
"""Outputs total number of parameters of the policy.
Args:
Returns:
Total number of parameters used by the policy.
"""
raise NotImplementedError('Abstract method')
def reset(self):
"""Resets any relevant parameters in the policy."""
pass
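# A minimal usage sketch of the interface above (illustrative only; the
# gradient estimator and the dimension/step-size names are placeholders,
# not part of this library):
#
#   policy = UnstructuredNeuralNetworkPolicy(s_dim, a_dim, [32], [np.tanh])
#   params = policy.get_initial()
#   for _ in range(num_iterations):
#     params = params + step_size * estimate_gradient(params)  # assumed
#     policy.update(params)
#     action = policy.get_action(state)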
class UnstructuredNeuralNetworkPolicy(Policy):
"""Derives from Policy and encodes a policy using unstructured neural network.
This class encodes agent's policy as an unstructured neural network fed with
the state of an agent and outputting recommended action. "Unstructured" means
that the matrices of the neural network are not constrained to live in the
lower-dimensional space, have low-displacement rank, etc. Thus the policy
is determined by the full set of all the weights and biases.
"""
def __init__(
self,
state_dimensionality,
action_dimensionality,
hidden_layers,
nonlinearities,
low=None,
high=None,
):
"""Sets up parameters of the unstructured neural network.
Args:
state_dimensionality: dimensionality of the first layer
action_dimensionality: dimensionality of the last layer
hidden_layers: list of sizes of hidden layers
nonlinearities: list of nonlinear mapping applied pointwise in hidden
layers; each nonlinearity is a mapping f: R^{n} ->R^{n},
where n - dimensionality of the input vector as well as
its nonlinear transformation
low: A list of minimum bounds for the action.
high: A list of maximum bounds for the action array.
"""
matrices = []
matrices.append(
np.zeros(state_dimensionality * hidden_layers[0]).reshape(
hidden_layers[0], state_dimensionality))
for i in range(0, len(hidden_layers) - 1):
matrices.append(
np.zeros(hidden_layers[i] * hidden_layers[i + 1]).reshape(
hidden_layers[i + 1], hidden_layers[i]))
matrices.append(
np.zeros(hidden_layers[len(hidden_layers) - 1] *
action_dimensionality).reshape(
action_dimensionality,
hidden_layers[len(hidden_layers) - 1]))
biases = []
for i in range(len(hidden_layers)):
biases.append(np.zeros(hidden_layers[i]).reshape(hidden_layers[i], 1))
self.matrices = matrices
self.biases = biases
self.nonlinearities = nonlinearities
self.state_dimensionality = state_dimensionality
self.action_dimensionality = action_dimensionality
self.hidden_layers = hidden_layers
self.low = low
self.high = high
super().__init__()
def update(self, vectorized_parameters):
new_matrices = []
current_index = 0
new_matrices.append(vectorized_parameters[current_index:current_index +
self.state_dimensionality *
self.hidden_layers[0]].reshape(
self.hidden_layers[0],
self.state_dimensionality))
current_index += self.state_dimensionality * self.hidden_layers[0]
for i in range(0, len(self.hidden_layers) - 1):
new_matrices.append(
vectorized_parameters[current_index:current_index +
self.hidden_layers[i] *
self.hidden_layers[i + 1]].reshape(
self.hidden_layers[i + 1],
self.hidden_layers[i]))
current_index += self.hidden_layers[i] * self.hidden_layers[i + 1]
new_matrices.append(
vectorized_parameters[current_index:current_index +
self.hidden_layers[len(self.hidden_layers) - 1] *
self.action_dimensionality].reshape(
self.action_dimensionality,
self.hidden_layers[len(self.hidden_layers) -
1]))
current_index += self.hidden_layers[len(self.hidden_layers) -
1] * self.action_dimensionality
new_biases = []
for i in range(len(self.hidden_layers)):
new_biases.append(vectorized_parameters[current_index:current_index +
self.hidden_layers[i]].reshape(
self.hidden_layers[i], 1))
current_index += self.hidden_layers[i]
self.matrices = new_matrices
self.biases = new_biases
def get_action(self, state):
state = np.reshape(state, (len(state), 1))
for i in range(len(self.matrices) - 1):
state = np.matmul(self.matrices[i], state)
state = np.add(state, self.biases[i])
state = (self.nonlinearities[i])(state)
action = np.matmul(self.matrices[len(self.matrices) - 1], state)
if self.low is not None and self.high is not None:
action = np.tanh(action)
for i in range(len(action)):
action[i][0] = (
action[i][0] * (self.high[i] - self.low[i]) / 2.0 +
(self.low[i] + self.high[i]) / 2.0)
return action
def get_initial(self):
# The initial policy is given by weights and biases taken independently at
# random from the Gaussian distribution.
np.random.seed(100)
vectorized_list = []
for i in range(len(self.matrices)):
next_matrix = 1.0 / math.sqrt(float(len(
self.matrices[i]))) * np.random.randn(
len(self.matrices[i]) * len(self.matrices[i][0]))
vectorized_list.append(next_matrix)
for i in range(len(self.biases)):
next_biases_vector = np.random.randn(len(self.biases[i]))
vectorized_list.append(next_biases_vector)
vectorized_network = np.concatenate(vectorized_list)
return vectorized_network
def get_total_num_parameters(self):
total = 0
for i in range(len(self.matrices)):
total += len(self.matrices[i]) * len(self.matrices[i][0])
for i in range(len(self.biases)):
total += len(self.biases[i])
return total
class TwoLayerTanhToeplitzNNP(Policy):
"""Derives from Policy and encodes a policy using Toeplitz neural network.
This class encodes agent's policy as a structured neural network fed with
the state of an agent and outputting recommended action. The neural network
has two hidden layers, each followed by tanh nonlinearity. All the matrices
of connections are constrained to be Toeplitz matrices. This policy also
supports state normalization. If the state_normalization flag is on,
the policy keeps track of the necessary state normalization parameters.
  First it has a field self.global_num_steps that stores the number of
  global steps taken so far, used in the computation of the state mean
  and state covariance. It also has fields self.state_mean and
  self.state_covariance that store the state mean and the state
  covariance respectively. When state_normalization = True, there are two
  main changes:
  1) The policy evaluation changes. Specifically, if the state mean is mu,
  the state covariance is cov, and the neural network computes function f,
  the policy pi: S -> A takes the form pi(s) = f(diag(cov)^{-1/2} (s - mu)),
  where diag(cov)^{-1/2} stands for the diagonal of the state covariance
  raised to the power -1/2.
2) Storing and reading a vectorized policy includes parameters encoding
the global number of steps, state mean and state covariance. The vectorized
parameters vector takes the form:
[global_num_steps, state_mean, vectorized(state_covariance), nn_params]
where vectorized(state_covariance) is a state_dim**2 vector made of a
vectorized version of the state covariance matrix. When the
state_normalization flag is on, all methods including init,
get_action, get_initial, update work under this underlying assumption.
"""
def __init__(self,
state_dimensionality,
action_dimensionality,
first_hidden_size,
second_hidden_size,
low=None,
high=None,
state_normalization=False):
"""Sets up parameters of the unstructured neural network.
Args:
state_dimensionality: dimensionality of the first layer
action_dimensionality: dimensionality of the last layer
first_hidden_size: size of the first hidden layer
second_hidden_size: size of the second hidden layer
low: array of lower bounds for actions' dimensions
high: array for upper bounds for actions' dimensions
state_normalization: determines if state normalization is used or not
"""
first_threshold = state_dimensionality + first_hidden_size - 1
second_threshold = first_threshold + first_hidden_size + second_hidden_size
second_threshold -= 1
third_threshold = second_threshold + second_hidden_size
third_threshold += action_dimensionality - 1
fourth_threshold = third_threshold + first_hidden_size
fifth_threshold = fourth_threshold + second_hidden_size
nb_parameters = (state_dimensionality + first_hidden_size -
1) + (first_hidden_size + second_hidden_size -
1) + (second_hidden_size + action_dimensionality -
1) + first_hidden_size + second_hidden_size
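    # An m x n Toeplitz matrix is determined by its first column and first
    # row, i.e. m + n - 1 free scalars, which is where the (in + out - 1)
    # terms in nb_parameters above come from.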
vectorized_parameters = np.zeros(nb_parameters)
first_column = vectorized_parameters[0:first_hidden_size]
first_row = vectorized_parameters[first_hidden_size - 1:first_threshold]
first_matrix = scipy.linalg.toeplitz(first_column, first_row)
second_column = vectorized_parameters[first_threshold:first_threshold +
second_hidden_size]
second_row = vectorized_parameters[first_threshold + second_hidden_size -
1:second_threshold]
second_matrix = scipy.linalg.toeplitz(second_column, second_row)
third_column = vectorized_parameters[second_threshold:second_threshold +
action_dimensionality]
third_row = vectorized_parameters[second_threshold + action_dimensionality -
1:third_threshold]
third_matrix = scipy.linalg.toeplitz(third_column, third_row)
first_biases = vectorized_parameters[
third_threshold:fourth_threshold].reshape((first_hidden_size, 1))
second_biases = vectorized_parameters[
fourth_threshold:fifth_threshold].reshape((second_hidden_size, 1))
self.matrices = [first_matrix, second_matrix, third_matrix]
self.biases = [first_biases, second_biases]
self.state_dimensionality = state_dimensionality
self.action_dimensionality = action_dimensionality
self.first_hidden_size = first_hidden_size
self.second_hidden_size = second_hidden_size
self.first_threshold = first_threshold
self.second_threshold = second_threshold
self.third_threshold = third_threshold
self.fourth_threshold = fourth_threshold
self.fifth_threshold = fifth_threshold
self.state_normalization = state_normalization
if state_normalization:
self.global_num_steps = 0
self.state_mean = np.zeros(self.state_dimensionality)
self.state_covariance = np.zeros(
(self.state_dimensionality, self.state_dimensionality))
def tanh(x):
      critical_barrier = 20.0
      if x > critical_barrier:
        return 1.0
      if x < -critical_barrier:
        return -1.0
return 2.0 / (1.0 + math.exp(0.0 - 2.0 * x)) - 1.0
def nonlinearity(state):
for i in range(len(state)):
state[i][0] = tanh(state[i][0])
return state
self.nonlinearity = nonlinearity
self.low = low
self.high = high
super().__init__()
def update(self, vectorized_parameters):
if self.state_normalization:
self.global_num_steps = vectorized_parameters[0]
self.state_mean = vectorized_parameters[1:self.state_dimensionality + 1]
cov_size = self.state_dimensionality**2
cov_dims = (self.state_dimensionality, self.state_dimensionality)
self.state_covariance = vectorized_parameters[self.state_dimensionality +
1:cov_size +
self.state_dimensionality +
1]
self.state_covariance = np.reshape(self.state_covariance, cov_dims)
vectorized_parameters = vectorized_parameters[1 + cov_size +
self.state_dimensionality:]
first_column = vectorized_parameters[0:self.first_hidden_size]
first_row = vectorized_parameters[self.first_hidden_size -
1:self.first_threshold]
first_matrix = scipy.linalg.toeplitz(first_column, first_row)
second_column = vectorized_parameters[self.
first_threshold:self.first_threshold +
self.second_hidden_size]
second_row = vectorized_parameters[self.first_threshold +
self.second_hidden_size -
1:self.second_threshold]
second_matrix = scipy.linalg.toeplitz(second_column, second_row)
third_column = vectorized_parameters[self.second_threshold:self
.second_threshold +
self.action_dimensionality]
third_row = vectorized_parameters[self.second_threshold +
self.action_dimensionality -
1:self.third_threshold]
third_matrix = scipy.linalg.toeplitz(third_column, third_row)
first_biases = vectorized_parameters[self.third_threshold:self
.fourth_threshold].reshape(
(self.first_hidden_size, 1))
second_biases = vectorized_parameters[self.fourth_threshold:self
.fifth_threshold].reshape(
(self.second_hidden_size, 1))
self.matrices = [first_matrix, second_matrix, third_matrix]
self.biases = [first_biases, second_biases]
def get_action(self, state):
if self.state_normalization:
centered_state = state - self.state_mean
squareroot_covariance = np.diag(self.state_covariance)
squareroot_covariance = np.sqrt(squareroot_covariance)
big_vl = np.power(10.0, 11)
cov_mask = (squareroot_covariance < np.power(10.0, -8)) * big_vl
squareroot_covariance = cov_mask + squareroot_covariance
inverse_squareroot_covariance = 1.0 / squareroot_covariance
state = inverse_squareroot_covariance * centered_state
state = np.reshape(state, (len(state), 1))
state = np.matmul(self.matrices[0], state)
state = np.add(state, self.biases[0])
state = (self.nonlinearity)(state)
state = np.matmul(self.matrices[1], state)
state = np.add(state, self.biases[1])
state = (self.nonlinearity)(state)
action = np.matmul(self.matrices[2], state)
if self.low is not None and self.high is not None:
action = np.tanh(action)
for i in range(len(action)):
action[i][0] = (
action[i][0] * (self.high[i] - self.low[i]) / 2.0 +
(self.low[i] + self.high[i]) / 2.0)
return action
def get_initial(self):
# The initial policy is given by weights and biases taken independently at
# random from the Gaussian distribution.
np.random.seed(100)
vec_first_biases = np.random.randn(self.first_hidden_size)
vec_second_biases = np.random.randn(self.second_hidden_size)
vec_first_vector = 1.0 / math.sqrt(float(
self.first_hidden_size)) * np.random.randn(self.first_hidden_size +
self.state_dimensionality -
1)
vec_second_vector = 1.0 / math.sqrt(float(
self.second_hidden_size)) * np.random.randn(self.second_hidden_size +
self.first_hidden_size - 1)
vec_third_vector = 1.0 / math.sqrt(float(
self.action_dimensionality)) * np.random.randn(
self.action_dimensionality + self.second_hidden_size - 1)
vectorized_network = np.concatenate([
vec_first_vector, vec_second_vector, vec_third_vector, vec_first_biases,
vec_second_biases
])
if self.state_normalization:
num_state_normalization_parameters = 1 + self.state_dimensionality
num_state_normalization_parameters += self.state_dimensionality**2
vectorized_network = np.concatenate(
[np.zeros(num_state_normalization_parameters), vectorized_network])
return vectorized_network
def get_total_num_parameters(self):
total = (self.state_dimensionality + self.first_hidden_size -
1) + (self.first_hidden_size + self.second_hidden_size -
1) + (self.second_hidden_size + self.action_dimensionality -
1) + self.first_hidden_size + self.second_hidden_size
if self.state_normalization:
total += self.state_dimensionality + self.state_dimensionality**2 + 1
return total
def core_convolve(long_vector, short_vector, jump):
index = 0
final = []
long_l = len(long_vector)
short_l = len(short_vector)
while index + short_l <= long_l:
final.append(np.sum(long_vector[index:(index + short_l)] * short_vector))
index += jump
return np.array(final)
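# core_convolve above is a strided "valid" correlation; its output length is
# floor((len(long_vector) - len(short_vector)) / jump) + 1 whenever
# len(long_vector) >= len(short_vector).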
def convolve(list_of_vectors, weights, stride, biases, nonlinearity):
"""Convolves the batch of vectors with weight matrix.
Applies a convolutional layer by convolving the batch of vectors with
weight matrix. The convolutional is characterized by stride-scalar, bias
vector and nonlinear mapping applied at the end.
Args:
list_of_vectors:
weights: weight matrix
stride: stride-scalar defining the convolution
biases: vector of bias-terms
nonlinearity: nonlinear mapping applied at the end of the convolution
Returns:
Convolved batch of vectors.
"""
final = []
for i in range(len(weights)):
conv_res_local = None
for j in range(len(weights[i])):
c = core_convolve(list_of_vectors[j], weights[i][j], stride)
if conv_res_local is None:
conv_res_local = c
else:
conv_res_local += c
conv_res_local += biases[i] * np.ones(len(conv_res_local))
r = nonlinearity(np.array(conv_res_local))
final.append(r)
return np.array(final)
class Conv1DPolicy(Policy):
"""Derives from Policy and encodes a convolutional policy.
Convolutional policy that applies to the input state a series of 1d
  convolutions followed by a fully connected layer. This policy uses two
element-wise nonlinearities: the first one is applied at the end of every
convolutional layer. The second one is applied in the fully connected
feedforward neural network.
"""
def __init__(self,
state_dimensionality,
action_dimensionality,
filter_sizes,
strides,
feature_detectors_sizes,
nonlinearity,
second_nonlinearity,
nb_input_channels=3):
self.state_dimensionality = state_dimensionality
self.action_dimensionality = action_dimensionality
self.filter_sizes = filter_sizes
self.strides = strides
self.feature_detectors_sizes = feature_detectors_sizes
self.nb_input_channels = nb_input_channels
self.biases = []
self.weights = []
for _ in range(len(filter_sizes)):
(self.biases).append([])
(self.weights).append([])
self.nonlinearity = nonlinearity
self.column = None
self.row = None
self.second_biases = None
self.second_nonlinearity = second_nonlinearity
    self.final_s = self.state_dimensionality // self.nb_input_channels
for i in range(len(self.filter_sizes)):
jump = self.strides[i]
d_init = self.final_s
count = 0
index = 0
while index + self.filter_sizes[i] <= d_init:
count += 1
index += jump
self.final_s = count
super().__init__()
def update(self, vectorized_parameters):
self.biases = []
self.weights = []
for _ in range(len(self.filter_sizes)):
(self.biases).append([])
(self.weights).append([])
index = 0
for i in range(self.feature_detectors_sizes[0]):
size = self.filter_sizes[0] * self.nb_input_channels
(self.weights[0]).append(
vectorized_parameters[index:index + size].reshape(
self.nb_input_channels, self.filter_sizes[0]))
index += size
size = 1
(self.biases[0]).append(vectorized_parameters[index:index + size])
index += size
for i in range(1, len(self.filter_sizes)):
for _ in range(self.feature_detectors_sizes[i]):
size = self.filter_sizes[i] * self.feature_detectors_sizes[i - 1]
(self.weights[i]).append(
vectorized_parameters[index:index + size].reshape(
self.feature_detectors_sizes[i - 1], self.filter_sizes[i]))
index += size
size = 1
(self.biases[i]).append(vectorized_parameters[index:index + size])
index += size
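    # The final fully connected layer is a Toeplitz matrix built from a
    # first row and first column that share their corner entry, so the two
    # slices below overlap by one element and consume only
    # size1 + size2 - 1 parameters.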
size1 = self.final_s * self.feature_detectors_sizes[
len(self.feature_detectors_sizes) - 1]
size2 = self.action_dimensionality
self.row = np.array(vectorized_parameters[index:index + size1])
self.column = np.array(
vectorized_parameters[(index + size1 - 1):(index + size1 + size2 - 1)])
index += size1 + size2 - 1
self.second_biases = vectorized_parameters[index:]
def get_action(self, state):
channels = np.transpose(
np.reshape(state, (self.final_s, self.nb_input_channels)))
for i in range(len(self.filter_sizes)):
channels = convolve(channels, self.weights[i], self.strides[i],
self.biases[i], self.nonlinearity)
action = self.second_nonlinearity(
np.matmul(
scipy.linalg.toeplitz(self.column, self.row),
channels.reshape((len(self.row), 1))) +
(self.second_biases).reshape((len(self.column), 1)))
return action
def get_initial(self):
init_convo = []
num_unstructured = self.final_s * self.feature_detectors_sizes[len(
self.feature_detectors_sizes) - 1] + 2 * self.action_dimensionality - 1
num_unstructured_weights = self.final_s * self.feature_detectors_sizes[
len(self.feature_detectors_sizes) - 1] + self.action_dimensionality - 1
num_unstructured_biases = num_unstructured - num_unstructured_weights
num_convo = (self.filter_sizes[0] * self.nb_input_channels +
1) * self.feature_detectors_sizes[0]
for i in range(self.feature_detectors_sizes[0]):
counter = self.filter_sizes[0] * self.nb_input_channels
init_convo += (1.0 / math.sqrt(float(counter)) *
np.random.randn(counter)).tolist()
init_convo += (np.random.randn(1)).tolist()
for i in range(1, len(self.filter_sizes)):
for _ in range(self.feature_detectors_sizes[i]):
counter = self.filter_sizes[i] * self.feature_detectors_sizes[i - 1]
init_convo += (1.0 / math.sqrt(float(counter)) *
np.random.randn(counter)).tolist()
init_convo += (np.random.randn(1)).tolist()
num_convo += self.filter_sizes[i] * self.feature_detectors_sizes[
i] * self.feature_detectors_sizes[i -
1] + self.feature_detectors_sizes[i]
random_sequence = 2.0 * (np.random.rand(num_unstructured_weights) - 0.5)
init_part_2 = (1.0 / math.sqrt(
float(self.final_s * self.feature_detectors_sizes[
len(self.feature_detectors_sizes) - 1]))) * random_sequence
init_part_3 = np.random.randn(num_unstructured_biases)
return np.array(init_convo + init_part_2.tolist() + init_part_3.tolist())
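  # Note on the scheme above: convolutional weights are drawn with
  # 1/sqrt(fan_in) scaling (LeCun-style), which keeps pre-activation
  # variance roughly constant across layers, while the Toeplitz block uses
  # a uniform distribution scaled by the flattened feature size.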
def get_total_num_parameters(self):
num_unstructured = self.final_s * self.feature_detectors_sizes[len(
self.feature_detectors_sizes) - 1] + 2 * self.action_dimensionality - 1
num_convo = (self.filter_sizes[0] * self.nb_input_channels +
1) * self.feature_detectors_sizes[0]
for i in range(1, len(self.filter_sizes)):
num_convo += self.filter_sizes[i] * self.feature_detectors_sizes[
i] * self.feature_detectors_sizes[i -
1] + self.feature_detectors_sizes[i]
return num_unstructured + num_convo
class IdentityPolicy(Policy):
"""Derives from Policy and encodes identity policy.
Trivial identity policy that outputs as action the state vector. That policy
is not useful on its own but becomes very handy while designing hybrid
policies that split state vector into chunks processed independently by
different policies, concatenated together and finally, fed to ultimate policy
that produces an action.
"""
def update(self, vectorized_parameters):
pass
def get_action(self, state):
return state
def get_initial(self):
return np.array([])
def get_total_num_parameters(self):
return 0
class HybridPolicy(Policy):
"""Derives from Policy and encodes hybrid policy.
Hybrid policy that partitions input state vector into two sub-states. Those
two sub-states are independently processed by two different policies. They
outputs are being concatenated and given as an input state to the third
policy that produces final action.
"""
def __init__(self,
first_policy,
first_state_dim,
second_policy,
third_policy,
flattened=True,
renorm_nonlinearity=None,
size_of_first_state_part=None,
size_of_second_state_part=None):
self.first_policy = first_policy
self.first_state_dim = first_state_dim
self.second_policy = second_policy
self.third_policy = third_policy
self.nb_params_1 = first_policy.get_total_num_parameters()
self.nb_params_2 = second_policy.get_total_num_parameters()
self.nb_params_3 = third_policy.get_total_num_parameters()
self.total = self.nb_params_1 + self.nb_params_2 + self.nb_params_3
self.flattened = flattened
self.renorm_nonlinearity = renorm_nonlinearity
self.size_of_first_state_part = size_of_first_state_part
self.size_of_second_state_part = size_of_second_state_part
super().__init__()
def update(self, vectorized_parameters):
vectorized_parameters_1 = copy.copy(
vectorized_parameters[0:self.nb_params_1])
vectorized_parameters_2 = copy.copy(
vectorized_parameters[self.nb_params_1:(self.nb_params_1 +
self.nb_params_2)])
vectorized_parameters_3 = copy.copy(
vectorized_parameters[(self.nb_params_1 + self.nb_params_2):])
(self.first_policy).update(vectorized_parameters_1)
(self.second_policy).update(vectorized_parameters_2)
(self.third_policy).update(vectorized_parameters_3)
def get_action(self, state):
if not self.flattened:
state = np.array(
(state[0].reshape(self.size_of_first_state_part)).tolist() +
(state[1].reshape(self.size_of_second_state_part)).tolist())
state_1 = state[0:self.first_state_dim]
state_2 = state[self.first_state_dim:]
a1 = (self.first_policy).get_action(state_1)
a1 = a1.reshape(len(a1))
if self.renorm_nonlinearity is not None:
a1 = self.renorm_nonlinearity(a1)
a2 = (self.second_policy).get_action(state_2)
a2 = a2.reshape(len(a2))
if self.renorm_nonlinearity is not None:
a2 = self.renorm_nonlinearity(a2)
new_state = np.array(a1.tolist() + a2.tolist())
return (self.third_policy).get_action(new_state)
def get_initial(self):
init_1 = ((self.first_policy).get_initial()).tolist()
init_2 = ((self.second_policy).get_initial()).tolist()
init_3 = ((self.third_policy).get_initial()).tolist()
return np.array(init_1 + init_2 + init_3)
def get_total_num_parameters(self):
return self.total
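# Minimal composition sketch (hypothetical sub-policies and sizes): route
# the first `k` state coordinates straight through, process the remainder
# with a learned policy, then fuse both outputs with a third policy.
#   hybrid = HybridPolicy(
#       first_policy=IdentityPolicy(),
#       first_state_dim=k,
#       second_policy=sensor_policy,   # e.g. a Conv1DPolicy over raw inputs
#       third_policy=fusion_policy)    # maps the fused features to actions
#   hybrid.update(hybrid.get_initial())
#   action = hybrid.get_action(state)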
|
google-research/google-research
|
es_optimization/policies.py
|
Python
|
apache-2.0
| 32,194
|
[
"Gaussian"
] |
249839298c3f6b89be3d63b5a2710200300b69bd7fdbccd718c92917617e716b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class VolumePicker(vtk.test.Testing.vtkTest):
def testVolumePicker(self):
# volume render a medical data set
# renderer and interactor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin)
# read the volume
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.SetImageRange(1, 93)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")
v16.SetDataSpacing(3.2, 3.2, 1.5)
#---------------------------------------------------------
# set up the volume rendering
volumeMapper = vtk.vtkFixedPointVolumeRayCastMapper()
volumeMapper.SetInputConnection(v16.GetOutputPort())
volumeColor = vtk.vtkColorTransferFunction()
volumeColor.AddRGBPoint(0, 0.0, 0.0, 0.0)
volumeColor.AddRGBPoint(180, 0.3, 0.1, 0.2)
volumeColor.AddRGBPoint(1000, 1.0, 0.7, 0.6)
volumeColor.AddRGBPoint(2000, 1.0, 1.0, 0.9)
volumeScalarOpacity = vtk.vtkPiecewiseFunction()
volumeScalarOpacity.AddPoint(0, 0.0)
volumeScalarOpacity.AddPoint(180, 0.0)
volumeScalarOpacity.AddPoint(1000, 0.2)
volumeScalarOpacity.AddPoint(2000, 0.8)
volumeGradientOpacity = vtk.vtkPiecewiseFunction()
volumeGradientOpacity.AddPoint(0, 0.0)
volumeGradientOpacity.AddPoint(90, 0.5)
volumeGradientOpacity.AddPoint(100, 1.0)
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(volumeColor)
volumeProperty.SetScalarOpacity(volumeScalarOpacity)
volumeProperty.SetGradientOpacity(volumeGradientOpacity)
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
volumeProperty.SetAmbient(0.6)
volumeProperty.SetDiffuse(0.6)
volumeProperty.SetSpecular(0.1)
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
#---------------------------------------------------------
# Do the surface rendering
boneExtractor = vtk.vtkMarchingCubes()
boneExtractor.SetInputConnection(v16.GetOutputPort())
boneExtractor.SetValue(0, 1150)
boneNormals = vtk.vtkPolyDataNormals()
boneNormals.SetInputConnection(boneExtractor.GetOutputPort())
boneNormals.SetFeatureAngle(60.0)
boneStripper = vtk.vtkStripper()
boneStripper.SetInputConnection(boneNormals.GetOutputPort())
boneMapper = vtk.vtkPolyDataMapper()
boneMapper.SetInputConnection(boneStripper.GetOutputPort())
boneMapper.ScalarVisibilityOff()
boneProperty = vtk.vtkProperty()
boneProperty.SetColor(1.0, 1.0, 0.9)
bone = vtk.vtkActor()
bone.SetMapper(boneMapper)
bone.SetProperty(boneProperty)
#---------------------------------------------------------
# Create an image actor
table = vtk.vtkLookupTable()
table.SetRange(0, 2000)
table.SetRampToLinear()
table.SetValueRange(0, 1)
table.SetHueRange(0, 0)
table.SetSaturationRange(0, 0)
mapToColors = vtk.vtkImageMapToColors()
mapToColors.SetInputConnection(v16.GetOutputPort())
mapToColors.SetLookupTable(table)
imageActor = vtk.vtkImageActor()
imageActor.GetMapper().SetInputConnection(mapToColors.GetOutputPort())
imageActor.SetDisplayExtent(32, 32, 0, 63, 0, 92)
#---------------------------------------------------------
# make a transform and some clipping planes
transform = vtk.vtkTransform()
transform.RotateWXYZ(-20, 0.0, -0.7, 0.7)
volume.SetUserTransform(transform)
bone.SetUserTransform(transform)
imageActor.SetUserTransform(transform)
c = volume.GetCenter()
volumeClip = vtk.vtkPlane()
volumeClip.SetNormal(0, 1, 0)
volumeClip.SetOrigin(c)
boneClip = vtk.vtkPlane()
boneClip.SetNormal(0, 0, 1)
boneClip.SetOrigin(c)
volumeMapper.AddClippingPlane(volumeClip)
boneMapper.AddClippingPlane(boneClip)
#---------------------------------------------------------
ren.AddViewProp(volume)
ren.AddViewProp(bone)
ren.AddViewProp(imageActor)
camera = ren.GetActiveCamera()
camera.SetFocalPoint(c)
camera.SetPosition(c[0] + 500, c[1] - 100, c[2] - 100)
camera.SetViewUp(0, 0, -1)
ren.ResetCameraClippingRange()
renWin.Render()
#---------------------------------------------------------
        # the cone points along the -x axis
coneSource = vtk.vtkConeSource()
coneSource.CappingOn()
coneSource.SetHeight(12)
coneSource.SetRadius(5)
coneSource.SetResolution(31)
coneSource.SetCenter(6, 0, 0)
coneSource.SetDirection(-1, 0, 0)
#---------------------------------------------------------
picker = vtk.vtkVolumePicker()
picker.SetTolerance(1.0e-6)
picker.SetVolumeOpacityIsovalue(0.01)
# This should usually be left alone, but is used here to increase coverage
picker.UseVolumeGradientOpacityOn()
# A function to point an actor along a vector
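        # (A 180-degree rotation about the bisector of the x-axis and n maps
        # the x-axis onto n; the extra y-axis flip handles n pointing into
        # the -x half-space, where that bisector would degenerate.)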
def PointCone(actor, n):
if n[0] < 0.0:
actor.RotateWXYZ(180, 0, 1, 0)
actor.RotateWXYZ(180, (n[0] - 1.0) * 0.5, n[1] * 0.5, n[2] * 0.5)
else:
actor.RotateWXYZ(180, (n[0] + 1.0) * 0.5, n[1] * 0.5, n[2] * 0.5)
# Pick the actor
picker.Pick(192, 103, 0, ren)
#print picker
p = picker.GetPickPosition()
n = picker.GetPickNormal()
coneActor1 = vtk.vtkActor()
coneActor1.PickableOff()
coneMapper1 = vtk.vtkDataSetMapper()
coneMapper1.SetInputConnection(coneSource.GetOutputPort())
coneActor1.SetMapper(coneMapper1)
coneActor1.GetProperty().SetColor(1, 0, 0)
coneActor1.SetPosition(p)
PointCone(coneActor1, n)
ren.AddViewProp(coneActor1)
# Pick the volume
picker.Pick(90, 180, 0, ren)
p = picker.GetPickPosition()
n = picker.GetPickNormal()
coneActor2 = vtk.vtkActor()
coneActor2.PickableOff()
coneMapper2 = vtk.vtkDataSetMapper()
coneMapper2.SetInputConnection(coneSource.GetOutputPort())
coneActor2.SetMapper(coneMapper2)
coneActor2.GetProperty().SetColor(1, 0, 0)
coneActor2.SetPosition(p)
PointCone(coneActor2, n)
ren.AddViewProp(coneActor2)
# Pick the image
picker.Pick(200, 200, 0, ren)
p = picker.GetPickPosition()
n = picker.GetPickNormal()
coneActor3 = vtk.vtkActor()
coneActor3.PickableOff()
coneMapper3 = vtk.vtkDataSetMapper()
coneMapper3.SetInputConnection(coneSource.GetOutputPort())
coneActor3.SetMapper(coneMapper3)
coneActor3.GetProperty().SetColor(1, 0, 0)
coneActor3.SetPosition(p)
PointCone(coneActor3, n)
ren.AddViewProp(coneActor3)
# Pick a clipping plane
picker.PickClippingPlanesOn()
picker.Pick(145, 160, 0, ren)
p = picker.GetPickPosition()
n = picker.GetPickNormal()
coneActor4 = vtk.vtkActor()
coneActor4.PickableOff()
coneMapper4 = vtk.vtkDataSetMapper()
coneMapper4.SetInputConnection(coneSource.GetOutputPort())
coneActor4.SetMapper(coneMapper4)
coneActor4.GetProperty().SetColor(1, 0, 0)
coneActor4.SetPosition(p)
PointCone(coneActor4, n)
ren.AddViewProp(coneActor4)
ren.ResetCameraClippingRange()
# render and interact with data
renWin.Render()
img_file = "VolumePicker.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(VolumePicker, 'test')])
|
sumedhasingla/VTK
|
Rendering/Volume/Testing/Python/VolumePicker.py
|
Python
|
bsd-3-clause
| 9,002
|
[
"VTK"
] |
9a002fd5cbce8229d2956b8245deb906a3821baf50fe7751285e557f6fb21d1a
|