text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Pedro Rodrigues - medecau.com - medecau@gmail.com
from BeautifulSoup import BeautifulSoup
from BeautifulSoup import BeautifulStoneSoup
from csv import writer
from urllib import urlopen
'''
More info:
http://www.pinkblue.com/nomes/desc.asp?id=449
http://babynamesworld.parentsconnect.com/profile.php?name=Pedro
http://www.babynames.com/Names/search.php?searchby=byname&searchterm=Pedro
'''
def get_page(url):
    """Fetch *url* and return the raw response body as a byte string.

    Bug fix: the original called ``urlopen(urlopen)``, passing the
    ``urlopen`` function itself instead of the ``url`` argument, so every
    call raised a TypeError. Pass ``url`` through as intended.
    """
    return urlopen(url).read()
# Scrape the onomastica name index and write (name, gender) rows to a CSV.
# NOTE(review): this is Python 2 code (print statements, dict.iteritems) and
# depends on the legacy BeautifulSoup 3 package.
base_path='http://ferrao.org/onomastica/'
# CSV output: one row per name, second column 'F', 'M', or '' (unknown).
csv_writer=writer(open('nome_genero.csv', 'w'))
print 'Reading initial page...'
page=get_page(base_path)
soup = BeautifulSoup(page)
# Navigate to the div holding the per-letter index links.
# NOTE(review): this path is tied to the site's exact markup — fragile.
alphabet_div=soup.body.form.div.table.contents[1].td.div
alphabet_pages={}
print 'Generating list of pages to visit...'
for each_page in alphabet_div:
    try:
        # Map letter label -> relative URL of that letter's page.
        alphabet_pages[each_page.a.string]=each_page.a['href']
    except:
        # Non-link nodes (whitespace, separators) are skipped silently.
        pass
print 'Iterating over pages...'
for letter, page in alphabet_pages.iteritems():
    print 'Page %s for %s' % (page, letter)
    page=get_page(base_path+page)
    soup = BeautifulSoup(page)
    # Row of table cells containing the name links for this letter.
    names_tr=soup.html.body.form.contents[6].tr
    for each_td in names_tr:
        for each in each_td:
            new_row=[]
            try:
                if each.name=='a':
                    # Decode HTML entities in the name (accented characters).
                    new_row.append(BeautifulStoneSoup(each.string, convertEntities=BeautifulStoneSoup.HTML_ENTITIES))
                    # Gender is encoded in the link's inline color style:
                    # pink (#ff6790) = female, blue (#0097ff) = male.
                    if each['style'].find('#ff6790')>-1:
                        new_row.append('F')
                    elif each['style'].find('#0097ff')>-1:
                        new_row.append('M')
                    else:
                        new_row.append('')
                    csv_writer.writerow(new_row)
            except:
                # Nodes without the expected attributes are skipped silently.
                pass
|
transparenciahackday/scraper-deputados
|
onomastica.py
|
Python
|
mit
| 1,790
|
[
"VisIt"
] |
c944494f2943a88bdbc2c2f0157d6d88cc32f00c3cdf543135a81911256dbe4a
|
"""Kernel Tuner interface module
This module contains the main functions that Kernel Tuner
offers to its users.
Author
------
Ben van Werkhoven <b.vanwerkhoven@esciencenter.nl>
Copyright and License
---------------------
* Copyright 2016 Netherlands eScience Center
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import json
import os.path
from collections import OrderedDict
import importlib
from datetime import datetime
import logging
import sys
import numpy
import kernel_tuner.util as util
import kernel_tuner.core as core
from kernel_tuner.runners.sequential import SequentialRunner
from kernel_tuner.runners.simulation import SimulationRunner
try:
import torch
except ImportError:
torch = util.TorchPlaceHolder()
from kernel_tuner.strategies import brute_force, random_sample, diff_evo, minimize, basinhopping, genetic_algorithm, mls, pso, simulated_annealing, firefly_algorithm, bayes_opt, greedy_mls, greedy_ils, ordered_greedy_mls, dual_annealing
# Maps the user-facing strategy name (the `strategy=` argument of tune_kernel)
# to the module implementing that search strategy.
strategy_map = dict(
    brute_force=brute_force,
    random_sample=random_sample,
    minimize=minimize,
    basinhopping=basinhopping,
    diff_evo=diff_evo,
    genetic_algorithm=genetic_algorithm,
    greedy_mls=greedy_mls,
    ordered_greedy_mls=ordered_greedy_mls,
    greedy_ils=greedy_ils,
    dual_annealing=dual_annealing,
    mls=mls,
    pso=pso,
    simulated_annealing=simulated_annealing,
    firefly_algorithm=firefly_algorithm,
    bayes_opt=bayes_opt,
)
class Options(OrderedDict):
    """Read-only attribute-style view over an ordered mapping.

    Public keys are exposed as attributes (``opts.name`` == ``opts["name"]``);
    a missing public name raises KeyError, preserved because callers access
    entries both ways. Underscore-prefixed lookups that reach ``__getattr__``
    raise AttributeError, as the attribute protocol requires.
    """

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails.
        if not name.startswith('_'):
            return self[name]
        # Bug fix: the original did `super().__getattr__(name)`, but neither
        # OrderedDict nor object defines __getattr__, producing the confusing
        # "'super' object has no attribute '__getattr__'". Raise a proper
        # AttributeError (still an AttributeError, so hasattr() etc. behave
        # the same for callers).
        raise AttributeError(name)

    def __deepcopy__(self, _):
        # Options are treated as immutable, so deepcopy returns self.
        return self
# Documentation table for the kernel-definition arguments of tune_kernel and
# run_kernel: maps argument name -> (description, type string). The text is
# assembled into the public docstrings by _get_docstring below, so the string
# contents are user-visible and must not be edited casually.
_kernel_options = Options([("kernel_name", ("""The name of the kernel in the code.""", "string")),
("kernel_source", ("""The CUDA, OpenCL, or C kernel code.
It is allowed for the code to be passed as a string, a filename, a function
that returns a string of code, or a list when the code needs auxilliary files.
To support combined host and device code tuning, a list of
filenames can be passed. The first file in the list should be the
file that contains the host code. The host code is assumed to
include or read in any of the files in the list beyond the first.
The tunable parameters can be used within all files.
Another alternative is to pass a code generating function.
The purpose of this is to support the use of code generating
functions that generate the kernel code based on the specific
parameters. This function should take one positional argument,
which will be used to pass a dict containing the parameters.
The function should return a string with the source code for
the kernel.""", "string or list and/or callable")),
("lang", ("""Specifies the language used for GPU kernels. The kernel_tuner
automatically detects the language, but if it fails, you may specify
the language using this argument, currently supported: "CUDA", "Cupy",
"OpenCL", or "C".""", "string")),
("problem_size", ("""The size of the domain from which the grid dimensions
of the kernel are computed.
This can be specified using an int, string, function, or
1,2,3-dimensional tuple.
In general, do not divide the problem_size yourself by the thread block sizes.
Kernel Tuner does this for you based on tunable parameters,
called "block_size_x", "block_size_y", and "block_size_z".
If more or different parameters divide the grid dimensions use
grid_div_x/y/z options to specify this.
In most use-cases the problem_size is specified using a single integer
or a tuple of integers,
but Kernel Tuner supports more advanced use cases where the problem_size
itself depends on the tunable parameters in some way.
You are allowed to use a function or string to specify the problem_size.
A function should accept a dictionary with the tunable parameters
for this kernel configuration and directly return a tuple
that specifies the problem size in all dimensions.
When passing a string, you are allowed to write Python
arithmetic and use the names of tunable parameters as variables
in these expressions. Kernel Tuner will replace instances of the tunable
parameters with their current value when computing the grid dimensions.
This option exists for convenience, but do note that using a lambda
function is probably safer. The string notation should only return
the problem size for one dimension, but can be used inside
a tuple, possibly in combination with integers or more strings in
different dimensions.
See the reduction CUDA example for an example use of this feature.""", "callable, string, int, or tuple(int or string, ..)")),
("arguments", ("""A list of kernel arguments, use numpy arrays for
arrays, use numpy.int32 or numpy.float32 for scalars.""", "list")),
("grid_div_x", ("""A list of names of the parameters whose values divide
the grid dimensions in the x-direction.
The product of all grid divisor expressions is computed before dividing
the problem_size in that dimension. Also note that the divison is treated
as a float divison and resulting grid dimensions will be rounded up to
the nearest integer number.
Arithmetic expressions can be
used if necessary inside the string containing a parameter name. For
example, in some cases you may want to divide the problem size in the
x-dimension with the number of warps rather than the number of threads
in a block, in such cases one could for example use ["block_size_x/32"].
Another option is to pass a function to grid_div_x that accepts a
dictionary with the tunable parameters and returns the grid divisor
in this dimension, for example: grid_div_x=lambda p:p["block_size_x"]/32.
If not supplied, ["block_size_x"] will be used by default, if you do not
want any grid x-dimension divisors pass an empty list.""", "callable or list")),
("grid_div_y", ("""A list of names of the parameters whose values divide
the grid dimensions in the y-direction, ["block_size_y"] by default.
If you do not want to divide the problem_size, you should pass an empty list.
See grid_div_x for more details.""", "list")),
("grid_div_z", ("""A list of names of the parameters whose values divide
the grid dimensions in the z-direction, ["block_size_z"] by default.
If you do not want to divide the problem_size, you should pass an empty list.
See grid_div_x for more details.""", "list")),
("smem_args", ("""CUDA-specific feature for specifying shared memory options
to the kernel. At the moment only 'size' is supported, but setting the
shared memory configuration on Kepler GPUs for example could be added
in the future. Size should denote the number of bytes for to use when
dynamically allocating shared memory.""", "dict(string: numpy object)")),
("cmem_args", ("""CUDA-specific feature for specifying constant memory
arguments to the kernel. In OpenCL these are handled as normal
kernel arguments, but in CUDA you can copy to a symbol. The way you
specify constant memory arguments is by passing a dictionary with
strings containing the constant memory symbol name together with numpy
objects in the same way as normal kernel arguments.""", "dict(string: numpy object)")),
("texmem_args", ("""CUDA-specific feature for specifying texture memory
arguments to the kernel. You specify texture memory arguments by passing a
dictionary with strings containing the texture reference name together with
the texture contents. These contents can be either simply a numpy object,
or a dictionary containing the numpy object under the key 'array' plus the
configuration options 'filter_mode' ('point' or 'linear), 'address_mode'
(a list of 'border', 'clamp', 'mirror', 'wrap' per axis),
'normalized_coordinates' (True/False).""", "dict(string: numpy object or dict)")),
("block_size_names", ("""A list of strings that replace the defaults for the names
that denote the thread block dimensions. If not passed, the behavior
defaults to ``["block_size_x", "block_size_y", "block_size_z"]``""", "list(string)"))])
# Documentation table for the tuning-process arguments of tune_kernel:
# argument name -> (description, type string). Used by _get_docstring.
_tuning_options = Options([("tune_params", ("""A dictionary containing the parameter names as keys,
and lists of possible parameter settings as values.
Kernel Tuner will try to compile and benchmark all possible
combinations of all possible values for all tuning parameters.
This typically results in a rather large search space of all
possible kernel configurations.
For each kernel configuration, each tuning parameter is
replaced at compile-time with its current value.
Currently, Kernel Tuner uses the convention that the following
list of tuning parameters are used as thread block dimensions:
* "block_size_x" thread block (work group) x-dimension
* "block_size_y" thread block (work group) y-dimension
* "block_size_z" thread block (work group) z-dimension
Options for changing these defaults may be added later. If you
don't want the thread block dimensions to be compiled in, you
may use the built-in variables blockDim.xyz in CUDA or the
built-in function get_local_size() in OpenCL instead.""", "dict( string : [...]")),
("restrictions", ("""An option to limit the search space with restrictions.
The restrictions can be specified using a function or a list of strings.
The function should take one argument, namely a dictionary with the
tunable parameters of the kernel configuration, if the function returns
True the configuration is considered to be part of the search space, or
False otherwise.
The other way to specify restrictions is using a list of strings
containing boolean expression that must be satisfied by the kernel
configuration. These expressions must all be true for the configuration
to be part of the search space. For example:
restrictions=["block_size_x==block_size_y*tile_size_y"] limits the
search to configurations where the block_size_x equals the product
of block_size_y and tile_size_y.
The default is None.""", "callable or list(strings)")),
("answer", ("""A list of arguments, similar to what you pass to arguments,
that contains the expected output of the kernel after it has executed
and contains None for each argument that is input-only. The expected
output of the kernel will then be used to verify the correctness of
each kernel in the parameter space before it will be benchmarked.""", "list")),
("atol", ("""The maximum allowed absolute difference between two elements
in the output and the reference answer, as passed to numpy.allclose().
Ignored if you have not passed a reference answer. Default value is
1e-6, that is 0.000001.""", "float")),
("verify", ("""Python function used for output verification. By default,
numpy.allclose is used for output verification, if this does not suit
your application, you can pass a different function here.
The function is expected to have two positional arguments. The first
is the reference result, the second is the output computed by the
kernel being verified. The types of these arguments depends on the
type of the output arguments you are verifying. The function may also
have an optional argument named atol, to which the value will be
passed that was specified using the atol option to tune_kernel.
The function should return True when the output passes the test, and
False when the output fails the test.""", "func(ref, ans, atol=None)")),
("strategy", ("""Specify the strategy to use for searching through the
parameter space, choose from:
* "basinhopping" Basin Hopping
* "bayes_opt" Bayesian Optimization
* "brute_force" (default) iterates through the entire search space
* "minimize" uses a local minimization algorithm
* "dual annealing" dual annealing
* "diff_evo" differential evolution
* "firefly_algorithm" firefly algorithm strategy
* "genetic_algorithm" a genetic algorithm optimization
* "greedy_ils" greedy randomized iterative local search
* "greedy_mls" greedy randomized multi-start local search
* "mls" best-improvement multi-start local search
* "ordered_greedy_mls" multi-start local search that uses a fixed order
* "pso" particle swarm optimization
* "random_sample" takes a random sample of the search space
* "simulated_annealing" simulated annealing strategy
Strategy-specific parameters and options are explained under strategy_options.
""", "")),
("strategy_options", ("""A dict with options specific to the selected tuning strategy.
* **"basinhopping"**
* "method", string, any of "Nelder-Mead", "Powell", "CG", "BFGS", "L-BFGS-B", "TNC", "COBYLA", or "SLSQP", default "L-BFGS-B".
* "T", float, Temperature parameter for the accept or reject criterion, default 1.0.
* **"bayes_opt"**
* "covariancekernel", any of "constantrbf", "rbf", "matern32", "matern52", default "matern32".
* "covariancelengthscale", float, default 1.5.
* "method": any of "poi", "ei", "lcb", "lcb-srinivas", "multi", "multi-advanced", "multi-fast", default "multi-advanced".
* "samplingmethod" any of "random", "lhs", default "lhs".
* **"diff_evo"**
* "method", string, any of "best1bin", "best1exp", "rand1exp", "randtobest1exp", "best2exp", "rand2exp", "randtobest1bin", "best2bin", "rand2bin", "rand1bin", default "best1bin".
* **"firefly_algorithm"**
* "alpha", float, alpha parameter, default 0.2.
* "B0", float, B0 parameter, default 1.0.
* "gamma", float, gamma parameter, default 1.0.
* "maxiter", integer, number of generations, default 100.
* "popsize", integer, population size, default 20.
* **"genetic_algorithm"**
* "maxiter", integer, number of generations, default 50.
* "method", string, crossover method any of "single_point", "two_point", "uniform", "disruptive_uniform", default "uniform".
* "mutation_chance", integer, specifies the 1 in mutation_chance of a mutation, default 10.
* "max_fevals", integer, specifies the maximum allowed number of unique function evaluations, default 100.
* "popsize", integer, population size, default 20.
* **"greedy_ils"**
* "max_fevals", integer, specifies the maximum allowed number of unique function evaluations, default 100.
* "neighbor", string, either "Hamming" or "adjacent" specifies how to consider two configurations being neighbors, default Hamming.
* "no_improvement", integer, number of evaluations to exceed without improvement before restarting, default 50.
* "randomwalk", float, controls how aggressively to permute the configuration in case no improvement is found, default 0.3.
* "restart", bool, controls greedyness, i.e. whether to restart from a position as soon as an improvement is found, default True.
* **"greedy_mls"**
* "max_fevals", integer, specifies the maximum allowed number of unique function evaluations, default 100.
* "neighbor", string, either "Hamming" or "adjacent" specifies how to consider two configurations being neighbors, default Hamming.
* "restart", bool, controls greedyness, i.e. whether to restart from a position as soon as an improvement is found, default True.
* **"minimize"**
* "method", string, any of "Nelder-Mead", "Powell", "CG", "BFGS", "L-BFGS-B", "TNC", "COBYLA", or "SLSQP", default "L-BFGS-B".
* **"mls"**
* "max_fevals", integer, specifies the maximum allowed number of unique function evaluations, default 100.
* "neighbor", string, either "Hamming" or "adjacent" specifies how to consider two configurations being neighbors, default Hamming.
* **"ordered_greedy_mls"**
* "max_fevals", integer, specifies the maximum allowed number of unique function evaluations, default 100.
* "neighbor", string, either "Hamming" or "adjacent" specifies how to consider two configurations being neighbors, default Hamming.
* "order", list, list of integers that describes the order of dimensions in which to look for neighbors, default uses the tune_params specified order.
* **"pso"**
* "c1", float, cognitive constant, default 2.0.
* "c2", float, social constant, default 1.0.
* "popsize", integer, population size, default 20.
* "maxiter", integer, number of generations, default 100.
* "w", float, inertia constant, default 0.5.
* **"random_sample"**
* "fraction", float, fraction of the search space to cover in [0,1], default 0.1.
* **"simulated_annealing"**
* "alpha", float, alpha parameter, default 0.9.
* "maxiter", integer, number of iterations of possibly accepting neighboring points, default 20.
* "T", float, starting temperature parameter, default 1.0.
* "T_min", float, end temperature parameter, default 0.001.
""", "dict")),
("iterations", ("""The number of times a kernel should be executed and
its execution time measured when benchmarking a kernel, 7 by default.""", "int")),
("verbose", ("""Sets whether or not to report about configurations that
were skipped during the search. This could be due to several reasons:
* kernel configuration fails one or more restrictions
* too many threads per thread block
* too much shared memory used by the kernel
* too many resources requested for launch
verbose is False by default.""", "bool")),
("cache", ("""filename for caching/logging benchmarked instances
filename uses suffix ".json"
if the file exists it is read and tuning continues from this file
""", "string")), ("metrics", ("specifies user-defined metrics", "OrderedDict")),
("simulation_mode", ("Simulate an auto-tuning search from an existing cachefile", "bool")),
("observers", ("""A list of BenchmarkObservers""", "list"))])
# Documentation table for the device-selection arguments shared by tune_kernel
# and run_kernel: argument name -> (description, type string).
_device_options = Options([("device", ("""CUDA/OpenCL device to use, in case you have multiple
CUDA-capable GPUs or OpenCL devices you may use this to select one,
0 by default. Ignored if you are tuning host code by passing
lang="C".""", "int")),
("platform", ("""OpenCL platform to use, in case you have multiple
OpenCL platforms you may use this to select one,
0 by default. Ignored if not using OpenCL. """, "int")),
("quiet", ("""Control whether or not to print to the console which
device is being used, False by default""", "boolean")),
("compiler", ("""A string containing your preferred compiler,
only effective with lang="C". """, "string")), ("compiler_options", ("""A list of strings that specify compiler
options.""", "list(string)"))])
def _get_docstring(opts):
docstr = ""
for k, v in opts.items():
docstr += " :param " + k + ": " + v[0] + "\n"
docstr += " :type " + k + ": " + v[1] + "\n\n"
return docstr
# Full docstring for tune_kernel, assembled from the three options tables
# above; it is attached to the function right after its definition.
# Note: % binds tighter than +, so the kernel-options text is substituted
# into the template first, then the tuning and device sections are appended.
_tune_kernel_docstring = """ Tune a CUDA kernel given a set of tunable parameters
%s
:returns: A list of dictionaries of all executed kernel configurations and their
execution times. And a dictionary with information about the environment
in which the tuning took place. This records device name, properties,
version info, and so on.
:rtype: list(dict()), dict()
""" % _get_docstring(_kernel_options) + _get_docstring(_tuning_options) + _get_docstring(_device_options)
#"""
def tune_kernel(kernel_name, kernel_source, problem_size, arguments, tune_params, grid_div_x=None, grid_div_y=None, grid_div_z=None, restrictions=None,
                answer=None, atol=1e-6, verify=None, verbose=False, lang=None, device=0, platform=0, smem_args=None, cmem_args=None, texmem_args=None,
                compiler=None, compiler_options=None, log=None, iterations=7, block_size_names=None, quiet=False, strategy=None, strategy_options=None,
                cache=None, metrics=None, simulation_mode=False, observers=None):
    # The public docstring is assembled from the options tables and attached
    # below via tune_kernel.__doc__, so none is written inline here.
    if log:
        # Per-run log file: kernel name + timestamp; `log` is the logging level.
        logging.basicConfig(filename=kernel_name + datetime.now().strftime('%Y%m%d-%H:%M:%S') + '.log', level=log)
    kernelsource = core.KernelSource(kernel_name, kernel_source, lang)
    _check_user_input(kernel_name, kernelsource, arguments, block_size_names)
    # check for forbidden names in tune parameters
    util.check_tune_params_list(tune_params)
    # check whether block_size_names are used as expected
    util.check_block_size_params_names_list(block_size_names, tune_params)
    if iterations < 1:
        raise ValueError("Iterations should be at least one!")
    # sort all the options into separate dicts
    # locals() captures every argument (plus kernelsource) by name, so the
    # three Options objects below can be filled by key lookup.
    opts = locals()
    kernel_options = Options([(k, opts[k]) for k in _kernel_options.keys()])
    tuning_options = Options([(k, opts[k]) for k in _tuning_options.keys()])
    device_options = Options([(k, opts[k]) for k in _device_options.keys()])
    # internal flag, not user-facing
    tuning_options["snap"] = True
    logging.debug('tune_kernel called')
    logging.debug('kernel_options: %s', util.get_config_string(kernel_options))
    logging.debug('tuning_options: %s', util.get_config_string(tuning_options))
    logging.debug('device_options: %s', util.get_config_string(device_options))
    if strategy:
        # Resolve the strategy name to its implementing module.
        if strategy in strategy_map:
            strategy = strategy_map[strategy]
        else:
            raise ValueError("Strategy %s not recognized" % strategy)
        # make strategy_options into an Options object
        if tuning_options.strategy_options:
            if not isinstance(strategy_options, Options):
                tuning_options.strategy_options = Options(strategy_options)
            # "fraction" only makes sense for random sampling
            if "fraction" in tuning_options.strategy_options and not tuning_options.strategy == 'random_sample':
                raise ValueError('It is not possible to use fraction in combination with strategies other than "random_sample". ' \
                                 'Please set strategy="random_sample", when using "fraction" in strategy_options')
            # check if method is supported by the selected strategy
            # NOTE(review): assumes the strategy module defines
            # `supported_methods` whenever "method" is passed — confirm.
            if "method" in tuning_options.strategy_options:
                method = tuning_options.strategy_options.method
                if not method in strategy.supported_methods:
                    raise ValueError('Method %s is not supported for strategy %s' % (method, tuning_options.strategy))
        # if no strategy_options dict has been passed, create empty dictionary
        else:
            tuning_options.strategy_options = Options({})
    # if no strategy selected, fall back to exhaustive search
    else:
        strategy = brute_force
    # select the runner for this job based on input
    selected_runner = SimulationRunner if simulation_mode is True else SequentialRunner
    with selected_runner(kernelsource, kernel_options, device_options, iterations, observers) as runner:
        # the user-specified function may or may not have an optional atol argument;
        # we normalize it so that it always accepts atol.
        tuning_options.verify = util.normalize_verify_function(tuning_options.verify)
        # process cache
        if cache:
            # ensure the cache filename carries the ".json" suffix
            if cache[-5:] != ".json":
                cache += ".json"
            util.process_cache(cache, kernel_options, tuning_options, runner)
        else:
            tuning_options.cache = {}
            tuning_options.cachefile = None
        # call the strategy to execute the tuning process
        results, env = strategy.tune(runner, kernel_options, device_options, tuning_options)
        # finished iterating over search space
        if not device_options.quiet:
            if results:    # checks if results is not empty
                # best configuration = lowest measured time
                best_config = min(results, key=lambda x: x['time'])
                units = getattr(runner, "units", None)
                print("best performing configuration:")
                util.print_config_output(tune_params, best_config, device_options.quiet, metrics, units)
            else:
                print("no results to report")
        if cache:
            util.close_cache(cache)
    return results, env
tune_kernel.__doc__ = _tune_kernel_docstring
# Full docstring for run_kernel, assembled from the kernel and device options
# tables; it is attached to the function right after its definition.
_run_kernel_docstring = """Compile and run a single kernel
Compiles and runs a single kernel once, given a specific instance of the kernels tuning parameters.
However, instead of measuring execution time run_kernel returns the output of the kernel.
The output is returned as a list of numpy arrays that contains the state of all the kernel arguments
after execution on the GPU.
To summarize what this function will do for you in one call:
* Compile the kernel according to the set of parameters passed
* Allocate GPU memory to hold all kernel arguments
* Move the all data to the GPU
* Execute the kernel on the GPU
* Copy all data from the GPU back to the host and return it as a list of Numpy arrays
This function was added to Kernel Tuner mostly to allow easy testing for kernel correctness.
On purpose, the interface is a lot like `tune_kernel()`.
%s
:param params: A dictionary containing the tuning parameter names as keys
and a single value per tuning parameter as values.
:type params: dict( string: int )
:returns: A list of numpy arrays, similar to the arguments passed to this
function, containing the output after kernel execution.
:rtype: list
""" % _get_docstring(_kernel_options) + _get_docstring(_device_options)
def run_kernel(kernel_name, kernel_source, problem_size, arguments, params, grid_div_x=None, grid_div_y=None, grid_div_z=None, lang=None, device=0, platform=0,
               smem_args=None, cmem_args=None, texmem_args=None, compiler=None, compiler_options=None, block_size_names=None, quiet=False, log=None):
    # The public docstring is attached below via run_kernel.__doc__.
    if log:
        logging.basicConfig(filename=kernel_name + datetime.now().strftime('%Y%m%d-%H:%M:%S') + '.log', level=log)
    kernelsource = core.KernelSource(kernel_name, kernel_source, lang)
    _check_user_input(kernel_name, kernelsource, arguments, block_size_names)
    # sort options into separate dicts
    opts = locals()
    kernel_options = Options([(k, opts[k]) for k in _kernel_options.keys()])
    device_options = Options([(k, opts[k]) for k in _device_options.keys()])
    # detect language and create the right device function interface
    with core.DeviceInterface(kernelsource, iterations=1, **device_options) as dev:
        # move data to the GPU
        gpu_args = dev.ready_argument_list(arguments)
        instance = None
        try:
            # create kernel instance
            instance = dev.create_kernel_instance(kernelsource, kernel_options, params, False)
            if instance is None:
                raise RuntimeError("cannot create kernel instance, too many threads per block")
            # see if the kernel arguments have correct type
            util.check_argument_list(instance.name, instance.kernel_string, arguments)
            # compile the kernel
            func = dev.compile_kernel(instance, False)
            if func is None:
                raise RuntimeError("cannot compile kernel, too much shared memory used")
            # add shared memory arguments to compiled module
            if smem_args is not None:
                dev.copy_shared_memory_args(util.get_smem_args(smem_args, params))
            # add constant memory arguments to compiled module
            if cmem_args is not None:
                dev.copy_constant_memory_args(cmem_args)
            # add texture memory arguments to compiled module
            if texmem_args is not None:
                dev.copy_texture_memory_args(texmem_args)
        finally:
            # delete temp files
            # NOTE(review): temp source files are removed before the launch
            # below; presumably the compiled module is already loaded on the
            # device at this point — confirm.
            if instance is not None:
                instance.delete_temp_files()
        # run the kernel
        if not dev.run_kernel(func, gpu_args, instance):
            raise RuntimeError("runtime error occured, too many resources requested")
        # copy data in GPU memory back to the host
        results = []
        for i, arg in enumerate(arguments):
            if numpy.isscalar(arg):
                # scalars are passed by value; return them unchanged
                results.append(arg)
            elif isinstance(arg, torch.Tensor):
                results.append(arg.cpu())
            else:
                # allocate a host buffer and copy the device result into it
                results.append(numpy.zeros_like(arg))
                dev.memcpy_dtoh(results[-1], gpu_args[i])
        return results
run_kernel.__doc__ = _run_kernel_docstring
def _check_user_input(kernel_name, kernelsource, arguments, block_size_names):
    """Validate user-supplied arguments shared by tune_kernel and run_kernel.

    Raises (via the delegated checkers) when the kernel argument list does not
    match the kernel source, or when block_size_names is malformed.
    """
    # see if the kernel arguments have correct type
    kernelsource.check_argument_lists(kernel_name, arguments)
    # check for types and length of block_size_names
    util.check_block_size_names(block_size_names)
|
benvanwerkhoven/kernel_tuner
|
kernel_tuner/interface.py
|
Python
|
apache-2.0
| 31,740
|
[
"Firefly"
] |
4c0e964865a70eaccc51724f06d1428b8b598bd36dae725276e2288017e7e56e
|
#!/usr/bin/python
"""
Example script to realise a mock galaxy catalogue.
"""
import numpy as np
import pylab as P
import scipy.integrate
import sys
sys.path.append('src/')
import ghost
# NOTE(review): this script is Python 2 (print statement after exit() below).
MHALO_MIN = 1e13 # Min. halo mass
NHALO = 100 # Number of halos to generate
# Fixed seed so the realised catalogue is reproducible.
np.random.seed(10)
# Load halo mass function (calculated using HMFcalc)
dat = np.genfromtxt("hmfcalc_massfn.dat").T
mhbin = dat[0]
dndlogm = dat[6]
# Keep only mass bins above the mass threshold
idxs = np.where(mhbin >= MHALO_MIN)
mhbin = mhbin[idxs]
dndlogm = dndlogm[idxs]
# Calculate normalisation (integral of dn/dlogM over log M)
norm = scipy.integrate.simps(dndlogm, np.log(mhbin))
# Generate the correct number of halos per mass bin
mhalo = []
for i in range(mhbin.size - 1):
    # Interpolate number density in this mass bin (trapezoid of dn/dlogM
    # times dM / M-midpoint)
    dm = mhbin[i+1] - mhbin[i]
    n = 0.5 * (dndlogm[i] + dndlogm[i+1]) * dm / (0.5 * (mhbin[i] + mhbin[i+1]))
    # Calculate no. of halos in this bin (scaled so the total is ~NHALO)
    N = int( n * NHALO / norm )
    # Realise halo masses in this bin (with log-uniform distribution of masses)
    logmh = np.random.uniform( low=np.log(mhbin[i]),
                               high=np.log(mhbin[i+1]),
                               size=N )
    mhalo += [np.exp(logmh)]
mhalo = np.concatenate(mhalo)
params = {} #{'ms_cen_beta': 0.5}
# All halos placed at redshift zero.
z = np.zeros(mhalo.size)
# Attach galaxy properties and u-band magnitudes via the ghost package.
mstar, sfr, passive = ghost.add_physical_properties(mhalo, z, params)
mag_int, mag_obs = ghost.add_optical_mags(mstar, sfr, z, 'u', params, atten=True)
# Intrinsic vs. observed (attenuated) magnitude scatter plot.
P.plot(mag_int, mag_obs, 'r,')
P.xlim((-22., -4.))
P.ylim((-22., -4.))
P.show()
exit()
# NOTE(review): everything below is unreachable due to the exit() above.
#help(ghost.add_optical_mags)
print np.where(passive == True)[0].size, np.where(passive == False)[0].size
P.plot(mstar, sfr, 'r,')
P.xscale('log')
P.yscale('log')
P.show()
#P.hist(np.log10(sfr), bins=50)
#P.yscale('log')
#P.show()
"""
# Compare mass function of generated halos with input mass function
P.hist(np.log(mhalos), bins=200, normed=True)
P.plot(np.log(mhbin), dndlogm / norm, 'r-', lw=2.)
P.yscale('log')
P.show()
"""
|
philbull/ghost
|
realise_cat.py
|
Python
|
mit
| 1,976
|
[
"Galaxy"
] |
fe25e7d93c7a166c9a264892d6a3b6c88856ea6d57a0d2adbe6d95eb9b4af085
|
# This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2010 Brian J. Crowell <brian@fluggo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from fluggo import signal, sortlist
from fluggo.media import process
from fluggo.media.basetypes import *
from datetime import datetime
from fluggo import logging
_log = logging.getLogger(__name__)
_queue = process.VideoPullQueue()
_cache_by_time = sortlist.SortedList(keyfunc=lambda i: i.last_accessed, index_attr='time_index')
_cache_by_frame = {}
# Default 32M
_max_cache_size = 32 * 1024 * 1024
_current_cache_size = 0
class _CacheEntry:
def __init__(self, stream_key, frame, image):
self.frame_key = (stream_key, frame)
self._image = None
self.image = image
self.time_index = None
self.frame_index = None
self.touch()
def touch(self):
self.last_accessed = datetime.utcnow()
@property
def image(self):
return self._image
@image.setter
def image(self, value):
global _current_cache_size
if isinstance(self._image, QImage):
_current_cache_size -= self._image.byteCount()
self._image = value
if isinstance(self._image, QImage):
_current_cache_size += self._image.byteCount()
trim_thumbnail_cache()
def trim_thumbnail_cache():
while _current_cache_size > _max_cache_size:
entry = _cache_by_time[0]
del _cache_by_frame[entry.frame_key]
del _cache_by_time[0]
if isinstance(entry.image, QImage):
current_cache_size -= entry.image.byteCount()
def _get_cached(stream_key, frame):
entry = _cache_by_frame.get((stream_key, frame))
if entry:
entry.touch()
_cache_by_time.move(entry.time_index)
return entry
def _cache_frame(stream_key, frame, image):
global _current_cache_size
entry = _get_cached(stream_key, frame)
if not entry:
entry = _CacheEntry(stream_key, frame, image)
_cache_by_frame[entry.frame_key] = entry
_cache_by_time.add(entry)
else:
# Replace the image
entry.image = image
# Fix the size of the cache
trim_thumbnail_cache()
return entry
class ThumbnailPainter(object):
def __init__(self):
self._thumbnails = []
self._thumbnail_indexes = []
self._thumbnail_width = 1.0
self._thumbnail_count = 0
self._stream = None
self._stream_key = None
self.updated = signal.Signal()
self._rect = None
self._length = 1
self._offset = 0
def set_stream(self, stream_key, stream):
self.clear()
self._stream = stream
self._stream_key = stream_key
self.updated(QRectF())
def set_length(self, length):
# TODO: Really, we should work to preserve as many
# thumbnails as we can
self.clear()
self._length = length
self.updated(QRectF())
def set_offset(self, offset):
# TODO: Really, we should work to preserve as many
# thumbnails as we can
self.clear()
self._offset = offset
self.updated(QRectF())
def set_rect(self, rect):
if self._rect != rect or self._thumbnail_count == 0:
self._rect = rect
if self._stream and self._stream.format:
box = self._stream.format.thumbnail_box
aspect = self._stream.format.pixel_aspect_ratio
frame_count = self._length
self._thumbnail_width = (rect.height() * float(box.width) * float(aspect)) / float(box.height)
self._thumbnail_count = min(max(int(rect.width() / self._thumbnail_width), 1), frame_count)
self.clear()
else:
self._thumbnail_count = 0
self.clear()
def clear(self):
self._thumbnail_indexes = []
# Calculate how many thumbnails fit
frame_count = self._length
count = self._thumbnail_count
if len(self._thumbnail_indexes) == count:
return
if count == 1:
self._thumbnail_indexes = [self._offset]
else:
self._thumbnail_indexes = [self._offset + int(float(a) * (frame_count - 1) / (count - 1)) for a in range(count)]
def get_thumbnail_rect(self, index):
# "index" is relative to the start of the clip (not the stream)
rect = self._rect
if self._thumbnail_count == 1:
return QRect(rect.x() + (index * (rect.width() - self._thumbnail_width)),
rect.y(),
self._thumbnail_width, rect.height())
else:
return QRect(rect.x() + (index * (rect.width() - self._thumbnail_width) / (self._thumbnail_count - 1)),
rect.y(),
self._thumbnail_width, rect.height())
def paint(self, painter, rect, clip_rect, transform):
# Figure out which thumbnails belong here and paint them
# The thumbnail lefts are at (i * (rect.width - thumbnail_width) / (len(thumbnails) - 1)) + rect.x()
# Rights are at left + thumbnail_width
self.set_rect(rect)
stream = self._stream
if stream:
if not stream.format:
_log.warning('Encountered stream with no format')
return
box = stream.format.thumbnail_box
inverted_transform = transform.inverted()[0]
left_nail = int((clip_rect.x() - self._thumbnail_width - rect.x()) *
(self._thumbnail_count - 1) / (rect.width() - self._thumbnail_width))
right_nail = int((clip_rect.x() + clip_rect.width() - rect.x()) *
(self._thumbnail_count - 1) / (rect.width() - self._thumbnail_width)) + 1
left_nail = max(0, left_nail)
right_nail = min(self._thumbnail_count, right_nail)
scale = process.VideoScaler(stream,
target_point=v2f(0, 0), source_point=box.min,
scale_factors=v2f(rect.height() * float(stream.format.pixel_aspect_ratio) / box.height,
rect.height() / box.height),
source_rect=box)
def callback(frame_index, frame, user_data):
try:
(entry, i) = user_data
size = frame.current_window.size()
img_str = frame.to_argb32_bytes()
entry.image = QImage(img_str, size.x, size.y, QImage.Format_ARGB32_Premultiplied).copy()
self.updated(inverted_transform.mapRect(QRectF(self.get_thumbnail_rect(i))))
except:
_log.warning('Error in thumbnail callback', exc_info=True)
for i in range(left_nail, right_nail):
# TODO: If two people go after the same thumbnail, it might not
# paint right (one will get notification the thumbnail has arrived,
# the other won't)
frame_index = self._thumbnail_indexes[i]
entry = _get_cached(self._stream_key, frame_index)
if not entry:
entry = _cache_frame(self._stream_key, frame_index, None)
entry.image = _queue.enqueue(source=scale, frame_index=frame_index,
window=stream.format.thumbnail_box,
callback=callback, user_data=(entry, i))
# TODO: Scale existing thumbnails to fit
if isinstance(entry.image, QImage):
thumbnail_rect = self.get_thumbnail_rect(i)
painter.drawImage(thumbnail_rect, entry.image)
else:
_log.debug('Thumbnail painter has no stream')
# TODO: Show a slug or something?
pass
|
fluggo/Canvas
|
fluggo/editor/ui/canvas/thumbnails.py
|
Python
|
gpl-3.0
| 8,528
|
[
"Brian"
] |
6bafc317346b46f2f3b2a3f1adf8bc57abb701feda7f771d5ef7ef2ed51c3645
|
from __future__ import absolute_import
import math
import copy
import numpy
import scipy.ndimage.measurements
from . import lapjv
import six
from six.moves import range
from six.moves import zip
invalid_match = 1000000 # limiting the choices of the algorithms
def euclidean_dist(point1, point2):
"""Compute the Euclidean distance between two points.
Parameters
----------
point1, point2 : 2-tuples of float
The input points.
Returns
-------
d : float
The distance between the input points.
Examples
--------
>>> point1 = (1.0, 2.0)
>>> point2 = (4.0, 6.0) # (3., 4.) away, simplest Pythagorean triangle
>>> euclidean_dist(point1, point2)
5.0
"""
(x1, y1) = point1
(x2, y2) = point2
return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
class NeighbourMovementTrackingParameters(object):
parameters_nbrs = {"nbrs_number": 6, "nbrs_maxdist": 30}
# max_distance is not scaled by avgCellDiameter as it solely depends on the image motion
parameters_tracking = {
"avgCellDiameter": 35,
"iterations": 20,
"big_size": 200,
"max_distance": 300,
}
parameters_cost_initial = {
"check_if_big": False,
"default_empty_cost": 15,
"default_empty_reliable_cost_mult": 2.5,
"area_weight": 25,
}
parameters_cost_iteration = {
"check_if_big": True,
"default_empty_cost": 15,
"default_empty_reliable_cost_mult": 2,
"area_weight": 30,
}
parameters_cell_features = {"reliable_area": 150, "reliable_distance": 30}
class CellFeatures(object):
"""
Represents cell features such as center and its area.
@ivar number: cell number in the image
@ivar center: position in the image (row,col)
@ivar area: area of the cell in pixels
@ivar image_size: size of the image that feature inhabits (rows,cols)
"""
def __init__(self, center, area, number, image_size):
self.parameters = copy.deepcopy(
NeighbourMovementTrackingParameters.parameters_cell_features
)
self.center = center
self.area = area
self.number = number
self.image_size = image_size
def __str__(self):
return self.__repr__()
def __repr__(self):
return (
str(self.number)
+ ": "
+ "Center is "
+ str(self.center)
+ ", Area is "
+ str(self.area)
)
def distance(self, to_cell):
return euclidean_dist(self.center, to_cell.center)
@staticmethod
def from_labels(labels):
"""
Creates list of cell features based on label image (1-oo pixel values)
@return: list of cell features in the same order as labels
"""
labels = labels.astype(int)
areas = scipy.ndimage.measurements.sum(
labels != 0, labels, list(range(1, numpy.max(labels) + 1))
)
existing_labels = [i for (i, a) in enumerate(areas, 1) if a > 0]
existing_areas = [a for a in areas if a > 0]
existing_centers = scipy.ndimage.measurements.center_of_mass(
labels != 0, labels, existing_labels
)
zipped = zip(existing_labels, existing_centers, existing_areas)
features = [CellFeatures(c, a, i, labels.shape) for i, c, a in zipped if a != 0]
return features
def is_reliable(self, min_size=-1):
"""
Determine if detection is considered reliable.
@return: it is reliable?
"""
if min_size == -1:
min_size = self.parameters["reliable_area"]
return (
self.area > min_size
and min(
self.center[0],
min(
self.center[1],
min(
self.image_size[0] - self.center[0],
self.image_size[1] - self.center[1],
),
),
)
> self.parameters["reliable_distance"]
)
class Trace(object):
"""
Cell track thorugh time.
@ivar timepoint: moment in time that represents (not used currently)
@ivar previous_cell: features of the cell in the previous image
@ivar current_cell: features of the cell in current image
@ivar cell_motion: difference in cell position.
"""
def __init__(self, frame1_cell, frame2_cell):
self.timepoint = 1
self.previous_cell = frame1_cell
self.current_cell = frame2_cell
self.cell_motion = euclidean_dist(frame1_cell.center, frame2_cell.center)
self.cell_motion_vector = (
frame1_cell.center[0] - frame2_cell.center[0],
frame1_cell.center[1] - frame2_cell.center[1],
)
def __str__(self):
return self.__repr__()
def __repr__(self):
return (
"Trace: cells are "
+ str((self.previous_cell.number, self.current_cell.number))
+ " motion is "
+ str(self.cell_motion)
)
@staticmethod
def from_detections_assignment(detections_1, detections_2, assignments):
"""
Creates traces out of given assignment and cell data.
"""
traces = []
for d1n, d2n in six.iteritems(assignments):
# check if the match is between existing cells
if d1n < len(detections_1) and d2n < len(detections_2):
traces.append(Trace(detections_1[d1n], detections_2[d2n]))
return traces
class NeighbourMovementTracking(object):
def __init__(self):
# Copy initial parameters.
self.parameters_nbrs = copy.deepcopy(
NeighbourMovementTrackingParameters.parameters_nbrs
)
self.parameters_tracking = copy.deepcopy(
NeighbourMovementTrackingParameters.parameters_tracking
)
self.parameters_cost_initial = copy.deepcopy(
NeighbourMovementTrackingParameters.parameters_cost_initial
)
self.parameters_cost_iteration = copy.deepcopy(
NeighbourMovementTrackingParameters.parameters_cost_iteration
)
self.parameters_cell_features = copy.deepcopy(
NeighbourMovementTrackingParameters.parameters_cell_features
)
def run_tracking(self, label_image_1, label_image_2):
"""
Tracks cells between input label images.
@returns: injective function from old objects to new objects (pairs of [old, new]). Number are compatible with labels.
"""
self.scale = self.parameters_tracking["avgCellDiameter"] / 35.0
detections_1 = self.derive_detections(label_image_1)
detections_2 = self.derive_detections(label_image_2)
# Calculate tracking based on cell features and position.
traces = self.find_initials_traces(detections_1, detections_2)
# Use neighbourhoods to improve tracking.
for _ in range(int(self.parameters_tracking["iterations"])):
traces = self.improve_traces(detections_1, detections_2, traces)
# Filter traces.
return [
(trace.previous_cell.number, trace.current_cell.number) for trace in traces
]
def is_cell_big(self, cell_detection):
"""
Check if the cell is considered big.
@param CellFeature cell_detection:
@return:
"""
return (
cell_detection.area
> self.parameters_tracking["big_size"] * self.scale * self.scale
)
@staticmethod
def derive_detections(label_image):
"""
Calculate properties for every label/cell.
List: centroid, area
"""
return CellFeatures.from_labels(label_image)
def find_initials_traces(self, detections_1, detections_2):
# calculate initial costs
costs = self.calculate_costs(
detections_1,
detections_2,
self.calculate_basic_cost,
self.parameters_cost_initial,
)
# solve tracking problem
assignment = self.solve_assignement(costs)
# create tracks
traces = Trace.from_detections_assignment(
detections_1, detections_2, assignment
)
return traces
@staticmethod
def find_closest_neighbours(cell, all_cells, k, max_dist):
"""
Find k closest neighbours of the given cell.
:param CellFeatures cell: cell of interest
:param all_cells: cell to consider as neighbours
:param int k: number of neighbours to be returned
:param int max_dist: maximal distance in pixels to consider neighbours
:return: k closest neighbours
"""
all_cells = [c for c in all_cells if c != cell]
sorted_cells = sorted([(cell.distance(c), c) for c in all_cells], key=lambda sc: sc[0])
return [sc[1] for sc in sorted_cells[:k] if sc[0] <= max_dist]
def calculate_basic_cost(self, d1, d2):
"""
Calculates assignment cost between two cells.
"""
distance = euclidean_dist(d1.center, d2.center) / self.scale
area_change = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)
return distance + self.parameters_cost_initial["area_weight"] * area_change
def calculate_localised_cost(self, d1, d2, neighbours, motions):
"""
Calculates assignment cost between two cells taking into account the movement of cells neighbours.
:param CellFeatures d1: detection in first frame
:param CellFeatures d2: detection in second frame
"""
my_nbrs_with_motion = [n for n in neighbours[d1] if n in motions]
my_motion = (d1.center[0] - d2.center[0], d1.center[1] - d2.center[1])
if my_nbrs_with_motion == []:
distance = euclidean_dist(d1.center, d2.center) / self.scale
else:
# it is not in motions if there is no trace (cell is considered to vanish)
distance = (
min(
[euclidean_dist(my_motion, motions[n]) for n in my_nbrs_with_motion]
)
/ self.scale
)
area_change = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)
return distance + self.parameters_cost_iteration["area_weight"] * area_change
def calculate_costs(self, detections_1, detections_2, calculate_match_cost, params):
"""
Calculates assignment costs between detections and 'empty' spaces. The smaller cost the better.
@param detections_1: cell list of size n in previous frame
@param detections_2: cell list of size m in current frame
@return: cost matrix (n+m)x(n+m) extended by cost of matching cells with emptiness
"""
global invalid_match
size_sum = len(detections_1) + len(detections_2)
# Cost matrix extended by matching cells with nothing
# (for detection 1 it means losing cells, for detection 2 it means new cells).
cost_matrix = numpy.zeros((size_sum, size_sum))
# lost cells cost
cost_matrix[0 : len(detections_1), len(detections_2) : size_sum] = (
params["default_empty_cost"]
+ (1 - numpy.eye(len(detections_1), len(detections_1))) * invalid_match
)
# new cells cost
cost_matrix[len(detections_1) : size_sum, 0 : len(detections_2)] = (
params["default_empty_cost"]
+ (1 - numpy.eye(len(detections_2), len(detections_2))) * invalid_match
)
# increase costs for reliable detections
for row in [
i
for i in range(0, len(detections_1))
if detections_1[i].is_reliable()
and (not params["check_if_big"] or self.is_cell_big(detections_1[i]))
]:
cost_matrix[row, len(detections_2) : size_sum] *= params[
"default_empty_reliable_cost_mult"
]
for col in [
i
for i in range(0, len(detections_2))
if detections_2[i].is_reliable()
and (not params["check_if_big"] or self.is_cell_big(detections_2[i]))
]:
cost_matrix[len(detections_1) : size_sum, col] *= params[
"default_empty_reliable_cost_mult"
]
# calculate cost of matching cells
def cost_if_not_too_far(detection_1, detection_2):
if (
detection_1.distance(detection_2)
<= self.parameters_tracking["max_distance"]
):
return calculate_match_cost(detection_1, detection_2)
else:
return invalid_match
cost_matrix[0 : len(detections_1), 0 : len(detections_2)] = [
[cost_if_not_too_far(d1, d2) for d2 in detections_2] for d1 in detections_1
]
return cost_matrix
def improve_traces(self, detections_1, detections_2, traces):
# calculate cell motion and neighbours
cells_motion = dict([(t.previous_cell, t.cell_motion_vector) for t in traces])
neighbours = dict(
[
(
d,
NeighbourMovementTracking.find_closest_neighbours(
d,
detections_1,
self.parameters_nbrs["nbrs_number"],
self.parameters_nbrs["nbrs_maxdist"] * self.scale,
),
)
for d in detections_1
]
)
# calculate localised costs
cost_function = lambda d1, d2: self.calculate_localised_cost(
d1, d2, neighbours, cells_motion
)
localized_costs = self.calculate_costs(
detections_1, detections_2, cost_function, self.parameters_cost_iteration
)
# solve tracking problem
assignment = self.solve_assignement(localized_costs)
# create tracks
improved_traces = Trace.from_detections_assignment(
detections_1, detections_2, assignment
)
return improved_traces
def solve_assignement(self, costs):
"""
Solves assignment problem using Hungarian implementation by Brian M. Clapper.
@param costs: square cost matrix
@return: assignment function
@rtype: int->int
"""
if costs is None or len(costs) == 0:
return dict()
n = costs.shape[0]
pairs = [
(i, j)
for i in range(0, n)
for j in range(0, n)
if costs[i, j] < invalid_match
]
costs_list = [costs[i, j] for (i, j) in pairs]
assignment = lapjv.lapjv(list(zip(*pairs))[0], list(zip(*pairs))[1], costs_list)
indexes = enumerate(list(assignment[0]))
return dict([(row, col) for row, col in indexes])
|
CellProfiler/centrosome
|
centrosome/neighmovetrack.py
|
Python
|
bsd-3-clause
| 14,949
|
[
"Brian"
] |
d3019361057174caf0175fd2cdbcf302f4a318c24fcb74da937416fad555c461
|
__author__ = 'sibirrer'
from astrofunc.LensingProfiles.spep import SPEP
from astrofunc.LensingProfiles.spp import SPP
from astrofunc.LensingProfiles.sis import SIS
import numpy as np
import numpy.testing as npt
import pytest
class TestSPEP(object):
"""
tests the Gaussian methods
"""
def setup(self):
self.SPEP = SPEP()
self.SPP = SPP()
self.SIS = SIS()
def test_function(self):
x = np.array([1])
y = np.array([2])
phi_E = 1.
gamma = 1.9
q = 1
phi_G = 0.
E = phi_E / (((3-gamma)/2.)**(1./(1-gamma))*np.sqrt(q))
values_spep = self.SPEP.function(x, y, E, gamma,q,phi_G)
values_spp = self.SPP.function(x, y, E, gamma)
assert values_spep[0] == values_spp[0]
x = np.array([0])
y = np.array([0])
values_spep = self.SPEP.function(x, y, E, gamma,q,phi_G)
values_spp = self.SPP.function(x, y, E, gamma)
assert values_spep[0] == values_spp[0]
x = np.array([2,3,4])
y = np.array([1,1,1])
values_spep = self.SPEP.function(x, y, E, gamma,q,phi_G)
values_spp = self.SPP.function(x, y, E, gamma)
assert values_spep[0] == values_spp[0]
assert values_spep[1] == values_spp[1]
assert values_spep[2] == values_spp[2]
def test_derivatives(self):
x = np.array([1])
y = np.array([2])
phi_E = 1.
gamma = 1.9
q = 1
phi_G = 0.
E = phi_E / (((3-gamma)/2.)**(1./(1-gamma))*np.sqrt(q))
f_x_spep, f_y_spep = self.SPEP.derivatives(x, y, E, gamma,q,phi_G)
f_x_spp, f_y_spp = self.SPP.derivatives(x, y, E, gamma)
assert f_x_spep[0] == f_x_spp[0]
assert f_y_spep[0] == f_y_spp[0]
x = np.array([0])
y = np.array([0])
f_x_spep, f_y_spep = self.SPEP.derivatives(x, y, E, gamma,q,phi_G)
f_x_spp, f_y_spp = self.SPP.derivatives(x, y, E, gamma)
assert f_x_spep[0] == f_x_spp[0]
assert f_y_spep[0] == f_y_spp[0]
x = np.array([1,3,4])
y = np.array([2,1,1])
f_x_spep, f_y_spep = self.SPEP.derivatives(x, y, E, gamma,q,phi_G)
f_x_spp, f_y_spp = self.SPP.derivatives(x, y, E, gamma)
assert f_x_spep[0] == f_x_spp[0]
assert f_y_spep[0] == f_y_spp[0]
assert f_x_spep[1] == f_x_spp[1]
assert f_y_spep[1] == f_y_spp[1]
assert f_x_spep[2] == f_x_spp[2]
assert f_y_spep[2] == f_y_spp[2]
def test_hessian(self):
x = np.array([1])
y = np.array([2])
phi_E = 1.
gamma = 1.9
q = 1.
phi_G = 0.
E = phi_E / (((3-gamma)/2.)**(1./(1-gamma))*np.sqrt(q))
f_xx, f_yy,f_xy = self.SPEP.hessian( x, y, E,gamma,q,phi_G)
f_xx_spep, f_yy_spep, f_xy_spep = self.SPEP.hessian(x, y, E, gamma,q,phi_G)
f_xx_spp, f_yy_spp, f_xy_spp = self.SPP.hessian(x, y, E, gamma)
assert f_xx_spep[0] == f_xx_spp[0]
assert f_yy_spep[0] == f_yy_spp[0]
assert f_xy_spep[0] == f_xy_spp[0]
x = np.array([1,3,4])
y = np.array([2,1,1])
f_xx_spep, f_yy_spep, f_xy_spep = self.SPEP.hessian(x, y, E, gamma,q,phi_G)
f_xx_spp, f_yy_spp, f_xy_spp = self.SPP.hessian(x, y, E, gamma)
assert f_xx_spep[0] == f_xx_spp[0]
assert f_yy_spep[0] == f_yy_spp[0]
assert f_xy_spep[0] == f_xy_spp[0]
assert f_xx_spep[1] == f_xx_spp[1]
assert f_yy_spep[1] == f_yy_spp[1]
assert f_xy_spep[1] == f_xy_spp[1]
assert f_xx_spep[2] == f_xx_spp[2]
assert f_yy_spep[2] == f_yy_spp[2]
assert f_xy_spep[2] == f_xy_spp[2]
def test_compare_sis(self):
x = np.array([1])
y = np.array([2])
theta_E = 1.
gamma = 2.
f_sis = self.SIS.function( x, y, theta_E)
f_spp = self.SPP.function(x, y, theta_E, gamma)
f_x_sis, f_y_sis = self.SIS.derivatives( x, y, theta_E)
f_x_spp, f_y_spp = self.SPP.derivatives(x, y, theta_E, gamma)
f_xx_sis, f_yy_sis, f_xy_sis = self.SIS.hessian( x, y, theta_E)
f_xx_spp, f_yy_spp, f_xy_spp = self.SPP.hessian(x, y, theta_E, gamma)
npt.assert_almost_equal(f_sis[0],f_spp[0], decimal=7)
npt.assert_almost_equal(f_x_sis[0], f_x_spp[0], decimal=7)
npt.assert_almost_equal(f_y_sis[0], f_y_spp[0], decimal=7)
npt.assert_almost_equal(f_xx_sis[0], f_xx_spp[0], decimal=7)
npt.assert_almost_equal(f_yy_sis[0], f_yy_spp[0], decimal=7)
npt.assert_almost_equal(f_xy_sis[0], f_xy_spp[0], decimal=7)
def test_unit_conversion(self):
theta_E = 2.
gamma = 2.2
rho0 = self.SPP.theta2rho(theta_E, gamma)
theta_E_out = self.SPP.rho2theta(rho0, gamma)
assert theta_E == theta_E_out
if __name__ == '__main__':
pytest.main()
|
sibirrer/astrofunc
|
test/test_spp.py
|
Python
|
mit
| 4,841
|
[
"Gaussian"
] |
45197e4e05a45659c8b854502ea92418a9d588dfd02cacdaad4c1b1475e72c6d
|
from paraview.simple import *
from paraview.web.dataset_builder import *
# Can.ex2 file path
fileToLoad = '/Users/seb/Downloads/ParaViewData-3.10.1/Data/can.ex2'
dataset_destination_path = '/tmp/can'
# Initial ParaView scene setup
can = OpenDataFile(fileToLoad)
can.ElementVariables = ['EQPS']
can.PointVariables = ['DISPL', 'VEL', 'ACCL']
can.GlobalVariables = ['KE', 'XMOM', 'YMOM', 'ZMOM', 'NSTEPS', 'TMSTEP']
can.ElementBlocks = ['Unnamed block ID: 1 Type: HEX', 'Unnamed block ID: 2 Type: HEX']
rep = Show()
view = Render()
anim = GetAnimationScene()
anim.UpdateAnimationUsingDataTimeSteps()
anim.GoToLast()
ColorBy(rep, ('POINTS', 'DISPL'))
rep.RescaleTransferFunctionToDataRange(True)
timeValues = anim.TimeKeeper.TimestepValues
view.CameraPosition = [-18.29191376466667, 21.185677224902403, -45.68993692892029]
view.CameraFocalPoint = [-0.5119223594665527, 3.3483874797821045, -11.321756362915039]
view.CameraViewUp = [0.29015080553622485, -0.779749133967588, -0.5548006832399148]
view.ResetCamera()
view.CenterOfRotation = view.CameraFocalPoint
Render()
# Create Tonic Dataset
dsb = ImageDataSetBuilder(dataset_destination_path, 'image/jpg', {'type': 'spherical', 'phi': range(0, 360, 45), 'theta': range(-60, 61, 30)})
# Add time information
dsb.getDataHandler().registerArgument(priority=1, name='time', values=timeValues, ui='slider', loop='modulo')
# Explore dataset
dsb.start(view)
for time in dsb.getDataHandler().time:
anim.TimeKeeper.Time = time
dsb.writeImages()
dsb.stop()
|
Kitware/arctic-viewer
|
scripts/examples/paraview/samples/time-management.py
|
Python
|
bsd-3-clause
| 1,511
|
[
"ParaView"
] |
196d49c152121b6e24c62bb24b65dc071ac6049ddb4b82e6540367e1f1b36d09
|
#!/usr/bin/env python3
from __future__ import print_function
import os, sys, errno
from itertools import groupby
# def faidx_load_all(faidx):
# chrs = {}
# for ref in faidx.references:
# chrs[ref] = faidx.fetch(reference=ref)
# return(chrs)
# def load_fasta(path):
# faidx = pysam.FastaFile(path)
# chrs = faidx_load_all(faidx)
# faidx.close()
# return(chrs)
def mkdir_p(path):
"""
http://stackoverflow.com/a/600612/431087
"""
try:
os.makedirs(path)
except OSError as exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(path)): raise
def fasta_iter(file_path):
"""
Given a fasta file. yield tuples of header, sequence
author: brentp
url: https://www.biostars.org/p/710/
"""
with open(file_path) as fh:
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
# drop the ">"
header = next(header)[1:].strip()
# join all sequence lines to one.
seq = "".join(s.strip() for s in next(faiter))
yield header, seq
def quack():
print("QUACK!")
def reverse_complement(s):
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', \
'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
# get returns the base if not in dict
bases = reversed([complement.get(b,b) for b in s])
return ''.join(bases)
def dna_key(s):
r = reverse_complement(s)
return s if s <= r else r
# given a string s and kmer size `k`, return a list of the kmers (contains dups)
def kmers(s,k):
return [ s[i:i+k] for i in range(len(s)-k+1) ]
def load_fasta(path):
"""
Load all chromosomes from a FASTA file.
"""
chrs = {}
try:
g=fasta_iter(path)
for (n,s) in g:
chrs[n] = s.upper()
except FileNotFoundError as fne:
print("Cannot find file:",path,file=sys.stderr)
sys.exit(-1)
return chrs
class BufferedGenerator:
def __init__(self, iter):
self.iter = iter
self.buffer = []
def __iter__(self):
return self
def next(self):
if self.buffer:
return self.buffer.pop() # remove last item
else:
return self.iter.next()
def unnext(self,item):
self.buffer.append(item)
|
mcveanlab/mccortex
|
scripts/python/mccortex.py
|
Python
|
mit
| 2,254
|
[
"pysam"
] |
165d72409c2fa260165f72de33c79cd8dcad46c320f6063621482f925c135a28
|
#!/usr/bin/env python
import roslib,rospy,sys,cv2,time
import numpy as np
roslib.load_manifest('lane_follower')
# from __future__ import print_function
from std_msgs.msg import Int32
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
bridge = CvBridge()
pub = rospy.Publisher('lane_detection', Int32, queue_size=10) #ros-lane-detection
pub_image = rospy.Publisher('lane_detection_image',Image,queue_size=1)
def callback(data):
# convert image to cv2 standard format
img = bridge.imgmsg_to_cv2(data)
# start time
start_time = cv2.getTickCount()
# Gaussian Filter to remove noise
img = cv2.medianBlur(img,5)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# print img.shape = (200,350,3)
rows,cols,channels = img.shape
# ROI
roi_mask = np.zeros(img.shape,dtype=np.uint8)
roi_mask[10:rows,0:cols] = 255
street = cv2.bitwise_and(img,roi_mask)
stop_roi_mask = np.zeros(gray.shape,dtype=np.uint8)
stop_roi_mask[150:rows,150:250] = 255
right_roi_mask = np.zeros(gray.shape,dtype=np.uint8)
right_roi_mask[150:rows,200:360] = 255
right_roi = cv2.bitwise_and(img,img,right_roi_mask)
left_roi_mask = np.zeros(gray.shape,dtype=np.uint8)
left_roi_mask[150:rows,0:200] = 255
left_roi = cv2.bitwise_and(img,img,left_roi_mask)
# define range of color in HSV
hsv = cv2.cvtColor(street,cv2.COLOR_BGR2HSV)
sensitivity = 160 # range of sensitivity=[90,150]
lower_white = np.array([0,0,255-sensitivity])
upper_white = np.array([255,sensitivity,255])
white_mask = cv2.inRange(hsv,lower_white,upper_white)
white_mask = cv2.erode(white_mask, None, iterations=2)
white_mask = cv2.dilate(white_mask, None, iterations=2)
lower_red = np.array([150,70,50])#150
upper_red = np.array([200,255,255])
lower_red2 = np.array([0,100,100])
upper_red2 = np.array([9,255,255])#10
red_mask1 = cv2.inRange(hsv,lower_red,upper_red)
red_mask1 = cv2.erode(red_mask1, None, iterations=2)
red_mask1 = cv2.dilate(red_mask1, None, iterations=2)
red_mask2 = cv2.inRange(hsv, lower_red2, upper_red2)
red_mask2 = cv2.erode(red_mask2, None, iterations=2)
red_mask2 = cv2.dilate(red_mask2, None, iterations=2)
red_mask = cv2.bitwise_or(red_mask1,red_mask2)
lower_yellow = np.array([0,100,100]) #0,100,100
upper_yellow = np.array([40,255,255]) #30,255,255
yellow_mask = cv2.inRange(hsv,lower_yellow,upper_yellow)
yellow_mask = cv2.erode(yellow_mask, None, iterations=2)
yellow_mask = cv2.dilate(yellow_mask, None, iterations=2)
# mask AND original img
whitehsvthresh = cv2.bitwise_and(right_roi,right_roi,mask=white_mask)
yellowhsvthresh = cv2.bitwise_and(street,street,mask=yellow_mask)
redhsvthresh = cv2.bitwise_and(street,street,mask=red_mask1)
# Canny Edge Detection
right_edges = cv2.Canny(whitehsvthresh,100,200)
left_edges = cv2.Canny(yellowhsvthresh,100,200)
right_edges = cv2.bitwise_and(right_edges,right_roi_mask)
left_edges = cv2.bitwise_and(left_edges,left_roi_mask)
red_edges_hsv = cv2.Canny(redhsvthresh,100,200)
red_edges = cv2.bitwise_and(red_edges_hsv,stop_roi_mask)
# Standard Hough Transform
right_lines = cv2.HoughLines(right_edges,0.8,np.pi/180,35)
left_lines = cv2.HoughLines(left_edges,0.8,np.pi/180,30)
red_lines = cv2.HoughLines(red_edges,1,np.pi/180,40)
xm = cols/2
ym = rows
# Draw right lane
x = []
i = 0
if right_lines is not None:
right_lines = np.array(right_lines[0])
for rho, theta in right_lines:
a=np.cos(theta)
b=np.sin(theta)
x0,y0=a*rho,b*rho
y3 = 140
x3 = int(x0+((y0-y3)*np.sin(theta)/np.cos(theta)))
x.insert(i,x3)
i+1
pt1=(int(x0+1000*(-b)),int(y0+1000*(a)))
pt2=(int(x0-1000*(-b)),int(y0-1000*(a)))
cv2.line(img,pt1,pt2,(255,0,0),2)
if len(x) != 0:
xmin = x[0]
for k in range(0,len(x)):
if x[k] < xmin and x[k] > 0:
xmin = x[k]
kr = int(np.sqrt(((xmin-xm)*(xmin-xm))+((y3-ym)*(y3-ym))))
else:
kr = 0
xmin = 0
# Draw left lane
x = []
i = 0
if left_lines is not None:
left_lines = np.array(left_lines[0])
for rho, theta in left_lines:
a=np.cos(theta)
b=np.sin(theta)
x0,y0=a*rho,b*rho
y3 = 140
x3 = int(x0+((y0-y3)*np.sin(theta)/np.cos(theta)))
x.insert(i,x3)
i+1
pt1=(int(x0+1000*(-b)),int(y0+1000*(a)))
pt2=(int(x0-1000*(-b)),int(y0-1000*(a)))
cv2.line(img,pt1,pt2,(0,255,0),2)
if len(x) != 0:
xmax = x[0]
for k in range(0,len(x)):
if x[k] > xmax and x[k]<cols:
xmax = x[k]
kl = int(np.sqrt(((xmax-xm)*(xmax-xm))+((y3-ym)*(y3-ym))))
else:
kl = 0
xmax = 0
error = kr - kl
#end time
end_time = cv2.getTickCount()
time_count= (end_time - start_time) / cv2.getTickFrequency()
# rospy.loginfo(time_count)
if red_lines is not None:
rospy.loginfo("STOP")
message = 154 #stop
elif right_lines is not None and left_lines is not None:
rospy.loginfo(error)
if error > 150:
error = 150
elif error < -150:
error = -150
message = error
elif left_lines is not None and right_lines is None:
rospy.loginfo("Turn Right")
rospy.loginfo(kl)
message = 152 #turn right
elif left_lines is None and right_lines is not None:
rospy.loginfo("Turn Left")
message = 153 #turn let
elif left_lines is None and right_lines is None:
rospy.loginfo("No line")
message = 155 #no line found
else:
message = 155 #no line found
pub.publish(message)
image = bridge.cv2_to_imgmsg(img)
pub_image.publish(image)
def lane_detection():
rospy.init_node('lane-detection',anonymous=True)
rospy.Subscriber("image_topic",Image,callback,queue_size=1,buff_size=2**24)
try:
rospy.loginfo("Enetering ROS Spin")
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
if __name__ == '__main__':
try:
lane_detection()
except rospy.ROSInterruptException:
pass
|
isarlab-department-engineering/ros_dt_lane_follower
|
deprecated_nodes/old/lane_detection.py
|
Python
|
bsd-3-clause
| 6,194
|
[
"Gaussian"
] |
a9d0733a8f4d2356467feecf4587f169b76c804bf0286cdd8e029a80d033b77f
|
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
class Visit(models.Model):
    """One advising-contact record for a student.

    Choice lists are read from Django settings (``CAMPUS_CHOICES`` and the
    ``VISIT_*`` settings) -- NOTE(review): verify these are defined in every
    deployment that uses this model.
    """
    # Student the visit concerns; reverse accessor: user.visits
    student = models.ForeignKey(User, related_name='visits')
    # Staff member who recorded the visit; reverse accessor: user.submitted_visits
    submitter = models.ForeignKey(User, related_name='submitted_visits')
    campus = models.CharField(max_length=255, choices=settings.CAMPUS_CHOICES)
    contact_type = models.CharField(max_length=255,
                                    choices=settings.VISIT_CONTACT_TYPE_CHOICES)
    reason = models.CharField(max_length=255,
                              choices=settings.VISIT_REASON_CHOICES)
    department = models.CharField(max_length=255,
                                  choices=settings.VISIT_DEPARTMENT_CHOICES)
    # NOTE(review): BooleanFields carry no default here, so forms must always
    # supply a value -- confirm this is the intended contract.
    undecided_financial_aid = models.BooleanField()
    career_services_outcome = models.CharField(
        max_length=255,
        choices=settings.VISIT_CAREER_SERVICES_OUTCOME_CHOICES)
    note = models.TextField()
    # When True the note is presumably restricted from general viewing --
    # enforcement is not visible from this model; check the view layer.
    private = models.BooleanField()
    # Set once at creation time (auto_now_add); not editable afterwards.
    date_submitted = models.DateTimeField(auto_now_add=True)

    class Meta(object):
        # Newest visits first.
        ordering = ('-date_submitted',)
|
marklocklear/Online-Student-Profile
|
osp/visits/models.py
|
Python
|
lgpl-3.0
| 1,102
|
[
"VisIt"
] |
f91e8d69b7e1a24254cd2c862c9a367e5525a72386787b6ea987f6c55dd83b4c
|
# -*- coding: utf-8 -*-
'''
Describe test for Django
@author: Laurent GAY
@organization: sd-libre.fr
@contact: info@sd-libre.fr
@copyright: 2015 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from shutil import rmtree
from importlib import import_module
from base64 import b64decode
from lucterios.framework.test import LucteriosTest
from lucterios.framework.filetools import get_user_dir
from lucterios.CORE.parameters import Params
from lucterios.documents.views import DocumentSearch
from diacamma.accounting.test_tools import initial_thirds_fr, default_compta_fr, fill_entries_fr, set_accounting_system, add_entry,\
create_account, check_pdfreport
from diacamma.accounting.views_accounts import ChartsAccountList, ChartsAccountDel, ChartsAccountShow, ChartsAccountAddModify, ChartsAccountListing, ChartsAccountImportFiscalYear
from diacamma.accounting.views_accounts import FiscalYearBegin, FiscalYearClose, FiscalYearReportLastYear
from diacamma.accounting.views_entries import EntryAccountEdit, EntryAccountList
from diacamma.accounting.models import FiscalYear
from diacamma.accounting.views import ThirdList
from diacamma.accounting.views_budget import BudgetList, BudgetAddModify, BudgetDel
from diacamma.payoff.test_tools import PaymentTest
from diacamma.accounting.views_reports import FiscalYearIncomeStatement,\
FiscalYearBalanceSheet
class ChartsAccountTest(LucteriosTest):
    """Integration tests for the chart-of-accounts screens of
    diacamma.accounting (list / show / add-modify / delete / CSV listing /
    budget).

    NOTE(review): all expected labels and amounts assume the French fixture
    set built in setUp (default_compta_fr + fill_entries_fr); do not reuse
    with another accounting plan.
    """

    def setUp(self):
        # Fresh French accounting fixtures for every test; wipe any leftover
        # files in the user directory.
        LucteriosTest.setUp(self)
        set_accounting_system()
        initial_thirds_fr()
        default_compta_fr()
        fill_entries_fr(1)
        rmtree(get_user_dir(), True)

    def test_all(self):
        """List every account of year 1: grid columns, totals, type selector."""
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('', 8)
        self.assert_grid_equal('chartsaccount', {"code": "code", "name": "nom", "last_year_total": "total de l'exercice précédent", "current_total": "total de l'exercice", "current_validated": "total validé"}, 17)  # nb=5
        self.assert_json_equal('LABELFORM', 'result', [230.62, 348.60, -117.98, 1050.66, 1244.74])
        self.assert_select_equal('type_of_account', {0: 'Actif', 1: 'Passif', 2: 'Capitaux', 3: 'Produit', 4: 'Charge', 5: 'Autres comptes', -1: '---'})

    def test_asset(self):
        """Asset accounts (type 0): header formats and the 411/512/531 rows."""
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '1', 'type_of_account': '0'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('', 8)
        self.assert_count_equal('chartsaccount', 3)
        # Columns 2..4 all use the currency format with credit/debit coloring.
        self.assert_json_equal('', '#chartsaccount/headers/@2/@0', 'last_year_total')
        self.assert_json_equal('', '#chartsaccount/headers/@2/@2', "C2EUR")
        self.assert_json_equal('', '#chartsaccount/headers/@2/@4', '{[p align=\'right\']}{[font color="green"]}Crédit: %s{[/font]}{[/p]};{[p align=\'right\']}{[font color="blue"]}Débit: %s{[/font]}{[/p]};{[p align=\'right\']}%s{[/p]}')
        self.assert_json_equal('', '#chartsaccount/headers/@3/@0', 'current_total')
        self.assert_json_equal('', '#chartsaccount/headers/@3/@2', "C2EUR")
        self.assert_json_equal('', '#chartsaccount/headers/@3/@4', '{[p align=\'right\']}{[font color="green"]}Crédit: %s{[/font]}{[/p]};{[p align=\'right\']}{[font color="blue"]}Débit: %s{[/font]}{[/p]};{[p align=\'right\']}%s{[/p]}')
        self.assert_json_equal('', '#chartsaccount/headers/@4/@0', 'current_validated')
        self.assert_json_equal('', '#chartsaccount/headers/@4/@2', "C2EUR")
        self.assert_json_equal('', '#chartsaccount/headers/@4/@4', '{[p align=\'right\']}{[font color="green"]}Crédit: %s{[/font]}{[/p]};{[p align=\'right\']}{[font color="blue"]}Débit: %s{[/font]}{[/p]};{[p align=\'right\']}%s{[/p]}')
        self.assert_json_equal('', 'chartsaccount/@0/code', '411')
        self.assert_json_equal('', 'chartsaccount/@0/name', '411')
        self.assert_json_equal('', 'chartsaccount/@0/last_year_total', 0.00)
        self.assert_json_equal('', 'chartsaccount/@0/current_total', -159.98)
        self.assert_json_equal('', 'chartsaccount/@0/current_validated', -125.97)
        self.assert_json_equal('', 'chartsaccount/@1/code', '512')
        self.assert_json_equal('', 'chartsaccount/@1/name', '512')
        self.assert_json_equal('', 'chartsaccount/@1/last_year_total', -1135.93)
        self.assert_json_equal('', 'chartsaccount/@1/current_total', -1130.29)
        self.assert_json_equal('', 'chartsaccount/@1/current_validated', -1130.29)
        self.assert_json_equal('', 'chartsaccount/@2/code', '531')
        self.assert_json_equal('', 'chartsaccount/@2/name', '531')
        self.assert_json_equal('', 'chartsaccount/@2/last_year_total', -114.45)
        self.assert_json_equal('', 'chartsaccount/@2/current_total', 79.63)
        self.assert_json_equal('', 'chartsaccount/@2/current_validated', -114.45)

    def test_liability(self):
        """Liability accounts (type 1): single 401 row."""
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '1', 'type_of_account': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('', 8)
        self.assert_count_equal('chartsaccount', 1)
        self.assert_json_equal('', 'chartsaccount/@0/code', '401')
        self.assert_json_equal('', 'chartsaccount/@0/name', '401')
        self.assert_json_equal('', 'chartsaccount/@0/last_year_total', 0.00)
        self.assert_json_equal('', 'chartsaccount/@0/current_total', 78.24)
        self.assert_json_equal('', 'chartsaccount/@0/current_validated', 0.00)

    def test_equity(self):
        """Equity accounts (type 2): 5 rows, spot-check account 106."""
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '1', 'type_of_account': '2'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('', 8)
        self.assert_count_equal('chartsaccount', 5)
        self.assert_json_equal('', 'chartsaccount/@0/code', '106')
        self.assert_json_equal('', 'chartsaccount/@0/name', '106')
        self.assert_json_equal('', 'chartsaccount/@0/last_year_total', 1250.38)
        self.assert_json_equal('', 'chartsaccount/@0/current_total', 1250.38)
        self.assert_json_equal('', 'chartsaccount/@0/current_validated', 1250.38)

    def test_revenue(self):
        """Revenue accounts (type 3): 3 rows, spot-check account 707."""
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '1', 'type_of_account': '3'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('', 8)
        self.assert_count_equal('chartsaccount', 3)
        self.assert_json_equal('', 'chartsaccount/@2/code', '707')
        self.assert_json_equal('', 'chartsaccount/@2/name', '707')
        self.assert_json_equal('', 'chartsaccount/@2/last_year_total', 0.00)
        self.assert_json_equal('', 'chartsaccount/@2/current_total', 230.62)
        self.assert_json_equal('', 'chartsaccount/@2/current_validated', 196.61)

    def test_expense(self):
        """Expense accounts (type 4): 5 rows, spot-check 601 and 602."""
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '1', 'type_of_account': '4'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('', 8)
        self.assert_count_equal('chartsaccount', 5)
        self.assert_json_equal('', 'chartsaccount/@0/code', '601')
        self.assert_json_equal('', 'chartsaccount/@0/name', '601')
        self.assert_json_equal('', 'chartsaccount/@0/last_year_total', 0.00)
        self.assert_json_equal('', 'chartsaccount/@0/current_total', -78.24)
        self.assert_json_equal('', 'chartsaccount/@0/current_validated', 0.00)
        self.assert_json_equal('', 'chartsaccount/@1/code', '602')
        self.assert_json_equal('', 'chartsaccount/@1/name', '602')
        self.assert_json_equal('', 'chartsaccount/@1/last_year_total', 0.00)
        self.assert_json_equal('', 'chartsaccount/@1/current_total', -63.94)
        self.assert_json_equal('', 'chartsaccount/@1/current_validated', -63.94)

    def test_contraaccounts(self):
        """Other accounts (type 5): fixture has none."""
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '1', 'type_of_account': '5'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('', 8)
        self.assert_count_equal('chartsaccount', 0)

    def test_show(self):
        """Open account 707: header fields plus its 3 linked entries."""
        self.factory.xfer = ChartsAccountShow()
        self.calljson('/diacamma.accounting/chartsAccountShow', {'year': '1', 'type_of_account': '-1', 'chartsaccount': '10'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountShow')
        self.assert_count_equal('', 5)
        self.assert_json_equal('LABELFORM', 'code', '707')
        self.assert_json_equal('LABELFORM', 'name', '707')
        self.assert_json_equal('LABELFORM', 'type_of_account', 3)
        self.assert_json_equal('', '#type_of_account/formatnum', {'0': 'Actif', '1': 'Passif', '2': 'Capitaux', '3': 'Produit', '4': 'Charge', '5': 'Autres comptes'})
        self.assert_grid_equal('entryaccount', {"num": "N°", "date_entry": "date d'écriture", "date_value": "date de pièce", "description": "description"}, 3)  # nb=5
        self.assert_json_equal('', 'entryaccount/@0/num', '4')
        self.assert_json_equal('', 'entryaccount/@0/date_value', '2015-02-21')
        description = self.json_data['entryaccount'][0]['description']
        self.assertTrue('vente 1' in description, description)
        self.assertTrue('70,64 €' in description, description)
        self.assert_json_equal('', 'entryaccount/@1/num', '6')
        self.assert_json_equal('', 'entryaccount/@1/date_value', '2015-02-21')
        description = self.json_data['entryaccount'][1]['description']
        self.assertTrue('vente 2' in description, description)
        self.assertTrue('125,97 €' in description, description)
        # Third entry is not yet numbered (num is None).
        self.assert_json_equal('', 'entryaccount/@2/num', None)
        self.assert_json_equal('', 'entryaccount/@2/date_value', '2015-02-24')
        description = self.json_data['entryaccount'][2]['description']
        self.assertTrue('vente 3' in description, description)
        self.assertTrue('34,01 €' in description, description)

    def test_delete(self):
        """Deleting an account with linked entries fails; an unused one succeeds."""
        self.factory.xfer = ChartsAccountDel()
        self.calljson('/diacamma.accounting/chartsAccountDel',
                      {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '5', 'chartsaccount': '10'}, False)
        self.assert_observer('core.exception', 'diacamma.accounting', 'chartsAccountDel')
        self.assert_json_equal('', 'message', "Impossible de supprimer cet enregistrement: il est associé avec d'autres sous-enregistrements")
        self.factory.xfer = ChartsAccountDel()
        self.calljson('/diacamma.accounting/chartsAccountDel',
                      {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '5', 'chartsaccount': '9'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'chartsAccountDel')

    def test_add(self):
        """Creation dialog: name auto-fill from a known code, invalid-code errors."""
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('EDIT', 'code', '')
        self.assert_json_equal('EDIT', 'name', '')
        self.assert_json_equal('LABELFORM', 'type_of_account', None)
        self.assert_json_equal('LABELFORM', 'error_code', "")
        self.assert_json_equal('', '#error_code/formatstr', '{[center]}{[font color="red"]}%s{[/font]}{[/center]}')
        # A valid code pre-fills the standard name and category.
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'code': '2301'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('EDIT', 'code', '2301')
        self.assert_json_equal('EDIT', 'name', 'Immobilisations en cours')
        self.assert_json_equal('LABELFORM', 'type_of_account', 0)
        self.assert_json_equal('LABELFORM', 'error_code', "")
        # Invalid codes come back flagged with '!' and an error label.
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'code': '3015'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('EDIT', 'code', '3015!')
        self.assert_json_equal('EDIT', 'name', '')
        self.assert_json_equal('LABELFORM', 'type_of_account', None)
        self.assert_json_equal('LABELFORM', 'error_code', "Code invalide !")
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'code': 'abcd'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('EDIT', 'code', 'abcd!')
        self.assert_json_equal('EDIT', 'name', '')
        self.assert_json_equal('LABELFORM', 'type_of_account', None)
        self.assert_json_equal('LABELFORM', 'error_code', "Code invalide !")

    def test_modify(self):
        """Code edition on an existing account: valid, invalid, and category change."""
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'chartsaccount': '9'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('EDIT', 'code', '706')
        self.assert_json_equal('EDIT', 'name', '706')
        self.assert_json_equal('LABELFORM', 'type_of_account', 3)
        self.assert_json_equal('LABELFORM', 'error_code', "")
        # Same-category code change: accepted.
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'chartsaccount': '9', 'code': '7061'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('EDIT', 'code', '7061')
        self.assert_json_equal('EDIT', 'name', '706')
        self.assert_json_equal('LABELFORM', 'type_of_account', 3)
        self.assert_json_equal('LABELFORM', 'error_code', "")
        # Unknown or malformed codes: rejected with "Code invalide !".
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'chartsaccount': '9', 'code': '3015'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('EDIT', 'code', '3015!')
        self.assert_json_equal('EDIT', 'name', '706')
        self.assert_json_equal('LABELFORM', 'type_of_account', 3)
        self.assert_json_equal('LABELFORM', 'error_code', "Code invalide !")
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'chartsaccount': '9', 'code': 'abcd'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('EDIT', 'code', 'abcd!')
        self.assert_json_equal('EDIT', 'name', '706')
        self.assert_json_equal('LABELFORM', 'type_of_account', 3)
        self.assert_json_equal('LABELFORM', 'error_code', "Code invalide !")
        # Changing to a code of another category: rejected ("Changement non permis !").
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'chartsaccount': '9', 'code': '6125'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('EDIT', 'code', '6125!')
        self.assert_json_equal('EDIT', 'name', '706')
        self.assert_json_equal('LABELFORM', 'type_of_account', 3)
        self.assert_json_equal('LABELFORM', 'error_code', "Changement non permis !")

    def test_modify_with_validated_line(self):
        """Once an entry using the account is closed, its code becomes read-only."""
        entry = add_entry(1, 3, '2015-04-15', 'Subvention 2', '-1|9|0|100.000000|0|0|None|\n-2|2|0|100.000000|0|0|None|', False)
        # While the entry is still open, the code is editable (EDIT widget).
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'chartsaccount': '9'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('EDIT', 'code', '706')
        self.assert_json_equal('EDIT', 'name', '706')
        self.assert_json_equal('LABELFORM', 'type_of_account', 3)
        self.assert_json_equal('LABELFORM', 'error_code', "")
        entry.closed()
        # After closing: code is shown as a LABELFORM (read-only), name stays editable.
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'chartsaccount': '9'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('LABELFORM', 'code', '706')
        self.assert_json_equal('EDIT', 'name', '706')
        self.assert_json_equal('LABELFORM', 'type_of_account', 3)
        self.assert_json_equal('LABELFORM', 'error_code', "")
        # A code change request is silently ignored.
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'chartsaccount': '9', 'code': '7061'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('LABELFORM', 'code', '706')
        self.assert_json_equal('EDIT', 'name', '706')
        self.assert_json_equal('LABELFORM', 'type_of_account', 3)
        self.assert_json_equal('LABELFORM', 'error_code', "")
        # Saving with a new code keeps the old code but the name change applies.
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'SAVE': 'YES', 'year': '1', 'type_of_account': '-1', 'chartsaccount': '9', 'code': '7061', 'name': "new code name"}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'chartsAccountAddModify')
        self.factory.xfer = ChartsAccountAddModify()
        self.calljson('/diacamma.accounting/chartsAccountAddModify',
                      {'year': '1', 'type_of_account': '-1', 'chartsaccount': '9'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountAddModify')
        self.assert_count_equal('', 5)
        self.assert_json_equal('LABELFORM', 'code', '706')
        self.assert_json_equal('EDIT', 'name', 'new code name')
        self.assert_json_equal('LABELFORM', 'type_of_account', 3)
        self.assert_json_equal('LABELFORM', 'error_code', "")

    def test_listing(self):
        """CSV export of the chart of accounts (all types, then expenses only)."""
        self.factory.xfer = ChartsAccountListing()
        self.calljson('/diacamma.accounting/chartsAccountListing',
                      {'year': '1', 'type_of_account': '-1', 'PRINT_MODE': '4', 'MODEL': 6}, False)
        self.assert_observer('core.print', 'diacamma.accounting', 'chartsAccountListing')
        # Printed content is returned base64-encoded.
        csv_value = b64decode(
            str(self.response_json['print']['content'])).decode("utf-8")
        content_csv = csv_value.split('\n')
        self.assertEqual(len(content_csv), 30, str(content_csv))
        self.assertEqual(content_csv[1].strip()[:27], '"Liste des comptes du plan ')
        self.assertEqual(content_csv[6].strip(), '"code";"nom";"total de l\'exercice précédent";"total de l\'exercice";"total validé";')
        self.assertEqual(content_csv[7].strip(), '"106";"106";"Crédit: 1 250,38 €";"Crédit: 1 250,38 €";"Crédit: 1 250,38 €";')
        self.assertEqual(content_csv[14].strip(), '"512";"512";"Débit: 1 135,93 €";"Débit: 1 130,29 €";"Débit: 1 130,29 €";')
        self.assertEqual(content_csv[15].strip(), '"531";"531";"Débit: 114,45 €";"Crédit: 79,63 €";"Débit: 114,45 €";')
        # Filtered export (expenses only) yields a shorter CSV.
        self.factory.xfer = ChartsAccountListing()
        self.calljson('/diacamma.accounting/chartsAccountListing',
                      {'year': '1', 'type_of_account': '4', 'PRINT_MODE': '4', 'MODEL': 6}, False)
        self.assert_observer('core.print', 'diacamma.accounting', 'chartsAccountListing')
        csv_value = b64decode(
            str(self.response_json['print']['content'])).decode("utf-8")
        content_csv = csv_value.split('\n')
        self.assertEqual(len(content_csv), 18, str(content_csv))

    def test_budget(self):
        """Budget screen: initial lists, edit/add/delete lines, read-only mode."""
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'year': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('', 6)
        self.assertEqual(len(self.json_actions), 4)
        self.assert_count_equal('budget_revenue', 2)
        self.assert_json_equal('', '#budget_revenue/headers/@0/@0', 'budget')
        self.assert_json_equal('', '#budget_revenue/headers/@0/@2', None)
        self.assert_json_equal('', '#budget_revenue/headers/@0/@4', "%s")
        self.assert_json_equal('', '#budget_revenue/headers/@1/@0', 'montant')
        self.assert_json_equal('', '#budget_revenue/headers/@1/@2', "C2EUR")
        self.assert_json_equal('', '#budget_revenue/headers/@1/@4', '{[p align=\'right\']}{[font color="green"]}Crédit: %s{[/font]}{[/p]};{[p align=\'right\']}{[font color="blue"]}Débit: %s{[/font]}{[/p]};{[p align=\'right\']}%s{[/p]}')
        self.assert_json_equal('', '#budget_expense/headers/@0/@0', 'budget')
        self.assert_json_equal('', '#budget_expense/headers/@0/@2', None)
        self.assert_json_equal('', '#budget_expense/headers/@0/@4', "%s")
        self.assert_json_equal('', '#budget_expense/headers/@1/@0', 'montant')
        self.assert_json_equal('', '#budget_expense/headers/@1/@2', "C2EUR")
        self.assert_json_equal('', '#budget_expense/headers/@1/@4', '{[p align=\'right\']}{[font color="green"]}Crédit: %s{[/font]}{[/p]};{[p align=\'right\']}{[font color="blue"]}Débit: %s{[/font]}{[/p]};{[p align=\'right\']}%s{[/p]}')
        self.assert_count_equal('#budget_revenue/actions', 2)
        self.assert_json_equal('', 'budget_revenue/@0/budget', '[701] 701')
        self.assert_json_equal('', 'budget_revenue/@0/montant', 67.89)
        self.assert_json_equal('', 'budget_revenue/@1/budget', '[707] 707')
        self.assert_json_equal('', 'budget_revenue/@1/montant', 123.45)
        self.assert_count_equal('budget_expense', 3)
        self.assert_json_equal('', 'budget_expense/@0/budget', '[601] 601')
        self.assert_json_equal('', 'budget_expense/@0/montant', -8.19)
        self.assert_json_equal('', 'budget_expense/@1/budget', '[602] 602')
        self.assert_json_equal('', 'budget_expense/@1/montant', -7.35)
        self.assert_json_equal('', 'budget_expense/@2/budget', '[604] 604')
        self.assert_json_equal('', 'budget_expense/@2/montant', -6.24)
        self.assert_count_equal('#budget_expense/actions', 2)
        self.assert_json_equal('LABELFORM', 'result', 169.56)
        # Modify an existing expense line (602).
        self.factory.xfer = BudgetAddModify()
        self.calljson('/diacamma.accounting/budgetAddModify', {'year': '1', 'budget_expense': 'C602'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetAddModify')
        self.assert_count_equal('', 4)
        self.assertEqual(len(self.json_actions), 2)
        self.assert_json_equal('', 'code', '602')
        self.assert_json_equal('', 'debit_val', '7.35')
        self.assert_json_equal('', 'credit_val', '0.00')
        self.factory.xfer = BudgetAddModify()
        self.calljson('/diacamma.accounting/budgetAddModify', {'year': '1', 'budget_expense': 'C602', 'code': '602', 'debit_val': '19.64', 'credit_val': '0.00', 'SAVE': 'YES'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'budgetAddModify')
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'year': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('budget_revenue', 2)
        self.assert_count_equal('budget_expense', 3)
        self.assert_json_equal('', 'budget_expense/@1/budget', '[602] 602')
        self.assert_json_equal('', 'budget_expense/@1/montant', -19.64)
        self.assert_json_equal('LABELFORM', 'result', 157.27)
        # Add a new expense line (607).
        self.factory.xfer = BudgetAddModify()
        self.calljson('/diacamma.accounting/budgetAddModify', {'year': '1', 'code': '607', 'debit_val': '92.73', 'credit_val': '0.00', 'SAVE': 'YES'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'budgetAddModify')
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'year': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('budget_revenue', 2)
        self.assert_count_equal('budget_expense', 4)
        self.assert_json_equal('', 'budget_expense/@3/budget', '[607] 607')
        self.assert_json_equal('', 'budget_expense/@3/montant', -92.73)
        self.assert_json_equal('LABELFORM', 'result', 64.54)
        # Delete an expense line (604).
        self.factory.xfer = BudgetDel()
        self.calljson('/diacamma.accounting/budgetDel', {'year': '1', 'budget_expense': 'C604', 'CONFIRME': 'YES'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'budgetDel')
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'year': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('budget_revenue', 2)
        self.assert_count_equal('budget_expense', 3)
        self.assert_json_equal('LABELFORM', 'result', 70.78)
        # Read-only mode: no row actions, fewer dialog actions.
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'year': '1', 'readonly': True}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('', 6)
        self.assertEqual(len(self.json_actions), 2)
        self.assert_count_equal('budget_revenue', 2)
        self.assert_count_equal('#budget_revenue/actions', 0)
        self.assert_count_equal('budget_expense', 3)
        self.assert_count_equal('#budget_expense/actions', 0)
        self.assert_json_equal('LABELFORM', 'result', 70.78)
class FiscalYearWorkflowTest(PaymentTest):
    def setUp(self):
        """Build the French accounting fixtures used by every workflow test."""
        # BudgetList.url_text
        LucteriosTest.setUp(self)
        set_accounting_system()
        initial_thirds_fr()
        default_compta_fr()
        fill_entries_fr(1)
        rmtree(get_user_dir(), True)
    def _add_subvention(self):
        """Helper: create the subsidy accounts and two validated subsidy entries."""
        create_account(['441'], 1)  # subvention (état) N°18
        create_account(['740'], 3)  # subvention (revenu) N°19
        add_entry(1, 3, '2015-03-10', 'Subvention 1', '-1|19|0|35.500000|0|0|None|\n-2|18|0|-35.500000|0|0|None|', True)  # 23 24
        add_entry(1, 3, '2015-04-15', 'Subvention 2', '-1|19|0|99.950000|0|0|None|\n-2|18|0|-99.950000|0|0|None|', True)  # 25 26
    def test_begin_simple(self):
        """Nominal 'begin fiscal year' flow: confirm dialog, status 0 -> 1."""
        self.assertEqual(FiscalYear.objects.get(id=1).status, 0)
        # Before beginning, the main list action is "Commencer".
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList',
                      {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('', 8)
        self.assertEqual(len(self.json_actions), 4)
        self.assert_action_equal('POST', self.json_actions[0], ('Commencer', 'images/ok.png', 'diacamma.accounting', 'fiscalYearBegin', 0, 1, 1))
        # First call without CONFIRME shows the confirmation dialog.
        self.factory.xfer = FiscalYearBegin()
        self.calljson('/diacamma.accounting/fiscalYearBegin',
                      {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.dialogbox', 'diacamma.accounting', 'fiscalYearBegin')
        self.assert_json_equal('', 'text', "Voulez-vous commencer 'Exercice du 1 janvier 2015 au 31 décembre 2015", True)
        self.factory.xfer = FiscalYearBegin()
        self.calljson('/diacamma.accounting/fiscalYearBegin',
                      {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearBegin')
        self.assertEqual(FiscalYear.objects.get(id=1).status, 1)
        # After beginning, the main list action becomes "Clôture".
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList',
                      {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assertEqual(len(self.json_actions), 4)
        self.assert_action_equal('POST', self.json_actions[0], ('Clôture', 'images/ok.png', 'diacamma.accounting', 'fiscalYearClose', 0, 1, 1))
    def test_begin_lastyearnovalid(self):
        """Begin is refused while a carry-over entry is still unvalidated."""
        self.assertEqual(FiscalYear.objects.get(id=1).status, 0)
        # Unvalidated carry-over entry (last arg False = not closed).
        new_entry = add_entry(1, 1, '2015-04-11', 'Report à nouveau aussi', '-1|1|0|37.61|0|0|None|\n-2|2|0|-37.61|0|0|None|', False)
        self.factory.xfer = FiscalYearBegin()
        self.calljson('/diacamma.accounting/fiscalYearBegin',
                      {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.exception', 'diacamma.accounting', 'fiscalYearBegin')
        self.assert_json_equal('', 'message', "Des écritures au journal Report à nouveau ne sont pas validées !")
        # Once the entry is closed, beginning the year succeeds.
        new_entry.closed()
        self.factory.xfer = FiscalYearBegin()
        self.calljson('/diacamma.accounting/fiscalYearBegin',
                      {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearBegin')
        self.assertEqual(FiscalYear.objects.get(id=1).status, 1)
    def test_begin_withbenef(self):
        """Begin with a prior-year profit: user must choose a profit account."""
        self.assertEqual(FiscalYear.objects.get(id=1).status, 0)
        # Validated carry-over entry creating a 123.45 profit on account 120.
        add_entry(1, 1, '2015-04-11', 'Report à nouveau bénèf', '-1|16|0|123.45|0|0|None|\n-2|2|0|123.45|0|0|None|', True)
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList',
                      {'year': '1', 'type_of_account': '2'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('chartsaccount', 5)
        self.assert_json_equal('', 'chartsaccount/@0/code', '106')
        self.assert_json_equal('', 'chartsaccount/@0/last_year_total', 1250.38)
        self.assert_json_equal('', 'chartsaccount/@3/code', '120')
        self.assert_json_equal('', 'chartsaccount/@3/last_year_total', 123.45)
        # Begin now opens a dialog asking where to post the profit.
        self.factory.xfer = FiscalYearBegin()
        self.calljson('/diacamma.accounting/fiscalYearBegin',
                      {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearBegin')
        self.assert_count_equal('', 4)
        self.assert_json_equal('LABELFORM', 'info', "{[i]}Vous avez un bénéfice de 123,45 €.{[br/]}", True)
        self.assert_json_equal('SELECT', 'profit_account', '5')
        self.assert_select_equal('profit_account', 3)  # nb=3
        self.assertEqual(len(self.json_actions), 2)
        self.factory.xfer = FiscalYearBegin()
        self.calljson('/diacamma.accounting/fiscalYearBegin',
                      {'profit_account': '5', 'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearBegin')
        self.assertEqual(FiscalYear.objects.get(id=1).status, 1)
        # The profit moved to account 106; account 120 is back to zero.
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList',
                      {'year': '1', 'type_of_account': '2'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('chartsaccount', 5)
        self.assert_json_equal('', 'chartsaccount/@0/code', '106')
        self.assert_json_equal('', 'chartsaccount/@0/last_year_total', 1250.38)
        self.assert_json_equal('', 'chartsaccount/@0/current_total', 1373.83)
        self.assert_json_equal('', 'chartsaccount/@3/code', '120')
        self.assert_json_equal('', 'chartsaccount/@3/last_year_total', 123.45)
        self.assert_json_equal('', 'chartsaccount/@3/current_total', 0.00)
    def test_begin_dont_add_report(self):
        """After beginning the year, asking the entry dialog for journal 1 falls back to journal 2."""
        self.factory.xfer = FiscalYearBegin()
        self.calljson('/diacamma.accounting/fiscalYearBegin',
                      {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearBegin')
        self.assertEqual(FiscalYear.objects.get(id=1).status, 1)
        self.factory.xfer = EntryAccountEdit()
        self.calljson('/diacamma.accounting/entryAccountEdit', {'year': '1', 'journal': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
        self.assert_count_equal('', 4)
        self.assert_select_equal('journal', 4)  # nb=4
        self.assert_json_equal('SELECT', 'journal', '2')
        self.assertEqual(len(self.json_actions), 2)
    def test_import_charsaccount(self):
        """Import the chart of accounts from the previous fiscal year.

        Import must fail for a year without predecessor, copy the 17
        accounts into the new year, and be idempotent when repeated.
        """
        import_module("diacamma.asso.views")
        FiscalYear.objects.create(begin='2016-01-01', end='2016-12-31', status=0,
                                  last_fiscalyear=FiscalYear.objects.get(id=1))
        self.assertEqual(FiscalYear.objects.get(id=1).status, 0)
        self.assertEqual(FiscalYear.objects.get(id=2).status, 0)
        # year 1 has no previous year: the import is rejected
        self.factory.xfer = ChartsAccountImportFiscalYear()
        self.calljson('/diacamma.accounting/chartsAccountImportFiscalYear',
                      {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.exception', 'diacamma.accounting', 'chartsAccountImportFiscalYear')
        self.assert_json_equal('', 'message', "Cet exercice n'a pas d'exercice précédent !")
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('chartsaccount', 17)
        self.assert_count_equal('#chartsaccount/actions', 5)
        # the empty year 2 exposes an extra 'Import' action
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '2', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('chartsaccount', 0)
        self.assert_count_equal('#chartsaccount/actions', 6)
        self.assert_action_equal('POST', '#chartsaccount/actions/@3',
                                 ('Import', 'images/right.png', 'diacamma.accounting', 'chartsAccountImportFiscalYear', 0, 1, 1))
        # first import copies the 17 accounts of year 1
        self.factory.xfer = ChartsAccountImportFiscalYear()
        self.calljson('/diacamma.accounting/chartsAccountImportFiscalYear',
                      {'CONFIRME': 'YES', 'year': '2', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'chartsAccountImportFiscalYear')
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList',
                      {'year': '2', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('chartsaccount', 17)
        # importing a second time must not create duplicates
        self.factory.xfer = ChartsAccountImportFiscalYear()
        self.calljson('/diacamma.accounting/chartsAccountImportFiscalYear',
                      {'CONFIRME': 'YES', 'year': '2', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'chartsAccountImportFiscalYear')
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList',
                      {'year': '2', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('chartsaccount', 17)
    def test_close(self):
        """Full fiscal-year closing scenario.

        Checks, in order: closing is refused before the year starts,
        third-party balances before closing, refusal when non-validated
        entries exist without a following year, the generated closing
        entries (result + third parties), the resulting charts of account,
        the archived PDF reports, and the final income statement and
        balance sheet.
        """
        self._add_subvention()
        Params.setvalue('accounting-needcost', '1')
        # no archived report document exists yet
        self.factory.xfer = DocumentSearch()
        self.calljson('/lucterios.documents/documentSearch', {}, False)
        self.assert_observer('core.custom', 'lucterios.documents', 'documentSearch')
        self.assert_count_equal('document', 0)
        self.assertEqual(FiscalYear.objects.get(id=1).status, 0)
        # closing a year that has not been started is an error
        self.factory.xfer = FiscalYearClose()
        self.calljson('/diacamma.accounting/fiscalYearClose', {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.exception', 'diacamma.accounting', 'fiscalYearClose')
        self.assert_json_equal('', 'message', "Cet exercice n'est pas 'en cours' !")
        self.factory.xfer = FiscalYearBegin()
        self.calljson('/diacamma.accounting/fiscalYearBegin', {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearBegin')
        self.assertEqual(FiscalYear.objects.get(id=1).status, 1)
        # third-party balances before closing
        self.factory.xfer = ThirdList()
        self.calljson('/diacamma.accounting/thirdList', {'show_filter': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'thirdList')
        self.assert_json_equal('', '#third/headers/@2/@0', 'total')
        self.assert_json_equal('', '#third/headers/@2/@1', 'total')
        self.assert_json_equal('', '#third/headers/@2/@2', "C2EUR")
        self.assert_json_equal('', '#third/headers/@2/@4', "{[p align='right']}%s{[/p]}")
        self.assert_json_equal('', 'third/@1/contact', 'Dalton Jack')
        self.assert_json_equal('', 'third/@1/total', 0.0)
        self.assert_json_equal('', 'third/@3/contact', 'Dalton William')
        self.assert_json_equal('', 'third/@3/total', -125.97)
        self.assert_json_equal('', 'third/@6/contact', 'Minimum')
        self.assert_json_equal('', 'third/@6/total', -34.01)
        self.check_account(1, '411', 159.98)
        self.check_account(1, '401', 78.24)
        self.check_account(1, '441', -135.45)
        # non-validated entries require a following fiscal year to carry over
        self.factory.xfer = FiscalYearClose()
        self.calljson('/diacamma.accounting/fiscalYearClose', {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.exception', 'diacamma.accounting', 'fiscalYearClose')
        self.assert_json_equal('', 'message', "Cet exercice a des écritures non-validées et pas d'exercice suivant !")
        FiscalYear.objects.create(begin='2016-01-01', end='2016-12-31', status=0, last_fiscalyear=FiscalYear.objects.get(id=1))
        # with a following year the close dialog asks for confirmation
        self.factory.xfer = FiscalYearClose()
        self.calljson('/diacamma.accounting/fiscalYearClose', {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearClose')
        text_value = self.json_data['info']
        self.assertTrue('Voulez-vous clôturer cet exercice ?' in text_value, text_value)
        self.assertTrue('les 4 écritures non validées' in text_value, text_value)
        self.factory.xfer = EntryAccountList()
        self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
        self.assert_count_equal('entryline', 25)
        self.assert_json_equal('LABELFORM', 'result', [366.07, 348.60, 17.47, 1050.66, 1244.74])
        self.factory.xfer = EntryAccountList()
        self.calljson('/diacamma.accounting/entryAccountList', {'year': '2', 'journal': '0', 'filter': '0'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
        self.assert_count_equal('entryline', 0)
        self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
        # actually close the year
        self.factory.xfer = FiscalYearClose()
        self.calljson('/diacamma.accounting/fiscalYearClose', {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearClose')
        # status 2 == closed
        self.assertEqual(FiscalYear.objects.get(id=1).status, 2)
        self.factory.xfer = EntryAccountList()
        self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
        self.assert_count_equal('entryline', 25)
        self.assert_json_equal('LABELFORM', 'result', [332.06, 76.28, 255.78, 1244.74, 1244.74])
        # closing entries generated in the 'report' journal (5)
        self.factory.xfer = EntryAccountList()
        self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '5', 'filter': '2'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
        self.assert_count_equal('entryline', 9)
        self.assert_json_equal('', 'entryline/@2/designation_ref', "Cloture d'exercice - Résultat")
        self.assert_json_equal('', 'entryline/@2/entry_account', "[120] 120")
        self.assert_json_equal('', 'entryline/@2/credit', 255.78)
        self.assert_json_equal('', 'entryline/@2/link', None)
        self.assert_json_equal('', 'entryline/@3/designation_ref', "Cloture d'exercice - Résultat")
        self.assert_json_equal('', 'entryline/@3/entry_account', "[602] 602")
        self.assert_json_equal('', 'entryline/@3/credit', 63.94)
        self.assert_json_equal('', 'entryline/@4/designation_ref', "Cloture d'exercice - Résultat")
        self.assert_json_equal('', 'entryline/@4/entry_account', "[627] 627")
        self.assert_json_equal('', 'entryline/@4/credit', 12.34)
        self.assert_json_equal('', 'entryline/@5/designation_ref', "Cloture d'exercice - Résultat")
        self.assert_json_equal('', 'entryline/@5/entry_account', "[707] 707")
        self.assert_json_equal('', 'entryline/@5/debit', -196.61)
        self.assert_json_equal('', 'entryline/@6/designation_ref', "Cloture d'exercice - Résultat")
        self.assert_json_equal('', 'entryline/@6/entry_account', "[740] 740")
        self.assert_json_equal('', 'entryline/@6/debit', -135.45)
        self.assert_json_equal('', 'entryline/@7/designation_ref', "Cloture d'exercice - Tiers")
        self.assert_json_equal('', 'entryline/@7/entry_account', "[411] 411")
        self.assert_json_equal('', 'entryline/@7/debit', -125.97)
        self.assert_json_equal('', 'entryline/@7/link', None)
        self.assert_json_equal('', 'entryline/@8/designation_ref', "Cloture d'exercice - Tiers{[br/]}vente 2")
        self.assert_json_equal('', 'entryline/@8/entry_account', "[411 Dalton William]")
        self.assert_json_equal('', 'entryline/@8/credit', 125.97)
        self.assert_json_equal('', 'entryline/@8/link', "E")
        # non-validated entries were carried over into year 2
        self.factory.xfer = EntryAccountList()
        self.calljson('/diacamma.accounting/entryAccountList', {'year': '2', 'journal': '0', 'filter': '0'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
        self.assert_count_equal('entryline', 8)
        self.assert_json_equal('LABELFORM', 'result', [34.01, 272.32, -238.31, -194.08, 0.00])
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('', 8)
        self.assert_count_equal('chartsaccount', 19)
        self.assert_json_equal('', 'chartsaccount/@3/code', '120')
        self.assert_json_equal('', 'chartsaccount/@3/current_total', 255.78)
        self.assert_json_equal('', 'chartsaccount/@5/code', '401')
        self.assert_json_equal('', 'chartsaccount/@5/current_total', 0.00)
        self.assert_json_equal('', 'chartsaccount/@6/code', '411')
        self.assert_json_equal('', 'chartsaccount/@6/current_total', -125.97)
        self.assert_json_equal('', 'chartsaccount/@7/code', '441')
        self.assert_json_equal('', 'chartsaccount/@7/current_total', -135.45)
        # closing archives the PDF reports as documents
        check_pdfreport(self, '1', 'Bilan.pdf', "FiscalYearBalanceSheet", "diacamma.accounting.views_reports")
        check_pdfreport(self, '1', 'Compte de resultat.pdf', "FiscalYearIncomeStatement", "diacamma.accounting.views_reports")
        self.factory.xfer = DocumentSearch()
        self.calljson('/lucterios.documents/documentSearch', {}, False)
        self.assert_observer('core.custom', 'lucterios.documents', 'documentSearch')
        self.assert_count_equal('document', 4)
        # income statement of the closed year (columns: current / last year)
        self.factory.xfer = FiscalYearIncomeStatement()
        self.calljson('/diacamma.accounting/fiscalYearIncomeStatement', {'year': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearIncomeStatement')
        self.assert_count_equal('', 4)
        self.assert_count_equal('report_1', 8)
        self.assert_json_equal('', 'report_1/@0/left', "[601] 601")
        self.assert_json_equal('', 'report_1/@0/left_n', "")
        self.assert_json_equal('', 'report_1/@0/left_b', 8.19)
        self.assert_json_equal('', 'report_1/@0/right', "[701] 701")
        self.assert_json_equal('', 'report_1/@0/right_n', "")
        self.assert_json_equal('', 'report_1/@0/right_b', 67.89)
        self.assert_json_equal('', 'report_1/@1/left', "[602] 602")
        self.assert_json_equal('', 'report_1/@1/left_n', 63.94)
        self.assert_json_equal('', 'report_1/@1/left_b', 7.359)
        self.assert_json_equal('', 'report_1/@1/right', "[707] 707")
        self.assert_json_equal('', 'report_1/@1/right_n', 196.61)
        self.assert_json_equal('', 'report_1/@1/right_b', 123.45)
        self.assert_json_equal('', 'report_1/@2/left', "[604] 604")
        self.assert_json_equal('', 'report_1/@2/left_n', "")
        self.assert_json_equal('', 'report_1/@2/left_b', 6.24)
        self.assert_json_equal('', 'report_1/@2/right', "[740] 740")
        self.assert_json_equal('', 'report_1/@2/right_n', 135.45)
        self.assert_json_equal('', 'report_1/@2/right_b', "")
        self.assert_json_equal('', 'report_1/@3/left', "[627] 627")
        self.assert_json_equal('', 'report_1/@3/left_n', 12.34)
        self.assert_json_equal('', 'report_1/@3/left_b', "")
        self.assert_json_equal('', 'report_1/@3/right', "")
        self.assert_json_equal('', 'report_1/@3/right_n', "")
        self.assert_json_equal('', 'report_1/@3/right_b', "")
        self.assert_json_equal('', 'report_1/@5/left', "          {[u]}{[b]}total{[/b]}{[/u]}")
        self.assert_json_equal('', 'report_1/@5/left_n', {"value": 76.28, "format": "{[u]}{[b]}{0}{[/b]}{[/u]}"})
        self.assert_json_equal('', 'report_1/@5/left_b', {"value": 21.78, "format": "{[u]}{[b]}{0}{[/b]}{[/u]}"})
        self.assert_json_equal('', 'report_1/@5/right', "          {[u]}{[b]}total{[/b]}{[/u]}")
        self.assert_json_equal('', 'report_1/@5/right_n', {"value": 332.06, "format": "{[u]}{[b]}{0}{[/b]}{[/u]}"})
        self.assert_json_equal('', 'report_1/@5/right_b', {"value": 191.34, "format": "{[u]}{[b]}{0}{[/b]}{[/u]}"})
        self.assert_json_equal('', 'report_1/@6/left', "     {[i]}résultat (excédent){[/i]}")
        self.assert_json_equal('', 'report_1/@6/left_n', 255.78)
        self.assert_json_equal('', 'report_1/@6/left_b', 169.56)
        self.assert_json_equal('', 'report_1/@6/right', "")
        self.assert_json_equal('', 'report_1/@6/right_n', "")
        self.assert_json_equal('', 'report_1/@6/right_b', "")
        # balance sheet of the closed year
        self.factory.xfer = FiscalYearBalanceSheet()
        self.calljson('/diacamma.accounting/fiscalYearBalanceSheet', {'year': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearBalanceSheet')
        self.assert_count_equal('report_1', 11)
        self.assert_json_equal('', 'report_1/@1/left', "[411] 411")
        self.assert_json_equal('', 'report_1/@1/left_n', 125.97)
        self.assert_json_equal('', 'report_1/@1/right', "[106] 106")
        self.assert_json_equal('', 'report_1/@1/right_n', 1250.38)
        self.assert_json_equal('', 'report_1/@2/left', "[441] 441")
        self.assert_json_equal('', 'report_1/@2/left_n', 135.45)
        self.assert_json_equal('', 'report_1/@2/right', "[120] 120")
        self.assert_json_equal('', 'report_1/@2/right_n', 255.78)
        self.assert_json_equal('', 'report_1/@3/left', {"format": "{[i]}{0}{[/i]}", "value": "          Sous-total"})
        self.assert_json_equal('', 'report_1/@3/left_n', {"format": "{[i]}{0}{[/i]}", "value": 261.41999999999996})
        self.assert_json_equal('', 'report_1/@3/right', {"format": "{[i]}{0}{[/i]}", "value": "          Sous-total"})
        self.assert_json_equal('', 'report_1/@3/right_n', {"format": "{[i]}{0}{[/i]}", "value": 1506.16})
        self.assert_json_equal('', 'report_1/@6/left', "[512] 512")
        self.assert_json_equal('', 'report_1/@6/left_n', 1130.2900000000002)
        self.assert_json_equal('', 'report_1/@6/right', "")
        self.assert_json_equal('', 'report_1/@6/right_n', "")
        self.assert_json_equal('', 'report_1/@7/left', "[531] 531")
        self.assert_json_equal('', 'report_1/@7/left_n', 114.45)
        self.assert_json_equal('', 'report_1/@7/right', "")
        self.assert_json_equal('', 'report_1/@7/right_n', "")
        self.assert_json_equal('', 'report_1/@8/left', {"format": "{[i]}{0}{[/i]}", "value": "          Sous-total"})
        self.assert_json_equal('', 'report_1/@8/left_n', {"format": "{[i]}{0}{[/i]}", "value": 1244.7400000000002})
        self.assert_json_equal('', 'report_1/@8/right', "")
        self.assert_json_equal('', 'report_1/@8/right_n', "")
        self.assert_json_equal('', 'report_1/@10/left', {"format": "{[u]}{[b]}{0}{[/b]}{[/u]}", "value": "Total"})
        self.assert_json_equal('', 'report_1/@10/left_n', {"value": 1506.1600000000003, "format": "{[u]}{[b]}{0}{[/b]}{[/u]}"})
        self.assert_json_equal('', 'report_1/@10/right', {"format": "{[u]}{[b]}{0}{[/b]}{[/u]}", "value": "Total"})
        self.assert_json_equal('', 'report_1/@10/right_n', {"value": 1506.16, "format": "{[u]}{[b]}{0}{[/b]}{[/u]}"})
    def test_import_lastyear(self):
        """Close year 1 then import its closing balances into year 2.

        'FiscalYearReportLastYear' must create the "Report à nouveau"
        opening entries in journal 1 of the new year and leave its status
        unchanged.
        """
        self._add_subvention()
        FiscalYear.objects.create(begin='2016-01-01', end='2016-12-31', status=0, last_fiscalyear=FiscalYear.objects.get(id=1))
        self.factory.xfer = FiscalYearBegin()
        self.calljson('/diacamma.accounting/fiscalYearBegin', {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearBegin')
        self.assertEqual(FiscalYear.objects.get(id=1).status, 1)
        self.factory.xfer = FiscalYearClose()
        self.calljson('/diacamma.accounting/fiscalYearClose', {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearClose')
        self.assertEqual(FiscalYear.objects.get(id=1).status, 2)
        self.assertEqual(FiscalYear.objects.get(id=2).status, 0)
        # import the closing balances as opening entries of year 2
        self.factory.xfer = FiscalYearReportLastYear()
        self.calljson('/diacamma.accounting/fiscalYearReportLastYear', {'CONFIRME': 'YES', 'year': '2', 'type_of_account': '-1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearReportLastYear')
        self.assertEqual(FiscalYear.objects.get(id=2).status, 0)
        self.factory.xfer = EntryAccountList()
        self.calljson('/diacamma.accounting/entryAccountList', {'year': '2', 'journal': '0', 'filter': '0'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
        self.assert_count_equal('entryline', 16)
        self.assert_json_equal('LABELFORM', 'result', [34.01, 272.32, -238.31, 1050.66, 1244.74])
        # opening ("report à nouveau") entries live in journal 1
        self.factory.xfer = EntryAccountList()
        self.calljson('/diacamma.accounting/entryAccountList', {'year': '2', 'journal': '1', 'filter': '0'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
        self.assert_count_equal('entryline', 8)
        self.assert_json_equal('', 'entryline/@0/designation_ref', "Report à nouveau - Bilan")
        self.assert_json_equal('', 'entryline/@0/entry_account', "[106] 106")
        self.assert_json_equal('', 'entryline/@0/link', None)
        self.assert_json_equal('', 'entryline/@1/designation_ref', "Report à nouveau - Bilan")
        self.assert_json_equal('', 'entryline/@1/entry_account', "[120] 120")
        self.assert_json_equal('', 'entryline/@1/link', None)
        self.assert_json_equal('', 'entryline/@2/designation_ref', "Report à nouveau - Bilan")
        self.assert_json_equal('', 'entryline/@2/entry_account', "[411] 411")
        self.assert_json_equal('', 'entryline/@2/link', None)
        self.assert_json_equal('', 'entryline/@3/designation_ref', "Report à nouveau - Bilan")
        self.assert_json_equal('', 'entryline/@3/entry_account', "[441] 441")
        self.assert_json_equal('', 'entryline/@3/link', None)
        self.assert_json_equal('', 'entryline/@4/designation_ref', "Report à nouveau - Bilan")
        self.assert_json_equal('', 'entryline/@4/entry_account', "[512] 512")
        self.assert_json_equal('', 'entryline/@4/link', None)
        self.assert_json_equal('', 'entryline/@5/designation_ref', "Report à nouveau - Bilan")
        self.assert_json_equal('', 'entryline/@5/entry_account', "[531] 531")
        self.assert_json_equal('', 'entryline/@5/link', None)
        self.assert_json_equal('', 'entryline/@6/designation_ref', "Report à nouveau - Dette tiers")
        self.assert_json_equal('', 'entryline/@6/entry_account', "[411] 411")
        self.assert_json_equal('', 'entryline/@6/credit', 125.97)
        self.assert_json_equal('', 'entryline/@6/link', None)
        self.assert_json_equal('', 'entryline/@7/designation_ref', "Report à nouveau - Dette tiers{[br/]}vente 2")
        self.assert_json_equal('', 'entryline/@7/entry_account', "[411 Dalton William]")
        self.assert_json_equal('', 'entryline/@7/debit', -125.97)
        self.assert_json_equal('', 'entryline/@7/link', None)
        # resulting chart of accounts of year 2
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '2', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assert_count_equal('', 8)
        self.assertEqual(len(self.json_actions), 4)
        self.assert_count_equal('chartsaccount', 10)
        self.assert_json_equal('', 'chartsaccount/@1/code', '120')
        self.assert_json_equal('', 'chartsaccount/@1/current_total', 255.78)
        self.assert_json_equal('', 'chartsaccount/@3/code', '411')
        self.assert_json_equal('', 'chartsaccount/@3/current_total', -159.98)
        self.assert_json_equal('', 'chartsaccount/@4/code', '441')
        self.assert_json_equal('', 'chartsaccount/@4/current_total', -135.45)
        # the closed year exposes fewer actions
        self.factory.xfer = ChartsAccountList()
        self.calljson('/diacamma.accounting/chartsAccountList', {'year': '1', 'type_of_account': '-1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
        self.assertEqual(len(self.json_actions), 3)
|
Diacamma2/financial
|
diacamma/accounting/tests_accounts.py
|
Python
|
gpl-3.0
| 57,816
|
[
"Dalton"
] |
19207c01d4159d4beb0de495464635220ec9dcca7bf1a0cf853f4a4492502b1b
|
"""Generate html report from MNE database
"""
# Authors: Alex Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Mainak Jas <mainak@neuro.hut.fi>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import fnmatch
import re
import codecs
import time
from glob import glob
import warnings
import base64
from datetime import datetime as dt
import numpy as np
from . import read_evokeds, read_events, pick_types, read_cov
from .io import Raw, read_info
from .utils import _TempDir, logger, verbose, get_subjects_dir
from .viz import plot_events, plot_trans, plot_cov
from .viz._3d import _plot_mri_contours
from .forward import read_forward_solution
from .epochs import read_epochs
from .minimum_norm import read_inverse_operator
from .parallel import parallel_func, check_n_jobs
from .externals.tempita import HTMLTemplate, Template
from .externals.six import BytesIO
from .externals.six import moves
# File-name endings the report renderers recognise; anything else is
# flagged (rendered red) in the table of contents by _is_bad_fname.
VALID_EXTENSIONS = ['raw.fif', 'raw.fif.gz', 'sss.fif', 'sss.fif.gz',
                    '-eve.fif', '-eve.fif.gz', '-cov.fif', '-cov.fif.gz',
                    '-trans.fif', '-trans.fif.gz', '-fwd.fif', '-fwd.fif.gz',
                    '-epo.fif', '-epo.fif.gz', '-inv.fif', '-inv.fif.gz',
                    '-ave.fif', '-ave.fif.gz', 'T1.mgz']
# Order in which sections are laid out in the rendered report.
SECTION_ORDER = ['raw', 'events', 'epochs', 'evoked', 'covariance', 'trans',
                 'mri', 'forward', 'inverse']
###############################################################################
# PLOTTING FUNCTIONS
def _fig_to_img(function=None, fig=None, image_format='png',
                scale=None, **kwargs):
    """Serialize a matplotlib figure into image data.

    Either an existing figure (``fig``) or a plotting callable
    (``function``, invoked with ``**kwargs``) is rendered.  SVG output is
    returned as raw markup, any other format as a base64-encoded string.
    """
    import matplotlib.pyplot as plt
    if function is not None:
        # start from a clean slate so the callable owns the only open figure
        plt.close('all')
        fig = function(**kwargs)
    if scale is not None:
        _scale_mpl_figure(fig, scale)
    buf = BytesIO()
    fig.savefig(buf, format=image_format, bbox_inches='tight',
                dpi=fig.get_dpi())
    plt.close(fig)
    raw = buf.getvalue()
    if image_format == 'svg':
        return raw
    return base64.b64encode(raw).decode('ascii')
def _scale_mpl_figure(fig, scale):
    """Magic scaling helper.

    Keeps font-size and artist sizes roughly constant:
    0.5 : current font - 4pt, 2.0 : current font + 4pt.
    XXX it's unclear why this works, but good to go for most cases
    """
    import matplotlib as mpl
    fig.set_size_inches(fig.get_size_inches() * scale)
    fig.set_dpi(fig.get_dpi() * scale)
    # empirical font offset: positive when growing, negative when shrinking
    sfactor = scale ** 2 if scale >= 1 else -((1. / scale) ** 2)
    for text in fig.findobj(mpl.text.Text):
        new_size = text.get_fontsize() + sfactor
        if new_size <= 0:
            raise ValueError('could not rescale matplotlib fonts, consider '
                             'increasing "scale"')
        text.set_fontsize(new_size)
    fig.canvas.draw()
def _figs_to_mrislices(sl, n_jobs, **kwargs):
    """Plot MRI contour slices in parallel and merge the per-job outputs."""
    import matplotlib.pyplot as plt
    plt.close('all')
    use_jobs = min(n_jobs, max(1, len(sl)))
    parallel, p_fun, _ = parallel_func(_plot_mri_contours, use_jobs)
    chunks = np.array_split(sl, use_jobs)
    outs = parallel(p_fun(slices=chunk, **kwargs) for chunk in chunks)
    merged = outs[0]
    for extra in outs[1:]:
        merged += extra
    return merged
def _iterate_trans_views(function, **kwargs):
    """Auxiliary function to iterate over views in trans fig.

    Renders the mayavi coregistration scene produced by ``function`` from
    three canonical (azimuth, elevation) views, pastes the screenshots into
    a single matplotlib figure and returns it as encoded image data.
    """
    import matplotlib.pyplot as plt
    import mayavi
    fig = function(**kwargs)
    assert isinstance(fig, mayavi.core.scene.Scene)
    views = [(90, 90), (0, 90), (0, -90)]
    fig2, axes = plt.subplots(1, len(views))
    for view, ax in zip(views, axes):
        mayavi.mlab.view(view[0], view[1])
        # XXX: save_bmp / save_png / ...
        tempdir = _TempDir()
        temp_fname = op.join(tempdir, 'test.png')
        if fig.scene is not None:
            fig.scene.save_png(temp_fname)
            # scipy.misc.imread was deprecated in SciPy 1.0 and removed in
            # 1.2; matplotlib's loader is equivalent for imshow purposes.
            im = plt.imread(temp_fname)
        else:  # Testing mode
            im = np.zeros((2, 2, 3))
        ax.imshow(im)
        ax.axis('off')
    mayavi.mlab.close(fig)
    img = _fig_to_img(fig=fig2)
    return img
###############################################################################
# TOC FUNCTIONS
def _is_bad_fname(fname):
    """Return 'red' for a TOC file name the report cannot render, else ''.

    Used to highlight unexpected file-naming patterns in the table of
    contents.
    """
    if fname.endswith('(whitened)'):
        fname = fname[:-11]  # drop the trailing ' (whitened)' marker
    acceptable = tuple(VALID_EXTENSIONS + ['bem', 'custom'])
    return '' if fname.endswith(acceptable) else 'red'
def _get_toc_property(fname):
"""Auxiliary function to assign class names to TOC
list elements to allow toggling with buttons.
"""
if fname.endswith(('-eve.fif', '-eve.fif.gz')):
div_klass = 'events'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-cov.fif', '-cov.fif.gz')):
div_klass = 'covariance'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('raw.fif', 'raw.fif.gz',
'sss.fif', 'sss.fif.gz')):
div_klass = 'raw'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-trans.fif', '-trans.fif.gz')):
div_klass = 'trans'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
div_klass = 'forward'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
div_klass = 'inverse'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
div_klass = 'epochs'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('.nii', '.nii.gz', '.mgh', '.mgz')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith(('bem')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith('(whitened)'):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname[:-11]) + '(whitened)'
else:
div_klass = fname.split('-#-')[1]
tooltip = fname.split('-#-')[0]
text = fname.split('-#-')[0]
return div_klass, tooltip, text
def _iterate_files(report, fnames, info, cov, baseline, sfreq, on_error):
    """Auxiliary function to parallel process in batch mode.

    Dispatches each file name in ``fnames`` to the matching ``report``
    renderer (based on its suffix) and collects, per file, the produced
    HTML, the name shown in the table of contents and the section label.
    Files that match no pattern — or that fail while ``on_error`` is not
    'raise' — contribute ``None`` entries.
    """
    htmls, report_fnames, report_sectionlabels = [], [], []

    def _update_html(html, report_fname, report_sectionlabel):
        """Update the lists above."""
        htmls.append(html)
        report_fnames.append(report_fname)
        report_sectionlabels.append(report_sectionlabel)

    for fname in fnames:
        logger.info("Rendering : %s"
                    % op.join('...' + report.data_path[-20:],
                              fname))
        try:
            if fname.endswith(('raw.fif', 'raw.fif.gz',
                               'sss.fif', 'sss.fif.gz')):
                html = report._render_raw(fname)
                report_fname = fname
                report_sectionlabel = 'raw'
            elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
                html = report._render_forward(fname)
                report_fname = fname
                report_sectionlabel = 'forward'
            elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
                html = report._render_inverse(fname)
                report_fname = fname
                report_sectionlabel = 'inverse'
            elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
                if cov is not None:
                    # with a noise covariance, evoked files additionally
                    # get a whitened rendering appended right away
                    html = report._render_whitened_evoked(fname, cov, baseline)
                    report_fname = fname + ' (whitened)'
                    report_sectionlabel = 'evoked'
                    _update_html(html, report_fname, report_sectionlabel)
                html = report._render_evoked(fname, baseline)
                report_fname = fname
                report_sectionlabel = 'evoked'
            elif fname.endswith(('-eve.fif', '-eve.fif.gz')):
                html = report._render_eve(fname, sfreq)
                report_fname = fname
                report_sectionlabel = 'events'
            elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
                html = report._render_epochs(fname)
                report_fname = fname
                report_sectionlabel = 'epochs'
            elif (fname.endswith(('-cov.fif', '-cov.fif.gz')) and
                  report.info_fname is not None):
                html = report._render_cov(fname, info)
                report_fname = fname
                report_sectionlabel = 'covariance'
            elif (fname.endswith(('-trans.fif', '-trans.fif.gz')) and
                  report.info_fname is not None and report.subjects_dir
                  is not None and report.subject is not None):
                html = report._render_trans(fname, report.data_path, info,
                                            report.subject,
                                            report.subjects_dir)
                report_fname = fname
                report_sectionlabel = 'trans'
            else:
                html = None
                report_fname = None
                report_sectionlabel = None
        except Exception as e:
            if on_error == 'warn':
                logger.warning('Failed to process file %s:\n"%s"' % (fname, e))
            elif on_error == 'raise':
                raise
            # any other on_error value skips the file silently
            html = None
            report_fname = None
            report_sectionlabel = None
        _update_html(html, report_fname, report_sectionlabel)

    return htmls, report_fnames, report_sectionlabels
###############################################################################
# IMAGE FUNCTIONS
def _build_image(data, cmap='gray'):
    """Render a 2D (or trailing-singleton 3D) array to a base64 PNG.

    The figure size equals the array shape at dpi=1, so one array element
    maps to one output pixel.
    """
    import matplotlib.pyplot as plt
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    figsize = data.shape[::-1]
    if figsize[0] == 1:
        # collapse a trailing singleton dimension
        figsize = tuple(figsize[1:])
        data = data[:, :, 0]
    cmap = getattr(plt.cm, cmap, plt.cm.gray)
    fig = Figure(figsize=figsize, dpi=1.0, frameon=False)
    FigureCanvas(fig)
    fig.figimage(data, cmap=cmap)
    buf = BytesIO()
    fig.savefig(buf, dpi=1.0, format='png')
    return base64.b64encode(buf.getvalue()).decode('ascii')
def _iterate_sagittal_slices(array, limits=None):
"""Iterate sagittal slice.
"""
shape = array.shape[0]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[ind, :, :]
def _iterate_axial_slices(array, limits=None):
"""Iterate axial slice.
"""
shape = array.shape[1]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[:, ind, :]
def _iterate_coronal_slices(array, limits=None):
"""Iterate coronal slice.
"""
shape = array.shape[2]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, np.flipud(np.rot90(array[:, :, ind]))
def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap,
                        image_format='png'):
    """Render one MRI slice to an HTML thumbnail (parallel helper).

    Returns ``(ind, html)`` so results can be re-ordered after a parallel
    map.
    """
    encoded = _build_image(data, cmap=cmap)
    html = _build_html_image(
        encoded,
        '%s-%s-%s' % (name, global_id, ind),   # slice id
        'span12 %s' % slides_klass,            # div class
        'slideimg-%s' % name,                  # img class
        u'Slice %s %s' % (name, ind),          # caption
        ind == 0)                              # only the first slice is shown
    return ind, html
###############################################################################
# HTML functions
def _build_html_image(img, id, div_klass, img_klass, caption=None, show=True):
"""Build a html image from a slice array.
"""
html = []
add_style = u'' if show else u'style="display: none"'
html.append(u'<li class="%s" id="%s" %s>' % (div_klass, id, add_style))
html.append(u'<div class="thumbnail">')
html.append(u'<img class="%s" alt="" style="width:90%%;" '
'src="data:image/png;base64,%s">'
% (img_klass, img))
html.append(u'</div>')
if caption:
html.append(u'<h4>%s</h4>' % caption)
html.append(u'</li>')
return u'\n'.join(html)
# jQuery-UI slider bound to one family of slice images: moving the handle
# hides every element of class ``klass`` and shows only the element whose
# id matches the selected value.
slider_template = HTMLTemplate(u"""
<script>$("#{{slider_id}}").slider({
range: "min",
/*orientation: "vertical",*/
min: {{minvalue}},
max: {{maxvalue}},
step: {{step}},
value: {{startvalue}},
create: function(event, ui) {
$(".{{klass}}").hide();
$("#{{klass}}-{{startvalue}}").show();},
stop: function(event, ui) {
var list_value = $("#{{slider_id}}").slider("value");
$(".{{klass}}").hide();
$("#{{klass}}-"+list_value).show();}
})</script>
""")
def _build_html_slider(slices_range, slides_klass, slider_id):
    """Render the jQuery slider controlling one set of slices.

    The handle starts on the middle slice; the step is inferred from the
    spacing of the first two indices.
    """
    start = slices_range[len(slices_range) // 2]
    return slider_template.substitute(
        slider_id=slider_id,
        klass=slides_klass,
        step=slices_range[1] - slices_range[0],
        minvalue=slices_range[0],
        maxvalue=slices_range[-1],
        startvalue=start)
###############################################################################
# HTML scan renderer
# Page header: a fixed navbar with one toggle button per section, the
# embedded js/css assets injected through {{include}}, and a global
# keyboard shortcut ('t') that toggles all sections at once.
header_template = Template(u"""
<!DOCTYPE html>
<html lang="fr">
<head>
{{include}}
<script type="text/javascript">
var toggle_state = false;
$(document).on('keydown', function (event) {
if (event.which == 84){
if (!toggle_state)
$('.has_toggle').trigger('click');
else if (toggle_state)
$('.has_toggle').trigger('click');
toggle_state = !toggle_state;
}
});
function togglebutton(class_name){
$(class_name).toggle();
if ($(class_name + '-btn').hasClass('active'))
$(class_name + '-btn').removeClass('active');
else
$(class_name + '-btn').addClass('active');
}
/* Scroll down on click to #id so that caption is not hidden
by navbar */
var shiftWindow = function() { scrollBy(0, -60) };
if (location.hash) shiftWindow();
window.addEventListener("hashchange", shiftWindow);
</script>
<style type="text/css">
body {
line-height: 1.5em;
font-family: arial, sans-serif;
}
h1 {
font-size: 30px;
text-align: center;
}
h4 {
text-align: center;
}
@link-color: @brand-primary;
@link-hover-color: darken(@link-color, 15%);
a{
color: @link-color;
&:hover {
color: @link-hover-color;
text-decoration: underline;
}
}
li{
list-style-type:none;
}
#wrapper {
text-align: left;
margin: 5em auto;
width: 700px;
}
#container{
position: relative;
}
#content{
margin-left: 22%;
margin-top: 60px;
width: 75%;
}
#toc {
margin-top: navbar-height;
position: fixed;
width: 20%;
height: 90%;
overflow: auto;
}
#toc li {
overflow: hidden;
padding-bottom: 2px;
margin-left: 20px;
}
#toc span {
float: left;
padding: 0 2px 3px 0;
}
div.footer {
background-color: #C0C0C0;
color: #000000;
padding: 3px 8px 3px 0;
clear: both;
font-size: 0.8em;
text-align: right;
}
</style>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container-fluid">
<div class="navbar-header navbar-left">
<ul class="nav nav-pills"><li class="active">
<a class="navbar-btn" data-toggle="collapse"
data-target="#viewnavbar" href="javascript:void(0)">
></a></li></ul>
</div>
<h3 class="navbar-text" style="color:white">{{title}}</h3>
<ul class="nav nav-pills navbar-right" style="margin-top: 7px;"
id="viewnavbar">
{{for section in sections}}
<li class="active {{sectionvars[section]}}-btn">
<a href="javascript:void(0)"
onclick="togglebutton('.{{sectionvars[section]}}')"
class="has_toggle">
{{section if section != 'mri' else 'MRI'}}
</a>
</li>
{{endfor}}
</ul>
</div>
</nav>
""")
# Page footer: closes the content container and adds the copyright line.
footer_template = HTMLTemplate(u"""
</div></body>
<div class="footer">
© Copyright 2012-{{current_year}}, MNE Developers.
Created on {{date}}.
Powered by <a href="http://martinos.org/mne">MNE.
</div>
</html>
""")
# Generic wrapper for a raw html payload added via add_htmls_to_section.
html_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<div class="thumbnail">{{html}}</div>
</li>
""")
# Single embedded image (png base64 or inline svg), optionally with a
# caption, a trailing comment paragraph, or an interactive widget.
image_template = Template(u"""
{{default interactive = False}}
{{default width = 50}}
{{default id = False}}
{{default image_format = 'png'}}
{{default scale = None}}
{{default comment = None}}
<li class="{{div_klass}}" {{if id}}id="{{id}}"{{endif}}
{{if not show}}style="display: none"{{endif}}>
{{if caption}}
<h4>{{caption}}</h4>
{{endif}}
<div class="thumbnail">
{{if not interactive}}
{{if image_format == 'png'}}
{{if scale is not None}}
<img alt="" style="width:{{width}}%;"
src="data:image/png;base64,{{img}}">
{{else}}
<img alt=""
src="data:image/png;base64,{{img}}">
{{endif}}
{{elif image_format == 'svg'}}
<div style="text-align:center;">
{{img}}
</div>
{{endif}}
{{if comment is not None}}
<br><br>
<div style="text-align:center;">
<style>
p.test {word-wrap: break-word;}
</style>
<p class="test">
{{comment}}
</p>
</div>
{{endif}}
{{else}}
<center>{{interactive}}</center>
{{endif}}
</div>
</li>
""")
# Plain repr() dump (used for forward/inverse operators).
repr_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4><hr>
{{repr}}
<hr></li>
""")
# Measurement-info table rendered for raw files.
raw_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<table class="table table-hover">
<tr>
<th>Measurement date</th>
{{if meas_date is not None}}
<td>{{meas_date}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Experimenter</th>
{{if info['experimenter'] is not None}}
<td>{{info['experimenter']}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Digitized points</th>
{{if info['dig'] is not None}}
<td>{{len(info['dig'])}} points</td>
{{else}}
<td>Not available</td>
{{endif}}
</tr>
<tr>
<th>Good channels</th>
<td>{{n_mag}} magnetometer, {{n_grad}} gradiometer,
and {{n_eeg}} EEG channels</td>
</tr>
<tr>
<th>Bad channels</th>
{{if info['bads'] is not None}}
<td>{{', '.join(info['bads'])}}</td>
{{else}}<td>None</td>{{endif}}
</tr>
<tr>
<th>EOG channels</th>
<td>{{eog}}</td>
</tr>
<tr>
<th>ECG channels</th>
<td>{{ecg}}</td>
<tr>
<th>Measurement time range</th>
<td>{{u'%0.2f' % tmin}} to {{u'%0.2f' % tmax}} sec.</td>
</tr>
<tr>
<th>Sampling frequency</th>
<td>{{u'%0.2f' % info['sfreq']}} Hz</td>
</tr>
<tr>
<th>Highpass</th>
<td>{{u'%0.2f' % info['highpass']}} Hz</td>
</tr>
<tr>
<th>Lowpass</th>
<td>{{u'%0.2f' % info['lowpass']}} Hz</td>
</tr>
</table>
</li>
""")
# One entry of the table of contents (optionally a hash link).
toc_list = Template(u"""
<li class="{{div_klass}}">
{{if id}}
<a href="javascript:void(0)" onclick="window.location.hash={{id}};">
{{endif}}
<span title="{{tooltip}}" style="color:{{color}}"> {{text}}</span>
{{if id}}</a>{{endif}}
</li>
""")
def _check_scale(scale):
"""Helper to ensure valid scale value is passed"""
if np.isscalar(scale) and scale <= 0:
raise ValueError('scale must be positive, not %s' % scale)
class Report(object):
    """Object for rendering HTML

    Parameters
    ----------
    info_fname : str
        Name of the file containing the info dictionary.
    subjects_dir : str | None
        Path to the SUBJECTS_DIR. If None, the path is obtained by using
        the environment variable SUBJECTS_DIR.
    subject : str | None
        Subject name.
    title : str
        Title of the report.
    cov_fname : str
        Name of the file containing the noise covariance.
    baseline : None or tuple of length 2 (default (None, 0))
        The time interval to apply baseline correction for evokeds.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
        The baseline (a, b) includes both endpoints, i.e. all
        timepoints t such that a <= t <= b.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Notes
    -----
    To toggle the show/hide state of all sections in the html report, press 't'

    .. versionadded:: 0.8.0
    """

    def __init__(self, info_fname=None, subjects_dir=None,
                 subject=None, title=None, cov_fname=None, baseline=None,
                 verbose=None):
        self.info_fname = info_fname
        self.cov_fname = cov_fname
        self.baseline = baseline
        self.subjects_dir = get_subjects_dir(subjects_dir, raise_error=False)
        self.subject = subject
        self.title = title
        self.verbose = verbose

        self.initial_id = 0  # counter backing _get_id()
        self.html = []  # rendered html snippets, one per report item
        self.fnames = []  # List of file names rendered
        self.sections = []  # List of sections
        self._sectionlabels = []  # Section labels
        self._sectionvars = {}  # Section variable names in js

        # boolean to specify if sections should be ordered in natural
        # order of processing (raw -> events ... -> inverse)
        self._sort_sections = False

        self._init_render()  # Initialize the renderer
def _get_id(self):
"""Get id of plot.
"""
self.initial_id += 1
return self.initial_id
def _validate_input(self, items, captions, section, comments=None):
"""Validate input.
"""
if not isinstance(items, (list, tuple)):
items = [items]
if not isinstance(captions, (list, tuple)):
captions = [captions]
if not isinstance(comments, (list, tuple)):
if comments is None:
comments = [comments] * len(captions)
else:
comments = [comments]
if len(comments) != len(items):
raise ValueError('Comments and report items must have the same '
'length or comments should be None.')
elif len(captions) != len(items):
raise ValueError('Captions and report items must have the same '
'length.')
# Book-keeping of section names
if section not in self.sections:
self.sections.append(section)
self._sectionvars[section] = _clean_varnames(section)
return items, captions, comments
def _add_figs_to_section(self, figs, captions, section='custom',
                         image_format='png', scale=None, comments=None):
    """Auxiliary method for `add_section` and `add_figs_to_section`.

    Renders each figure to an embedded image and appends the resulting
    html plus bookkeeping (fname, section label) to the report.
    """
    # NOTE(review): scipy.misc.imread was removed in modern SciPy --
    # confirm the pinned scipy version still provides it.
    from scipy.misc import imread
    import matplotlib.pyplot as plt
    mayavi = None
    try:
        # on some version mayavi.core won't be exposed unless ...
        from mayavi import mlab  # noqa, mlab imported
        import mayavi
    except:  # on some systems importing Mayavi raises SystemExit (!)
        # bare except is deliberate: the import may raise SystemExit
        warnings.warn('Could not import mayavi. Trying to render '
                      '`mayavi.core.scene.Scene` figure instances'
                      ' will throw an error.')
    figs, captions, comments = self._validate_input(figs, captions,
                                                    section, comments)
    _check_scale(scale)
    for fig, caption, comment in zip(figs, captions, comments):
        caption = 'custom plot' if caption == '' else caption
        sectionvar = self._sectionvars[section]
        global_id = self._get_id()
        div_klass = self._sectionvars[section]
        img_klass = self._sectionvars[section]

        # mayavi scenes are snapshotted to a temporary png and rewrapped
        # in a matplotlib figure before being embedded
        if mayavi is not None and isinstance(fig, mayavi.core.scene.Scene):
            tempdir = _TempDir()
            temp_fname = op.join(tempdir, 'test')
            if fig.scene is not None:
                fig.scene.save_png(temp_fname)
                img = imread(temp_fname)
            else:  # Testing mode
                img = np.zeros((2, 2, 3))
            mayavi.mlab.close(fig)
            fig = plt.figure()
            plt.imshow(img)
            plt.axis('off')

        img = _fig_to_img(fig=fig, scale=scale,
                          image_format=image_format)
        html = image_template.substitute(img=img, id=global_id,
                                         div_klass=div_klass,
                                         img_klass=img_klass,
                                         caption=caption,
                                         show=True,
                                         image_format=image_format,
                                         comment=comment)
        self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
        self._sectionlabels.append(sectionvar)
        self.html.append(html)
def add_figs_to_section(self, figs, captions, section='custom',
                        scale=None, image_format='png', comments=None):
    """Append custom user-defined figures.

    Parameters
    ----------
    figs : list of figures.
        Each figure in the list can be an instance of
        matplotlib.pyplot.Figure, mayavi.core.scene.Scene,
        or np.ndarray (images read in using scipy.imread).
    captions : list of str
        A list of captions to the figures.
    section : str
        Name of the section. If section already exists, the figures
        will be appended to the end of the section
    scale : float | None | callable
        Scale the images maintaining the aspect ratio.
        If None, no scaling is applied. If float, scale will determine
        the relative scaling (might not work for scale <= 1 depending on
        font sizes). If function, should take a figure object as input
        parameter. Defaults to None.
    image_format : {'png', 'svg'}
        The image format to be used for the report. Defaults to 'png'.
    comments : None | str | list of str
        A string of text or a list of strings of text to be appended after
        the figure.
    """
    # thin delegator: all the work happens in _add_figs_to_section
    render_kwargs = dict(figs=figs, captions=captions, section=section,
                         scale=scale, image_format=image_format,
                         comments=comments)
    return self._add_figs_to_section(**render_kwargs)
def add_images_to_section(self, fnames, captions, scale=None,
                          section='custom', comments=None):
    """Append custom user-defined images.

    Parameters
    ----------
    fnames : str | list of str
        A filename or a list of filenames from which images are read.
    captions : str | list of str
        A caption or a list of captions to the images.
    scale : float | None
        Scale the images maintaining the aspect ratio.
        Defaults to None. If None, no scaling will be applied.
    section : str
        Name of the section. If section already exists, the images
        will be appended to the end of the section.
    comments : None | str | list of str
        A string of text or a list of strings of text to be appended after
        the image.
    """
    # Note: using scipy.misc is equivalent because scipy internally
    # imports PIL anyway. It's not possible to redirect image output
    # to binary string using scipy.misc.
    from PIL import Image

    fnames, captions, comments = self._validate_input(fnames, captions,
                                                      section, comments)
    _check_scale(scale)

    for fname, caption, comment in zip(fnames, captions, comments):
        caption = 'custom plot' if caption == '' else caption
        sectionvar = self._sectionvars[section]
        global_id = self._get_id()
        div_klass = self._sectionvars[section]
        img_klass = self._sectionvars[section]

        # Convert image to binary string.
        im = Image.open(fname)
        output = BytesIO()
        im.save(output, format='png')
        img = base64.b64encode(output.getvalue()).decode('ascii')
        html = image_template.substitute(img=img, id=global_id,
                                         div_klass=div_klass,
                                         img_klass=img_klass,
                                         caption=caption,
                                         width=scale,
                                         comment=comment,
                                         show=True)
        self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
        self._sectionlabels.append(sectionvar)
        self.html.append(html)
def add_htmls_to_section(self, htmls, captions, section='custom'):
    """Append raw html snippets to the report.

    Parameters
    ----------
    htmls : str | list of str
        An html str or a list of html str.
    captions : str | list of str
        A caption or a list of captions to the htmls.
    section : str
        Name of the section. If section already exists, the images
        will be appended to the end of the section.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    htmls, captions, _ = self._validate_input(htmls, captions, section)
    # the section variable is invariant across the loop
    sectionvar = self._sectionvars[section]
    for html, caption in zip(htmls, captions):
        if caption == '':
            caption = 'custom plot'
        global_id = self._get_id()
        self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
        self._sectionlabels.append(sectionvar)
        self.html.append(
            html_template.substitute(div_klass=sectionvar, id=global_id,
                                     caption=caption, html=html))
def add_bem_to_section(self, subject, caption='BEM', section='bem',
                       decim=2, n_jobs=1, subjects_dir=None):
    """Renders a bem slider html str.

    Parameters
    ----------
    subject : str
        Subject name.
    caption : str
        A caption for the bem.
    section : str
        Name of the section. If section already exists, the bem
        will be appended to the end of the section.
    decim : int
        Use this decimation factor for generating MRI/BEM images
        (since it can be time consuming).
    n_jobs : int
        Number of jobs to run in parallel.
    subjects_dir : str | None
        Path to the SUBJECTS_DIR. If None, the path is obtained by using
        the environment variable SUBJECTS_DIR.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    caption = 'custom plot' if caption == '' else caption
    html = self._render_bem(subject=subject, subjects_dir=subjects_dir,
                            decim=decim, n_jobs=n_jobs, section=section,
                            caption=caption)
    # wrap the single html/caption in lists for uniform bookkeeping
    html, caption, _ = self._validate_input(html, caption, section)
    sectionvar = self._sectionvars[section]

    self.fnames.append('%s-#-%s-#-custom' % (caption[0], sectionvar))
    self._sectionlabels.append(sectionvar)
    self.html.extend(html)
###########################################################################
# HTML rendering
def _render_one_axis(self, slices_iter, name, global_id, cmap,
                     n_elements, n_jobs):
    """Render one axis of the array.

    Renders all slices in parallel plus the slider controlling them.
    """
    global_id = global_id or name
    html = []
    slices, slices_range = [], []
    html.append(u'<div class="col-xs-6 col-md-4">')
    slides_klass = '%s-%s' % (name, global_id)

    # do not spawn more jobs than there are slices to render
    use_jobs = min(n_jobs, max(1, n_elements))
    parallel, p_fun, _ = parallel_func(_iterate_mri_slices, use_jobs)
    r = parallel(p_fun(name, ind, global_id, slides_klass, data, cmap)
                 for ind, data in slices_iter)
    slices_range, slices = zip(*r)

    # Render the slider
    slider_id = 'select-%s-%s' % (name, global_id)
    html.append(u'<div id="%s"></div>' % slider_id)
    html.append(u'<ul class="thumbnails">')
    # Render the slices
    html.append(u'\n'.join(slices))
    html.append(u'</ul>')
    html.append(_build_html_slider(slices_range, slides_klass, slider_id))
    html.append(u'</div>')
    return '\n'.join(html)
###########################################################################
# global rendering functions
@verbose
def _init_render(self, verbose=None):
    """Initialize the renderer.

    Embeds the bundled js/css assets (read from the package's ``html``
    directory) into ``self.include`` wrapped in <script>/<style> tags.
    """
    inc_fnames = ['jquery-1.10.2.min.js', 'jquery-ui.min.js',
                  'bootstrap.min.js', 'jquery-ui.min.css',
                  'bootstrap.min.css']

    include = list()
    for inc_fname in inc_fnames:
        logger.info('Embedding : %s' % inc_fname)
        # context manager guarantees the handle is closed even if
        # reading fails (the original leaked it on error)
        with open(op.join(op.dirname(__file__), 'html', inc_fname),
                  'r') as f:
            if inc_fname.endswith('.js'):
                include.append(u'<script type="text/javascript">' +
                               f.read() + u'</script>')
            elif inc_fname.endswith('.css'):
                include.append(u'<style type="text/css">' +
                               f.read() + u'</style>')
    self.include = ''.join(include)
@verbose
def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, mri_decim=2,
                 sort_sections=True, on_error='warn', verbose=None):
    """Renders all the files in the folder.

    Parameters
    ----------
    data_path : str
        Path to the folder containing data whose HTML report will be
        created.
    pattern : str | list of str
        Filename pattern(s) to include in the report.
        Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked
        files.
    n_jobs : int
        Number of jobs to run in parallel.
    mri_decim : int
        Use this decimation factor for generating MRI/BEM images
        (since it can be time consuming).
    sort_sections : bool
        If True, sort sections in the order: raw -> events -> epochs
        -> evoked -> covariance -> trans -> mri -> forward -> inverse.
    on_error : str
        What to do if a file cannot be rendered. Can be 'ignore',
        'warn' (default), or 'raise'.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    """
    valid_errors = ['ignore', 'warn', 'raise']
    if on_error not in valid_errors:
        raise ValueError('on_error must be one of %s, not %s'
                         % (valid_errors, on_error))
    self._sort = sort_sections

    n_jobs = check_n_jobs(n_jobs)
    self.data_path = data_path

    if self.title is None:
        self.title = 'MNE Report for ...%s' % self.data_path[-20:]

    if not isinstance(pattern, (list, tuple)):
        pattern = [pattern]

    # iterate through the possible patterns
    fnames = list()
    for p in pattern:
        fnames.extend(_recursive_search(self.data_path, p))

    if self.info_fname is not None:
        info = read_info(self.info_fname)
        sfreq = info['sfreq']
    else:
        warnings.warn('`info_fname` not provided. Cannot render'
                      '-cov.fif(.gz) and -trans.fif(.gz) files.')
        info, sfreq = None, None

    cov = None
    if self.cov_fname is not None:
        cov = read_cov(self.cov_fname)
    baseline = self.baseline

    # render plots in parallel; check that n_jobs <= # of files
    logger.info('Iterating over %s potential files (this may take some '
                'time)' % len(fnames))
    use_jobs = min(n_jobs, max(1, len(fnames)))
    parallel, p_fun, _ = parallel_func(_iterate_files, use_jobs)
    r = parallel(p_fun(self, fname, info, cov, baseline, sfreq, on_error)
                 for fname in np.array_split(fnames, use_jobs))
    htmls, report_fnames, report_sectionlabels = zip(*r)

    # combine results from n_jobs discarding plots not rendered
    self.html = [html for html in sum(htmls, []) if html is not None]
    self.fnames = [fname for fname in sum(report_fnames, []) if
                   fname is not None]
    self._sectionlabels = [slabel for slabel in
                           sum(report_sectionlabels, [])
                           if slabel is not None]

    # find unique section labels
    self.sections = sorted(set(self._sectionlabels))
    self._sectionvars = dict(zip(self.sections, self.sections))

    # render mri
    if self.subjects_dir is not None and self.subject is not None:
        logger.info('Rendering BEM')
        self.html.append(self._render_bem(self.subject, self.subjects_dir,
                                          mri_decim, n_jobs))
        self.fnames.append('bem')
        self._sectionlabels.append('mri')
    else:
        warnings.warn('`subjects_dir` and `subject` not provided.'
                      ' Cannot render MRI and -trans.fif(.gz) files.')
def save(self, fname=None, open_browser=True, overwrite=False):
    """Save html report and open it in browser.

    Parameters
    ----------
    fname : str
        File name of the report. If None, saves as 'report.html' inside
        ``self.data_path``.
    open_browser : bool
        Open html browser after saving if True.
    overwrite : bool
        If True, overwrite report if it already exists.

    Returns
    -------
    fname : str
        The resolved file name of the report.
    """
    if fname is None:
        if not hasattr(self, 'data_path'):
            self.data_path = op.dirname(__file__)
            warnings.warn('`data_path` not provided. Using %s instead'
                          % self.data_path)
        fname = op.realpath(op.join(self.data_path, 'report.html'))
    else:
        fname = op.realpath(fname)

    self._render_toc()

    html = footer_template.substitute(date=time.strftime("%B %d, %Y"),
                                      current_year=time.strftime("%Y"))
    self.html.append(html)

    if not overwrite and op.isfile(fname):
        msg = ('Report already exists at location %s. '
               'Overwrite it (y/[n])? '
               % fname)
        answer = moves.input(msg)
        if answer.lower() == 'y':
            overwrite = True

    if overwrite or not op.isfile(fname):
        logger.info('Saving report to location %s' % fname)
        # context manager guarantees the handle is closed even if the
        # write fails (the original leaked it on error)
        with codecs.open(fname, 'w', 'utf-8') as fobj:
            fobj.write(_fix_global_ids(u''.join(self.html)))

    # remove header, TOC and footer to allow more saves
    self.html.pop(0)
    self.html.pop(0)
    self.html.pop()

    if open_browser:
        import webbrowser
        webbrowser.open_new_tab('file://' + fname)

    return fname
@verbose
def _render_toc(self, verbose=None):
    """Render the Table of Contents.

    Reorders self.html/self.fnames/self._sectionlabels by section and
    prepends the page header and the TOC markup to self.html.
    """
    logger.info('Rendering : Table of Contents')

    html_toc = u'<div id="container">'
    html_toc += u'<div id="toc"><center><h4>CONTENTS</h4></center>'

    global_id = 1

    # Reorder self.sections to reflect natural ordering
    if self._sort_sections:
        sections = list(set(self.sections) & set(SECTION_ORDER))
        custom = [section for section in self.sections if section
                  not in SECTION_ORDER]
        order = [sections.index(section) for section in SECTION_ORDER if
                 section in sections]
        self.sections = np.array(sections)[order].tolist() + custom

    # Sort by section
    html, fnames, sectionlabels = [], [], []
    for section in self.sections:
        logger.info('%s' % section)
        for sectionlabel, this_html, fname in (zip(self._sectionlabels,
                                               self.html, self.fnames)):
            if self._sectionvars[section] == sectionlabel:
                html.append(this_html)
                fnames.append(fname)
                sectionlabels.append(sectionlabel)
                logger.info('\t... %s' % fname[-20:])
                color = _is_bad_fname(fname)
                div_klass, tooltip, text = _get_toc_property(fname)

                # loop through conditions for evoked
                if fname.endswith(('-ave.fif', '-ave.fif.gz',
                                   '(whitened)')):
                    text = os.path.basename(fname)
                    if fname.endswith('(whitened)'):
                        fname = fname[:-11]
                    # XXX: remove redundant read_evokeds
                    evokeds = read_evokeds(fname, verbose=False)
                    html_toc += toc_list.substitute(
                        div_klass=div_klass, id=None, tooltip=fname,
                        color='#428bca', text=text)
                    html_toc += u'<li class="evoked"><ul>'
                    # one nested TOC entry per evoked condition
                    for ev in evokeds:
                        html_toc += toc_list.substitute(
                            div_klass=div_klass, id=global_id,
                            tooltip=fname, color=color, text=ev.comment)
                        global_id += 1
                    html_toc += u'</ul></li>'
                elif fname.endswith(tuple(VALID_EXTENSIONS +
                                    ['bem', 'custom'])):
                    html_toc += toc_list.substitute(div_klass=div_klass,
                                                    id=global_id,
                                                    tooltip=tooltip,
                                                    color=color,
                                                    text=text)
                    global_id += 1

    html_toc += u'\n</ul></div>'
    html_toc += u'<div id="content">'

    # The sorted html (according to section)
    self.html = html
    self.fnames = fnames
    self._sectionlabels = sectionlabels

    html_header = header_template.substitute(title=self.title,
                                             include=self.include,
                                             sections=self.sections,
                                             sectionvars=self._sectionvars)
    self.html.insert(0, html_header)  # Insert header at position 0
    self.html.insert(1, html_toc)  # insert TOC
def _render_array(self, array, global_id=None, cmap='gray',
                  limits=None, n_jobs=1):
    """Render mri without bem contours.

    Produces axial, sagittal and coronal slice sliders for a 3D array.
    """
    html = []
    html.append(u'<div class="row">')
    # Axial
    limits = limits or {}
    axial_limit = limits.get('axial')
    axial_slices_gen = _iterate_axial_slices(array, axial_limit)
    html.append(
        self._render_one_axis(axial_slices_gen, 'axial',
                              global_id, cmap, array.shape[1], n_jobs))
    # Sagittal
    # NOTE(review): array.shape[1] is passed as n_elements for all three
    # orientations; it only caps the parallel job count, but presumably
    # the matching axis length was intended -- confirm.
    sagittal_limit = limits.get('sagittal')
    sagittal_slices_gen = _iterate_sagittal_slices(array, sagittal_limit)
    html.append(
        self._render_one_axis(sagittal_slices_gen, 'sagittal',
                              global_id, cmap, array.shape[1], n_jobs))
    html.append(u'</div>')
    html.append(u'<div class="row">')
    # Coronal
    coronal_limit = limits.get('coronal')
    coronal_slices_gen = _iterate_coronal_slices(array, coronal_limit)
    html.append(
        self._render_one_axis(coronal_slices_gen, 'coronal',
                              global_id, cmap, array.shape[1], n_jobs))
    # Close section
    html.append(u'</div>')
    return '\n'.join(html)
def _render_one_bem_axis(self, mri_fname, surf_fnames, global_id,
                         shape, orientation='coronal', decim=2, n_jobs=1):
    """Render one axis of bem contours.

    Renders every ``decim``-th slice with the surface contours overlaid,
    plus the slider controlling them.
    """
    orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
    orientation_axis = orientation_name2axis[orientation]
    n_slices = shape[orientation_axis]
    # in-plane image size for the chosen orientation
    orig_size = np.roll(shape, orientation_axis)[[1, 2]]

    name = orientation
    html = []
    html.append(u'<div class="col-xs-6 col-md-4">')
    slides_klass = '%s-%s' % (name, global_id)

    sl = np.arange(0, n_slices, decim)
    kwargs = dict(mri_fname=mri_fname, surf_fnames=surf_fnames, show=False,
                  orientation=orientation, img_output=orig_size)
    imgs = _figs_to_mrislices(sl, n_jobs, **kwargs)
    slices = []
    img_klass = 'slideimg-%s' % name
    div_klass = 'span12 %s' % slides_klass
    for ii, img in enumerate(imgs):
        slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
        caption = u'Slice %s %s' % (name, sl[ii])
        first = True if ii == 0 else False  # only the first slice shown
        slices.append(_build_html_image(img, slice_id, div_klass,
                      img_klass, caption, first))

    # Render the slider
    slider_id = 'select-%s-%s' % (name, global_id)
    html.append(u'<div id="%s"></div>' % slider_id)
    html.append(u'<ul class="thumbnails">')
    # Render the slices
    html.append(u'\n'.join(slices))
    html.append(u'</ul>')
    html.append(_build_html_slider(sl, slides_klass, slider_id))
    html.append(u'</div>')
    return '\n'.join(html)
def _render_image(self, image, cmap='gray', n_jobs=1):
    """Render one slice of mri without bem.

    Loads the MRI volume and renders it via _render_array, registering
    the 'mri' section on first use.
    """
    import nibabel as nib

    global_id = self._get_id()

    if 'mri' not in self.sections:
        self.sections.append('mri')
        self._sectionvars['mri'] = 'mri'

    nim = nib.load(image)
    data = nim.get_data()
    shape = data.shape
    # show every other slice along each axis
    limits = {'sagittal': range(0, shape[0], 2),
              'axial': range(0, shape[1], 2),
              'coronal': range(0, shape[2], 2)}
    name = op.basename(image)
    html = u'<li class="mri" id="%d">\n' % global_id
    html += u'<h2>%s</h2>\n' % name
    html += self._render_array(data, global_id=global_id,
                               cmap=cmap, limits=limits,
                               n_jobs=n_jobs)
    html += u'</li>\n'
    return html
def _render_raw(self, raw_fname):
    """Render raw measurement info as an html table.

    Summarizes channel counts, EOG/ECG channels, measurement date,
    time range and filtering of the raw file.
    """
    global_id = self._get_id()
    div_klass = 'raw'
    caption = u'Raw : %s' % raw_fname

    raw = Raw(raw_fname)

    n_eeg = len(pick_types(raw.info, meg=False, eeg=True))
    n_grad = len(pick_types(raw.info, meg='grad'))
    n_mag = len(pick_types(raw.info, meg='mag'))
    pick_eog = pick_types(raw.info, meg=False, eog=True)
    if len(pick_eog) > 0:
        eog = ', '.join(np.array(raw.info['ch_names'])[pick_eog])
    else:
        eog = 'Not available'
    pick_ecg = pick_types(raw.info, meg=False, ecg=True)
    if len(pick_ecg) > 0:
        ecg = ', '.join(np.array(raw.info['ch_names'])[pick_ecg])
    else:
        ecg = 'Not available'
    meas_date = raw.info['meas_date']
    if meas_date is not None:
        # NOTE(review): assumes meas_date is a (sec, usec) pair -- confirm
        meas_date = dt.fromtimestamp(meas_date[0]).strftime("%B %d, %Y")
    tmin = raw.first_samp / raw.info['sfreq']
    tmax = raw.last_samp / raw.info['sfreq']

    html = raw_template.substitute(div_klass=div_klass,
                                   id=global_id,
                                   caption=caption,
                                   info=raw.info,
                                   meas_date=meas_date,
                                   n_eeg=n_eeg, n_grad=n_grad,
                                   n_mag=n_mag, eog=eog,
                                   ecg=ecg, tmin=tmin, tmax=tmax)
    return html
def _render_forward(self, fwd_fname):
    """Render a forward solution file as an html repr block."""
    fwd = read_forward_solution(fwd_fname)
    # strip angle brackets so the repr does not break the html markup
    clean_repr = repr(fwd).replace('<', '').replace('>', '')
    return repr_template.substitute(div_klass='forward',
                                    id=self._get_id(),
                                    caption=u'Forward: %s' % fwd_fname,
                                    repr=clean_repr)
def _render_inverse(self, inv_fname):
    """Render an inverse operator file as an html repr block."""
    inv = read_inverse_operator(inv_fname)
    # strip angle brackets so the repr does not break the html markup
    clean_repr = repr(inv).replace('<', '').replace('>', '')
    return repr_template.substitute(div_klass='inverse',
                                    id=self._get_id(),
                                    caption=u'Inverse: %s' % inv_fname,
                                    repr=clean_repr)
def _render_evoked(self, evoked_fname, baseline=None, figsize=None):
    """Render evoked.

    One butterfly plot per condition plus one topomap per available
    channel type (eeg/grad/mag).
    """
    evokeds = read_evokeds(evoked_fname, baseline=baseline, verbose=False)

    html = []
    for ev in evokeds:
        global_id = self._get_id()

        kwargs = dict(show=False)
        img = _fig_to_img(ev.plot, **kwargs)

        caption = u'Evoked : %s (%s)' % (evoked_fname, ev.comment)
        div_klass = 'evoked'
        img_klass = 'evoked'
        show = True
        html.append(image_template.substitute(img=img, id=global_id,
                                              div_klass=div_klass,
                                              img_klass=img_klass,
                                              caption=caption,
                                              show=show))
        has_types = []
        if len(pick_types(ev.info, meg=False, eeg=True)) > 0:
            has_types.append('eeg')
        if len(pick_types(ev.info, meg='grad', eeg=False)) > 0:
            has_types.append('grad')
        if len(pick_types(ev.info, meg='mag', eeg=False)) > 0:
            has_types.append('mag')
        for ch_type in has_types:
            kwargs.update(ch_type=ch_type)
            img = _fig_to_img(ev.plot_topomap, **kwargs)
            caption = u'Topomap (ch_type = %s)' % ch_type
            # NOTE(review): topomap entries are emitted without id= --
            # confirm whether this omission is intentional
            html.append(image_template.substitute(img=img,
                                                  div_klass=div_klass,
                                                  img_klass=img_klass,
                                                  caption=caption,
                                                  show=show))

    return '\n'.join(html)
def _render_eve(self, eve_fname, sfreq=None):
    """Render an events file as an embedded events plot."""
    event_array = read_events(eve_fname)
    img = _fig_to_img(plot_events, events=event_array, sfreq=sfreq,
                      show=False)
    return image_template.substitute(img=img, id=self._get_id(),
                                     div_klass='events',
                                     img_klass='events',
                                     caption='Events : ' + eve_fname,
                                     show=True)
def _render_epochs(self, epo_fname):
    """Render an epochs file as an embedded drop-log plot."""
    epochs = read_epochs(epo_fname)
    img = _fig_to_img(epochs.plot_drop_log, subject=self.subject,
                      show=False)
    return image_template.substitute(img=img, id=self._get_id(),
                                     div_klass='epochs',
                                     img_klass='epochs',
                                     caption='Epochs : ' + epo_fname,
                                     show=True)
def _render_cov(self, cov_fname, info_fname):
    """Render a noise-covariance file as an embedded plot."""
    cov = read_cov(cov_fname)
    fig, _ = plot_cov(cov, info_fname, show=False)
    caption = 'Covariance : %s (n_samples: %s)' % (cov_fname, cov.nfree)
    return image_template.substitute(img=_fig_to_img(fig=fig),
                                     id=self._get_id(),
                                     div_klass='covariance',
                                     img_klass='covariance',
                                     caption=caption,
                                     show=True)
def _render_whitened_evoked(self, evoked_fname, noise_cov, baseline):
    """Show whitened evoked.

    One whitened butterfly plot per condition, using ``noise_cov``.
    """
    global_id = self._get_id()

    evokeds = read_evokeds(evoked_fname, verbose=False)

    html = []
    for ev in evokeds:
        # re-read this condition with the baseline applied
        ev = read_evokeds(evoked_fname, ev.comment, baseline=baseline,
                          verbose=False)

        global_id = self._get_id()

        kwargs = dict(noise_cov=noise_cov, show=False)
        img = _fig_to_img(ev.plot_white, **kwargs)

        caption = u'Whitened evoked : %s (%s)' % (evoked_fname, ev.comment)
        div_klass = 'evoked'
        img_klass = 'evoked'
        show = True
        html.append(image_template.substitute(img=img, id=global_id,
                                              div_klass=div_klass,
                                              img_klass=img_klass,
                                              caption=caption,
                                              show=show))
    return '\n'.join(html)
def _render_trans(self, trans, path, info, subject,
                  subjects_dir, image_format='png'):
    """Render trans.

    Returns the html snippet, or implicitly None when no image could
    be produced.
    """
    kwargs = dict(info=info, trans=trans, subject=subject,
                  subjects_dir=subjects_dir)
    try:
        img = _iterate_trans_views(function=plot_trans, **kwargs)
    except IOError:
        # retry with the low-resolution head surface
        img = _iterate_trans_views(function=plot_trans, source='head',
                                   **kwargs)

    if img is not None:
        global_id = self._get_id()
        caption = 'Trans : ' + trans
        div_klass = 'trans'
        img_klass = 'trans'
        show = True
        html = image_template.substitute(img=img, id=global_id,
                                         div_klass=div_klass,
                                         img_klass=img_klass,
                                         caption=caption,
                                         width=75,
                                         show=show)
        return html
    # falls through to an implicit None when img is None
def _render_bem(self, subject, subjects_dir, decim, n_jobs,
                section='mri', caption='BEM'):
    """Render mri+bem.

    Renders the three orientation sliders with surface contours; falls
    back to a plain MRI rendering when the bem directory or any of the
    expected surfaces is missing.
    """
    import nibabel as nib

    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)

    # Get the MRI filename
    mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
    if not op.isfile(mri_fname):
        warnings.warn('MRI file "%s" does not exist' % mri_fname)

    # Get the BEM surface filenames
    bem_path = op.join(subjects_dir, subject, 'bem')

    if not op.isdir(bem_path):
        warnings.warn('Subject bem directory "%s" does not exist' %
                      bem_path)
        return self._render_image(mri_fname, cmap='gray', n_jobs=n_jobs)

    surf_fnames = []
    for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
        surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
        if len(surf_fname) > 0:
            surf_fname = surf_fname[0]
        else:
            warnings.warn('No surface found for %s.' % surf_name)
            return self._render_image(mri_fname, cmap='gray')
        surf_fnames.append(surf_fname)

    # XXX : find a better way to get max range of slices
    nim = nib.load(mri_fname)
    data = nim.get_data()
    shape = data.shape
    del data  # free up memory

    html = []

    global_id = self._get_id()

    if section == 'mri' and 'mri' not in self.sections:
        self.sections.append('mri')
        self._sectionvars['mri'] = 'mri'

    name = caption

    # NOTE: `html` is a list, so `+=` with a string extends it one
    # character at a time; the trailing ''.join() makes this work anyway.
    html += u'<li class="mri" id="%d">\n' % global_id
    html += u'<h2>%s</h2>\n' % name
    html += u'<div class="row">'
    html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
                                      shape, 'axial', decim, n_jobs)
    html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
                                      shape, 'sagittal', decim, n_jobs)
    html += u'</div><div class="row">'
    html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
                                      shape, 'coronal', decim, n_jobs)
    html += u'</div>'
    html += u'</li>\n'
    return ''.join(html)
def _clean_varnames(s):
# Remove invalid characters
s = re.sub('[^0-9a-zA-Z_]', '', s)
# add report_ at the beginning so that the javascript class names
# are valid ones
return 'report_' + s
def _recursive_search(path, pattern):
    """Recursively collect files under *path* whose names match *pattern*.

    Only files whose extension is listed in the module-level
    ``VALID_EXTENSIONS`` are kept; the returned paths are absolute with
    symlinks resolved (``op.realpath``).
    """
    matches = list()
    # only the following file types are supported
    # this ensures equitable distribution of jobs
    supported = tuple(VALID_EXTENSIONS)
    for dirpath, _, filenames in os.walk(path):
        for name in fnmatch.filter(filenames, pattern):
            if name.endswith(supported):
                matches.append(op.realpath(op.join(dirpath, name)))
    return matches
def _fix_global_ids(html):
"""Auxiliary function for fixing the global_ids after reordering in
_render_toc().
"""
html = re.sub('id="\d+"', 'id="###"', html)
global_id = 1
while len(re.findall('id="###"', html)) > 0:
html = re.sub('id="###"', 'id="%s"' % global_id, html, count=1)
global_id += 1
return html
|
dimkal/mne-python
|
mne/report.py
|
Python
|
bsd-3-clause
| 61,251
|
[
"Mayavi"
] |
2e1473dbea8541e39f04dc549804568f0b7e16359a49b39be084e08e0e599f17
|
#
# Copyright (c) 2009, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum and Anna Vulpetti, March 2009
from rdkit import Chem
from rdkit import DataStructs
from CreateFps import GetMolFingerprint
from rdkit.RDLogger import logger
logger = logger()
import sys
# maxPathLength is the maximum path length in atoms
# maxPathLength=6 corresponds to F-FP-5
# maxPathLength=7 corresponds to F-FP-6
# maxPathLength=8 corresponds to F-FP-7
maxPathLength = 8
# nameField is the name of the property (from the SD file) that has molecule
# names... If the molecules have names in the first row of the file, use "_Name"
nameField = 'Compound_orig'
#nameField = '_Name'
# propField is the name of the property (from the SD file) you want to use
# as the "activity"
propField = 'chemical_shift_1'
# similarity threshold for a pair to be considered interesting
# (i.e. pairs with a similarity below this value will not be
# added to the output).
similarityThreshold = 0.5
if __name__ == '__main__':
    # Usage: script.py <input.sdf> <output file>
    # NOTE: this is Python 2 code (``file`` builtin, ``print >>`` syntax).
    suppl = Chem.SDMolSupplier(sys.argv[1])
    outF = file(sys.argv[2], 'w+')
    data = []
    logger.info('reading molecules and generating fingeprints')
    for i, mol in enumerate(suppl):
        if not mol:
            # SDMolSupplier yields None for unparseable records; skip them.
            continue
        smi = Chem.MolToSmiles(mol, True)  # canonical, isomeric SMILES
        nm = mol.GetProp(nameField)
        property = float(mol.GetProp(propField))
        fp = GetMolFingerprint(mol, maxPathLength)
        # One record per molecule: (name, smiles, activity, fingerprint).
        data.append((nm, smi, property, fp))
    logger.info(' got %d molecules' % len(data))
    logger.info('calculating pairs')
    # All-vs-all comparison (O(n^2)); keep only pairs whose Dice
    # fingerprint similarity exceeds similarityThreshold.
    pairs = []
    for i in range(len(data)):
        for j in range(i + 1, len(data)):
            if DataStructs.DiceSimilarity(data[i][-1], data[j][-1]) > similarityThreshold:
                pairs.append((i, j))
        if not (i + 1) % 100:
            logger.info('Done %d molecules' % (i + 1))
    logger.info(' got %d reasonable pairs' % len(pairs))
    logger.info('creating output file')
    # Pipe-separated table; "AB" columns describe the combined pair.
    print >> outF, 'nameA|nameB|nameAB|smilesA|smilesB|smilesAB|actA|actB|dAct|dist|disparity'
    for i, j in pairs:
        # Order each pair so molecule i has the larger activity,
        # making dAct (activity difference) non-negative.
        if data[i][2] < data[j][2]:
            i, j = j, i
        nmi, smii, propi, fpi = data[i]
        nmj, smij, propj, fpj = data[j]
        dAct = propi - propj
        # Distance is 1 - Dice similarity; disparity = dAct / dist is the
        # "activity cliff" measure (capped at 1000 for identical fps).
        dist = 1. - DataStructs.DiceSimilarity(fpi, fpj)
        if dist != 0:
            disparity = dAct / dist
        else:
            disparity = 1000
        print >> outF, '%s|%s|%s_%s|%s|%s|%s.%s|%f|%f|%f|%f|%f' % (
            nmi, nmj, nmi, nmj, smii, smij, smii, smij, propi, propj, dAct, dist, disparity)
|
rvianello/rdkit
|
Contrib/LEF/DistancePlot.py
|
Python
|
bsd-3-clause
| 4,011
|
[
"RDKit"
] |
242f2d5ddbe5a3c56a898619ded762b5e8d3cc6b178b2d0d060767d5ac79cb84
|
# coding: utf-8
from __future__ import unicode_literals
import binascii
import collections
import email
import getpass
import io
import optparse
import os
import re
import shlex
import shutil
import socket
import struct
import subprocess
import sys
import itertools
import xml.etree.ElementTree
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import urllib.response as compat_urllib_response
except ImportError: # Python 2
import urllib as compat_urllib_response
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import http.cookies as compat_cookies
except ImportError: # Python 2
import Cookie as compat_cookies
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try: # Python >= 3.3
compat_html_entities_html5 = compat_html_entities.html5
except AttributeError:
# Copied from CPython 3.5.1 html/entities.py
compat_html_entities_html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
'Aacute;': '\xc1',
'aacute;': '\xe1',
'Abreve;': '\u0102',
'abreve;': '\u0103',
'ac;': '\u223e',
'acd;': '\u223f',
'acE;': '\u223e\u0333',
'Acirc': '\xc2',
'acirc': '\xe2',
'Acirc;': '\xc2',
'acirc;': '\xe2',
'acute': '\xb4',
'acute;': '\xb4',
'Acy;': '\u0410',
'acy;': '\u0430',
'AElig': '\xc6',
'aelig': '\xe6',
'AElig;': '\xc6',
'aelig;': '\xe6',
'af;': '\u2061',
'Afr;': '\U0001d504',
'afr;': '\U0001d51e',
'Agrave': '\xc0',
'agrave': '\xe0',
'Agrave;': '\xc0',
'agrave;': '\xe0',
'alefsym;': '\u2135',
'aleph;': '\u2135',
'Alpha;': '\u0391',
'alpha;': '\u03b1',
'Amacr;': '\u0100',
'amacr;': '\u0101',
'amalg;': '\u2a3f',
'AMP': '&',
'amp': '&',
'AMP;': '&',
'amp;': '&',
'And;': '\u2a53',
'and;': '\u2227',
'andand;': '\u2a55',
'andd;': '\u2a5c',
'andslope;': '\u2a58',
'andv;': '\u2a5a',
'ang;': '\u2220',
'ange;': '\u29a4',
'angle;': '\u2220',
'angmsd;': '\u2221',
'angmsdaa;': '\u29a8',
'angmsdab;': '\u29a9',
'angmsdac;': '\u29aa',
'angmsdad;': '\u29ab',
'angmsdae;': '\u29ac',
'angmsdaf;': '\u29ad',
'angmsdag;': '\u29ae',
'angmsdah;': '\u29af',
'angrt;': '\u221f',
'angrtvb;': '\u22be',
'angrtvbd;': '\u299d',
'angsph;': '\u2222',
'angst;': '\xc5',
'angzarr;': '\u237c',
'Aogon;': '\u0104',
'aogon;': '\u0105',
'Aopf;': '\U0001d538',
'aopf;': '\U0001d552',
'ap;': '\u2248',
'apacir;': '\u2a6f',
'apE;': '\u2a70',
'ape;': '\u224a',
'apid;': '\u224b',
'apos;': "'",
'ApplyFunction;': '\u2061',
'approx;': '\u2248',
'approxeq;': '\u224a',
'Aring': '\xc5',
'aring': '\xe5',
'Aring;': '\xc5',
'aring;': '\xe5',
'Ascr;': '\U0001d49c',
'ascr;': '\U0001d4b6',
'Assign;': '\u2254',
'ast;': '*',
'asymp;': '\u2248',
'asympeq;': '\u224d',
'Atilde': '\xc3',
'atilde': '\xe3',
'Atilde;': '\xc3',
'atilde;': '\xe3',
'Auml': '\xc4',
'auml': '\xe4',
'Auml;': '\xc4',
'auml;': '\xe4',
'awconint;': '\u2233',
'awint;': '\u2a11',
'backcong;': '\u224c',
'backepsilon;': '\u03f6',
'backprime;': '\u2035',
'backsim;': '\u223d',
'backsimeq;': '\u22cd',
'Backslash;': '\u2216',
'Barv;': '\u2ae7',
'barvee;': '\u22bd',
'Barwed;': '\u2306',
'barwed;': '\u2305',
'barwedge;': '\u2305',
'bbrk;': '\u23b5',
'bbrktbrk;': '\u23b6',
'bcong;': '\u224c',
'Bcy;': '\u0411',
'bcy;': '\u0431',
'bdquo;': '\u201e',
'becaus;': '\u2235',
'Because;': '\u2235',
'because;': '\u2235',
'bemptyv;': '\u29b0',
'bepsi;': '\u03f6',
'bernou;': '\u212c',
'Bernoullis;': '\u212c',
'Beta;': '\u0392',
'beta;': '\u03b2',
'beth;': '\u2136',
'between;': '\u226c',
'Bfr;': '\U0001d505',
'bfr;': '\U0001d51f',
'bigcap;': '\u22c2',
'bigcirc;': '\u25ef',
'bigcup;': '\u22c3',
'bigodot;': '\u2a00',
'bigoplus;': '\u2a01',
'bigotimes;': '\u2a02',
'bigsqcup;': '\u2a06',
'bigstar;': '\u2605',
'bigtriangledown;': '\u25bd',
'bigtriangleup;': '\u25b3',
'biguplus;': '\u2a04',
'bigvee;': '\u22c1',
'bigwedge;': '\u22c0',
'bkarow;': '\u290d',
'blacklozenge;': '\u29eb',
'blacksquare;': '\u25aa',
'blacktriangle;': '\u25b4',
'blacktriangledown;': '\u25be',
'blacktriangleleft;': '\u25c2',
'blacktriangleright;': '\u25b8',
'blank;': '\u2423',
'blk12;': '\u2592',
'blk14;': '\u2591',
'blk34;': '\u2593',
'block;': '\u2588',
'bne;': '=\u20e5',
'bnequiv;': '\u2261\u20e5',
'bNot;': '\u2aed',
'bnot;': '\u2310',
'Bopf;': '\U0001d539',
'bopf;': '\U0001d553',
'bot;': '\u22a5',
'bottom;': '\u22a5',
'bowtie;': '\u22c8',
'boxbox;': '\u29c9',
'boxDL;': '\u2557',
'boxDl;': '\u2556',
'boxdL;': '\u2555',
'boxdl;': '\u2510',
'boxDR;': '\u2554',
'boxDr;': '\u2553',
'boxdR;': '\u2552',
'boxdr;': '\u250c',
'boxH;': '\u2550',
'boxh;': '\u2500',
'boxHD;': '\u2566',
'boxHd;': '\u2564',
'boxhD;': '\u2565',
'boxhd;': '\u252c',
'boxHU;': '\u2569',
'boxHu;': '\u2567',
'boxhU;': '\u2568',
'boxhu;': '\u2534',
'boxminus;': '\u229f',
'boxplus;': '\u229e',
'boxtimes;': '\u22a0',
'boxUL;': '\u255d',
'boxUl;': '\u255c',
'boxuL;': '\u255b',
'boxul;': '\u2518',
'boxUR;': '\u255a',
'boxUr;': '\u2559',
'boxuR;': '\u2558',
'boxur;': '\u2514',
'boxV;': '\u2551',
'boxv;': '\u2502',
'boxVH;': '\u256c',
'boxVh;': '\u256b',
'boxvH;': '\u256a',
'boxvh;': '\u253c',
'boxVL;': '\u2563',
'boxVl;': '\u2562',
'boxvL;': '\u2561',
'boxvl;': '\u2524',
'boxVR;': '\u2560',
'boxVr;': '\u255f',
'boxvR;': '\u255e',
'boxvr;': '\u251c',
'bprime;': '\u2035',
'Breve;': '\u02d8',
'breve;': '\u02d8',
'brvbar': '\xa6',
'brvbar;': '\xa6',
'Bscr;': '\u212c',
'bscr;': '\U0001d4b7',
'bsemi;': '\u204f',
'bsim;': '\u223d',
'bsime;': '\u22cd',
'bsol;': '\\',
'bsolb;': '\u29c5',
'bsolhsub;': '\u27c8',
'bull;': '\u2022',
'bullet;': '\u2022',
'bump;': '\u224e',
'bumpE;': '\u2aae',
'bumpe;': '\u224f',
'Bumpeq;': '\u224e',
'bumpeq;': '\u224f',
'Cacute;': '\u0106',
'cacute;': '\u0107',
'Cap;': '\u22d2',
'cap;': '\u2229',
'capand;': '\u2a44',
'capbrcup;': '\u2a49',
'capcap;': '\u2a4b',
'capcup;': '\u2a47',
'capdot;': '\u2a40',
'CapitalDifferentialD;': '\u2145',
'caps;': '\u2229\ufe00',
'caret;': '\u2041',
'caron;': '\u02c7',
'Cayleys;': '\u212d',
'ccaps;': '\u2a4d',
'Ccaron;': '\u010c',
'ccaron;': '\u010d',
'Ccedil': '\xc7',
'ccedil': '\xe7',
'Ccedil;': '\xc7',
'ccedil;': '\xe7',
'Ccirc;': '\u0108',
'ccirc;': '\u0109',
'Cconint;': '\u2230',
'ccups;': '\u2a4c',
'ccupssm;': '\u2a50',
'Cdot;': '\u010a',
'cdot;': '\u010b',
'cedil': '\xb8',
'cedil;': '\xb8',
'Cedilla;': '\xb8',
'cemptyv;': '\u29b2',
'cent': '\xa2',
'cent;': '\xa2',
'CenterDot;': '\xb7',
'centerdot;': '\xb7',
'Cfr;': '\u212d',
'cfr;': '\U0001d520',
'CHcy;': '\u0427',
'chcy;': '\u0447',
'check;': '\u2713',
'checkmark;': '\u2713',
'Chi;': '\u03a7',
'chi;': '\u03c7',
'cir;': '\u25cb',
'circ;': '\u02c6',
'circeq;': '\u2257',
'circlearrowleft;': '\u21ba',
'circlearrowright;': '\u21bb',
'circledast;': '\u229b',
'circledcirc;': '\u229a',
'circleddash;': '\u229d',
'CircleDot;': '\u2299',
'circledR;': '\xae',
'circledS;': '\u24c8',
'CircleMinus;': '\u2296',
'CirclePlus;': '\u2295',
'CircleTimes;': '\u2297',
'cirE;': '\u29c3',
'cire;': '\u2257',
'cirfnint;': '\u2a10',
'cirmid;': '\u2aef',
'cirscir;': '\u29c2',
'ClockwiseContourIntegral;': '\u2232',
'CloseCurlyDoubleQuote;': '\u201d',
'CloseCurlyQuote;': '\u2019',
'clubs;': '\u2663',
'clubsuit;': '\u2663',
'Colon;': '\u2237',
'colon;': ':',
'Colone;': '\u2a74',
'colone;': '\u2254',
'coloneq;': '\u2254',
'comma;': ',',
'commat;': '@',
'comp;': '\u2201',
'compfn;': '\u2218',
'complement;': '\u2201',
'complexes;': '\u2102',
'cong;': '\u2245',
'congdot;': '\u2a6d',
'Congruent;': '\u2261',
'Conint;': '\u222f',
'conint;': '\u222e',
'ContourIntegral;': '\u222e',
'Copf;': '\u2102',
'copf;': '\U0001d554',
'coprod;': '\u2210',
'Coproduct;': '\u2210',
'COPY': '\xa9',
'copy': '\xa9',
'COPY;': '\xa9',
'copy;': '\xa9',
'copysr;': '\u2117',
'CounterClockwiseContourIntegral;': '\u2233',
'crarr;': '\u21b5',
'Cross;': '\u2a2f',
'cross;': '\u2717',
'Cscr;': '\U0001d49e',
'cscr;': '\U0001d4b8',
'csub;': '\u2acf',
'csube;': '\u2ad1',
'csup;': '\u2ad0',
'csupe;': '\u2ad2',
'ctdot;': '\u22ef',
'cudarrl;': '\u2938',
'cudarrr;': '\u2935',
'cuepr;': '\u22de',
'cuesc;': '\u22df',
'cularr;': '\u21b6',
'cularrp;': '\u293d',
'Cup;': '\u22d3',
'cup;': '\u222a',
'cupbrcap;': '\u2a48',
'CupCap;': '\u224d',
'cupcap;': '\u2a46',
'cupcup;': '\u2a4a',
'cupdot;': '\u228d',
'cupor;': '\u2a45',
'cups;': '\u222a\ufe00',
'curarr;': '\u21b7',
'curarrm;': '\u293c',
'curlyeqprec;': '\u22de',
'curlyeqsucc;': '\u22df',
'curlyvee;': '\u22ce',
'curlywedge;': '\u22cf',
'curren': '\xa4',
'curren;': '\xa4',
'curvearrowleft;': '\u21b6',
'curvearrowright;': '\u21b7',
'cuvee;': '\u22ce',
'cuwed;': '\u22cf',
'cwconint;': '\u2232',
'cwint;': '\u2231',
'cylcty;': '\u232d',
'Dagger;': '\u2021',
'dagger;': '\u2020',
'daleth;': '\u2138',
'Darr;': '\u21a1',
'dArr;': '\u21d3',
'darr;': '\u2193',
'dash;': '\u2010',
'Dashv;': '\u2ae4',
'dashv;': '\u22a3',
'dbkarow;': '\u290f',
'dblac;': '\u02dd',
'Dcaron;': '\u010e',
'dcaron;': '\u010f',
'Dcy;': '\u0414',
'dcy;': '\u0434',
'DD;': '\u2145',
'dd;': '\u2146',
'ddagger;': '\u2021',
'ddarr;': '\u21ca',
'DDotrahd;': '\u2911',
'ddotseq;': '\u2a77',
'deg': '\xb0',
'deg;': '\xb0',
'Del;': '\u2207',
'Delta;': '\u0394',
'delta;': '\u03b4',
'demptyv;': '\u29b1',
'dfisht;': '\u297f',
'Dfr;': '\U0001d507',
'dfr;': '\U0001d521',
'dHar;': '\u2965',
'dharl;': '\u21c3',
'dharr;': '\u21c2',
'DiacriticalAcute;': '\xb4',
'DiacriticalDot;': '\u02d9',
'DiacriticalDoubleAcute;': '\u02dd',
'DiacriticalGrave;': '`',
'DiacriticalTilde;': '\u02dc',
'diam;': '\u22c4',
'Diamond;': '\u22c4',
'diamond;': '\u22c4',
'diamondsuit;': '\u2666',
'diams;': '\u2666',
'die;': '\xa8',
'DifferentialD;': '\u2146',
'digamma;': '\u03dd',
'disin;': '\u22f2',
'div;': '\xf7',
'divide': '\xf7',
'divide;': '\xf7',
'divideontimes;': '\u22c7',
'divonx;': '\u22c7',
'DJcy;': '\u0402',
'djcy;': '\u0452',
'dlcorn;': '\u231e',
'dlcrop;': '\u230d',
'dollar;': '$',
'Dopf;': '\U0001d53b',
'dopf;': '\U0001d555',
'Dot;': '\xa8',
'dot;': '\u02d9',
'DotDot;': '\u20dc',
'doteq;': '\u2250',
'doteqdot;': '\u2251',
'DotEqual;': '\u2250',
'dotminus;': '\u2238',
'dotplus;': '\u2214',
'dotsquare;': '\u22a1',
'doublebarwedge;': '\u2306',
'DoubleContourIntegral;': '\u222f',
'DoubleDot;': '\xa8',
'DoubleDownArrow;': '\u21d3',
'DoubleLeftArrow;': '\u21d0',
'DoubleLeftRightArrow;': '\u21d4',
'DoubleLeftTee;': '\u2ae4',
'DoubleLongLeftArrow;': '\u27f8',
'DoubleLongLeftRightArrow;': '\u27fa',
'DoubleLongRightArrow;': '\u27f9',
'DoubleRightArrow;': '\u21d2',
'DoubleRightTee;': '\u22a8',
'DoubleUpArrow;': '\u21d1',
'DoubleUpDownArrow;': '\u21d5',
'DoubleVerticalBar;': '\u2225',
'DownArrow;': '\u2193',
'Downarrow;': '\u21d3',
'downarrow;': '\u2193',
'DownArrowBar;': '\u2913',
'DownArrowUpArrow;': '\u21f5',
'DownBreve;': '\u0311',
'downdownarrows;': '\u21ca',
'downharpoonleft;': '\u21c3',
'downharpoonright;': '\u21c2',
'DownLeftRightVector;': '\u2950',
'DownLeftTeeVector;': '\u295e',
'DownLeftVector;': '\u21bd',
'DownLeftVectorBar;': '\u2956',
'DownRightTeeVector;': '\u295f',
'DownRightVector;': '\u21c1',
'DownRightVectorBar;': '\u2957',
'DownTee;': '\u22a4',
'DownTeeArrow;': '\u21a7',
'drbkarow;': '\u2910',
'drcorn;': '\u231f',
'drcrop;': '\u230c',
'Dscr;': '\U0001d49f',
'dscr;': '\U0001d4b9',
'DScy;': '\u0405',
'dscy;': '\u0455',
'dsol;': '\u29f6',
'Dstrok;': '\u0110',
'dstrok;': '\u0111',
'dtdot;': '\u22f1',
'dtri;': '\u25bf',
'dtrif;': '\u25be',
'duarr;': '\u21f5',
'duhar;': '\u296f',
'dwangle;': '\u29a6',
'DZcy;': '\u040f',
'dzcy;': '\u045f',
'dzigrarr;': '\u27ff',
'Eacute': '\xc9',
'eacute': '\xe9',
'Eacute;': '\xc9',
'eacute;': '\xe9',
'easter;': '\u2a6e',
'Ecaron;': '\u011a',
'ecaron;': '\u011b',
'ecir;': '\u2256',
'Ecirc': '\xca',
'ecirc': '\xea',
'Ecirc;': '\xca',
'ecirc;': '\xea',
'ecolon;': '\u2255',
'Ecy;': '\u042d',
'ecy;': '\u044d',
'eDDot;': '\u2a77',
'Edot;': '\u0116',
'eDot;': '\u2251',
'edot;': '\u0117',
'ee;': '\u2147',
'efDot;': '\u2252',
'Efr;': '\U0001d508',
'efr;': '\U0001d522',
'eg;': '\u2a9a',
'Egrave': '\xc8',
'egrave': '\xe8',
'Egrave;': '\xc8',
'egrave;': '\xe8',
'egs;': '\u2a96',
'egsdot;': '\u2a98',
'el;': '\u2a99',
'Element;': '\u2208',
'elinters;': '\u23e7',
'ell;': '\u2113',
'els;': '\u2a95',
'elsdot;': '\u2a97',
'Emacr;': '\u0112',
'emacr;': '\u0113',
'empty;': '\u2205',
'emptyset;': '\u2205',
'EmptySmallSquare;': '\u25fb',
'emptyv;': '\u2205',
'EmptyVerySmallSquare;': '\u25ab',
'emsp13;': '\u2004',
'emsp14;': '\u2005',
'emsp;': '\u2003',
'ENG;': '\u014a',
'eng;': '\u014b',
'ensp;': '\u2002',
'Eogon;': '\u0118',
'eogon;': '\u0119',
'Eopf;': '\U0001d53c',
'eopf;': '\U0001d556',
'epar;': '\u22d5',
'eparsl;': '\u29e3',
'eplus;': '\u2a71',
'epsi;': '\u03b5',
'Epsilon;': '\u0395',
'epsilon;': '\u03b5',
'epsiv;': '\u03f5',
'eqcirc;': '\u2256',
'eqcolon;': '\u2255',
'eqsim;': '\u2242',
'eqslantgtr;': '\u2a96',
'eqslantless;': '\u2a95',
'Equal;': '\u2a75',
'equals;': '=',
'EqualTilde;': '\u2242',
'equest;': '\u225f',
'Equilibrium;': '\u21cc',
'equiv;': '\u2261',
'equivDD;': '\u2a78',
'eqvparsl;': '\u29e5',
'erarr;': '\u2971',
'erDot;': '\u2253',
'Escr;': '\u2130',
'escr;': '\u212f',
'esdot;': '\u2250',
'Esim;': '\u2a73',
'esim;': '\u2242',
'Eta;': '\u0397',
'eta;': '\u03b7',
'ETH': '\xd0',
'eth': '\xf0',
'ETH;': '\xd0',
'eth;': '\xf0',
'Euml': '\xcb',
'euml': '\xeb',
'Euml;': '\xcb',
'euml;': '\xeb',
'euro;': '\u20ac',
'excl;': '!',
'exist;': '\u2203',
'Exists;': '\u2203',
'expectation;': '\u2130',
'ExponentialE;': '\u2147',
'exponentiale;': '\u2147',
'fallingdotseq;': '\u2252',
'Fcy;': '\u0424',
'fcy;': '\u0444',
'female;': '\u2640',
'ffilig;': '\ufb03',
'fflig;': '\ufb00',
'ffllig;': '\ufb04',
'Ffr;': '\U0001d509',
'ffr;': '\U0001d523',
'filig;': '\ufb01',
'FilledSmallSquare;': '\u25fc',
'FilledVerySmallSquare;': '\u25aa',
'fjlig;': 'fj',
'flat;': '\u266d',
'fllig;': '\ufb02',
'fltns;': '\u25b1',
'fnof;': '\u0192',
'Fopf;': '\U0001d53d',
'fopf;': '\U0001d557',
'ForAll;': '\u2200',
'forall;': '\u2200',
'fork;': '\u22d4',
'forkv;': '\u2ad9',
'Fouriertrf;': '\u2131',
'fpartint;': '\u2a0d',
'frac12': '\xbd',
'frac12;': '\xbd',
'frac13;': '\u2153',
'frac14': '\xbc',
'frac14;': '\xbc',
'frac15;': '\u2155',
'frac16;': '\u2159',
'frac18;': '\u215b',
'frac23;': '\u2154',
'frac25;': '\u2156',
'frac34': '\xbe',
'frac34;': '\xbe',
'frac35;': '\u2157',
'frac38;': '\u215c',
'frac45;': '\u2158',
'frac56;': '\u215a',
'frac58;': '\u215d',
'frac78;': '\u215e',
'frasl;': '\u2044',
'frown;': '\u2322',
'Fscr;': '\u2131',
'fscr;': '\U0001d4bb',
'gacute;': '\u01f5',
'Gamma;': '\u0393',
'gamma;': '\u03b3',
'Gammad;': '\u03dc',
'gammad;': '\u03dd',
'gap;': '\u2a86',
'Gbreve;': '\u011e',
'gbreve;': '\u011f',
'Gcedil;': '\u0122',
'Gcirc;': '\u011c',
'gcirc;': '\u011d',
'Gcy;': '\u0413',
'gcy;': '\u0433',
'Gdot;': '\u0120',
'gdot;': '\u0121',
'gE;': '\u2267',
'ge;': '\u2265',
'gEl;': '\u2a8c',
'gel;': '\u22db',
'geq;': '\u2265',
'geqq;': '\u2267',
'geqslant;': '\u2a7e',
'ges;': '\u2a7e',
'gescc;': '\u2aa9',
'gesdot;': '\u2a80',
'gesdoto;': '\u2a82',
'gesdotol;': '\u2a84',
'gesl;': '\u22db\ufe00',
'gesles;': '\u2a94',
'Gfr;': '\U0001d50a',
'gfr;': '\U0001d524',
'Gg;': '\u22d9',
'gg;': '\u226b',
'ggg;': '\u22d9',
'gimel;': '\u2137',
'GJcy;': '\u0403',
'gjcy;': '\u0453',
'gl;': '\u2277',
'gla;': '\u2aa5',
'glE;': '\u2a92',
'glj;': '\u2aa4',
'gnap;': '\u2a8a',
'gnapprox;': '\u2a8a',
'gnE;': '\u2269',
'gne;': '\u2a88',
'gneq;': '\u2a88',
'gneqq;': '\u2269',
'gnsim;': '\u22e7',
'Gopf;': '\U0001d53e',
'gopf;': '\U0001d558',
'grave;': '`',
'GreaterEqual;': '\u2265',
'GreaterEqualLess;': '\u22db',
'GreaterFullEqual;': '\u2267',
'GreaterGreater;': '\u2aa2',
'GreaterLess;': '\u2277',
'GreaterSlantEqual;': '\u2a7e',
'GreaterTilde;': '\u2273',
'Gscr;': '\U0001d4a2',
'gscr;': '\u210a',
'gsim;': '\u2273',
'gsime;': '\u2a8e',
'gsiml;': '\u2a90',
'GT': '>',
'gt': '>',
'GT;': '>',
'Gt;': '\u226b',
'gt;': '>',
'gtcc;': '\u2aa7',
'gtcir;': '\u2a7a',
'gtdot;': '\u22d7',
'gtlPar;': '\u2995',
'gtquest;': '\u2a7c',
'gtrapprox;': '\u2a86',
'gtrarr;': '\u2978',
'gtrdot;': '\u22d7',
'gtreqless;': '\u22db',
'gtreqqless;': '\u2a8c',
'gtrless;': '\u2277',
'gtrsim;': '\u2273',
'gvertneqq;': '\u2269\ufe00',
'gvnE;': '\u2269\ufe00',
'Hacek;': '\u02c7',
'hairsp;': '\u200a',
'half;': '\xbd',
'hamilt;': '\u210b',
'HARDcy;': '\u042a',
'hardcy;': '\u044a',
'hArr;': '\u21d4',
'harr;': '\u2194',
'harrcir;': '\u2948',
'harrw;': '\u21ad',
'Hat;': '^',
'hbar;': '\u210f',
'Hcirc;': '\u0124',
'hcirc;': '\u0125',
'hearts;': '\u2665',
'heartsuit;': '\u2665',
'hellip;': '\u2026',
'hercon;': '\u22b9',
'Hfr;': '\u210c',
'hfr;': '\U0001d525',
'HilbertSpace;': '\u210b',
'hksearow;': '\u2925',
'hkswarow;': '\u2926',
'hoarr;': '\u21ff',
'homtht;': '\u223b',
'hookleftarrow;': '\u21a9',
'hookrightarrow;': '\u21aa',
'Hopf;': '\u210d',
'hopf;': '\U0001d559',
'horbar;': '\u2015',
'HorizontalLine;': '\u2500',
'Hscr;': '\u210b',
'hscr;': '\U0001d4bd',
'hslash;': '\u210f',
'Hstrok;': '\u0126',
'hstrok;': '\u0127',
'HumpDownHump;': '\u224e',
'HumpEqual;': '\u224f',
'hybull;': '\u2043',
'hyphen;': '\u2010',
'Iacute': '\xcd',
'iacute': '\xed',
'Iacute;': '\xcd',
'iacute;': '\xed',
'ic;': '\u2063',
'Icirc': '\xce',
'icirc': '\xee',
'Icirc;': '\xce',
'icirc;': '\xee',
'Icy;': '\u0418',
'icy;': '\u0438',
'Idot;': '\u0130',
'IEcy;': '\u0415',
'iecy;': '\u0435',
'iexcl': '\xa1',
'iexcl;': '\xa1',
'iff;': '\u21d4',
'Ifr;': '\u2111',
'ifr;': '\U0001d526',
'Igrave': '\xcc',
'igrave': '\xec',
'Igrave;': '\xcc',
'igrave;': '\xec',
'ii;': '\u2148',
'iiiint;': '\u2a0c',
'iiint;': '\u222d',
'iinfin;': '\u29dc',
'iiota;': '\u2129',
'IJlig;': '\u0132',
'ijlig;': '\u0133',
'Im;': '\u2111',
'Imacr;': '\u012a',
'imacr;': '\u012b',
'image;': '\u2111',
'ImaginaryI;': '\u2148',
'imagline;': '\u2110',
'imagpart;': '\u2111',
'imath;': '\u0131',
'imof;': '\u22b7',
'imped;': '\u01b5',
'Implies;': '\u21d2',
'in;': '\u2208',
'incare;': '\u2105',
'infin;': '\u221e',
'infintie;': '\u29dd',
'inodot;': '\u0131',
'Int;': '\u222c',
'int;': '\u222b',
'intcal;': '\u22ba',
'integers;': '\u2124',
'Integral;': '\u222b',
'intercal;': '\u22ba',
'Intersection;': '\u22c2',
'intlarhk;': '\u2a17',
'intprod;': '\u2a3c',
'InvisibleComma;': '\u2063',
'InvisibleTimes;': '\u2062',
'IOcy;': '\u0401',
'iocy;': '\u0451',
'Iogon;': '\u012e',
'iogon;': '\u012f',
'Iopf;': '\U0001d540',
'iopf;': '\U0001d55a',
'Iota;': '\u0399',
'iota;': '\u03b9',
'iprod;': '\u2a3c',
'iquest': '\xbf',
'iquest;': '\xbf',
'Iscr;': '\u2110',
'iscr;': '\U0001d4be',
'isin;': '\u2208',
'isindot;': '\u22f5',
'isinE;': '\u22f9',
'isins;': '\u22f4',
'isinsv;': '\u22f3',
'isinv;': '\u2208',
'it;': '\u2062',
'Itilde;': '\u0128',
'itilde;': '\u0129',
'Iukcy;': '\u0406',
'iukcy;': '\u0456',
'Iuml': '\xcf',
'iuml': '\xef',
'Iuml;': '\xcf',
'iuml;': '\xef',
'Jcirc;': '\u0134',
'jcirc;': '\u0135',
'Jcy;': '\u0419',
'jcy;': '\u0439',
'Jfr;': '\U0001d50d',
'jfr;': '\U0001d527',
'jmath;': '\u0237',
'Jopf;': '\U0001d541',
'jopf;': '\U0001d55b',
'Jscr;': '\U0001d4a5',
'jscr;': '\U0001d4bf',
'Jsercy;': '\u0408',
'jsercy;': '\u0458',
'Jukcy;': '\u0404',
'jukcy;': '\u0454',
'Kappa;': '\u039a',
'kappa;': '\u03ba',
'kappav;': '\u03f0',
'Kcedil;': '\u0136',
'kcedil;': '\u0137',
'Kcy;': '\u041a',
'kcy;': '\u043a',
'Kfr;': '\U0001d50e',
'kfr;': '\U0001d528',
'kgreen;': '\u0138',
'KHcy;': '\u0425',
'khcy;': '\u0445',
'KJcy;': '\u040c',
'kjcy;': '\u045c',
'Kopf;': '\U0001d542',
'kopf;': '\U0001d55c',
'Kscr;': '\U0001d4a6',
'kscr;': '\U0001d4c0',
'lAarr;': '\u21da',
'Lacute;': '\u0139',
'lacute;': '\u013a',
'laemptyv;': '\u29b4',
'lagran;': '\u2112',
'Lambda;': '\u039b',
'lambda;': '\u03bb',
'Lang;': '\u27ea',
'lang;': '\u27e8',
'langd;': '\u2991',
'langle;': '\u27e8',
'lap;': '\u2a85',
'Laplacetrf;': '\u2112',
'laquo': '\xab',
'laquo;': '\xab',
'Larr;': '\u219e',
'lArr;': '\u21d0',
'larr;': '\u2190',
'larrb;': '\u21e4',
'larrbfs;': '\u291f',
'larrfs;': '\u291d',
'larrhk;': '\u21a9',
'larrlp;': '\u21ab',
'larrpl;': '\u2939',
'larrsim;': '\u2973',
'larrtl;': '\u21a2',
'lat;': '\u2aab',
'lAtail;': '\u291b',
'latail;': '\u2919',
'late;': '\u2aad',
'lates;': '\u2aad\ufe00',
'lBarr;': '\u290e',
'lbarr;': '\u290c',
'lbbrk;': '\u2772',
'lbrace;': '{',
'lbrack;': '[',
'lbrke;': '\u298b',
'lbrksld;': '\u298f',
'lbrkslu;': '\u298d',
'Lcaron;': '\u013d',
'lcaron;': '\u013e',
'Lcedil;': '\u013b',
'lcedil;': '\u013c',
'lceil;': '\u2308',
'lcub;': '{',
'Lcy;': '\u041b',
'lcy;': '\u043b',
'ldca;': '\u2936',
'ldquo;': '\u201c',
'ldquor;': '\u201e',
'ldrdhar;': '\u2967',
'ldrushar;': '\u294b',
'ldsh;': '\u21b2',
'lE;': '\u2266',
'le;': '\u2264',
'LeftAngleBracket;': '\u27e8',
'LeftArrow;': '\u2190',
'Leftarrow;': '\u21d0',
'leftarrow;': '\u2190',
'LeftArrowBar;': '\u21e4',
'LeftArrowRightArrow;': '\u21c6',
'leftarrowtail;': '\u21a2',
'LeftCeiling;': '\u2308',
'LeftDoubleBracket;': '\u27e6',
'LeftDownTeeVector;': '\u2961',
'LeftDownVector;': '\u21c3',
'LeftDownVectorBar;': '\u2959',
'LeftFloor;': '\u230a',
'leftharpoondown;': '\u21bd',
'leftharpoonup;': '\u21bc',
'leftleftarrows;': '\u21c7',
'LeftRightArrow;': '\u2194',
'Leftrightarrow;': '\u21d4',
'leftrightarrow;': '\u2194',
'leftrightarrows;': '\u21c6',
'leftrightharpoons;': '\u21cb',
'leftrightsquigarrow;': '\u21ad',
'LeftRightVector;': '\u294e',
'LeftTee;': '\u22a3',
'LeftTeeArrow;': '\u21a4',
'LeftTeeVector;': '\u295a',
'leftthreetimes;': '\u22cb',
'LeftTriangle;': '\u22b2',
'LeftTriangleBar;': '\u29cf',
'LeftTriangleEqual;': '\u22b4',
'LeftUpDownVector;': '\u2951',
'LeftUpTeeVector;': '\u2960',
'LeftUpVector;': '\u21bf',
'LeftUpVectorBar;': '\u2958',
'LeftVector;': '\u21bc',
'LeftVectorBar;': '\u2952',
'lEg;': '\u2a8b',
'leg;': '\u22da',
'leq;': '\u2264',
'leqq;': '\u2266',
'leqslant;': '\u2a7d',
'les;': '\u2a7d',
'lescc;': '\u2aa8',
'lesdot;': '\u2a7f',
'lesdoto;': '\u2a81',
'lesdotor;': '\u2a83',
'lesg;': '\u22da\ufe00',
'lesges;': '\u2a93',
'lessapprox;': '\u2a85',
'lessdot;': '\u22d6',
'lesseqgtr;': '\u22da',
'lesseqqgtr;': '\u2a8b',
'LessEqualGreater;': '\u22da',
'LessFullEqual;': '\u2266',
'LessGreater;': '\u2276',
'lessgtr;': '\u2276',
'LessLess;': '\u2aa1',
'lesssim;': '\u2272',
'LessSlantEqual;': '\u2a7d',
'LessTilde;': '\u2272',
'lfisht;': '\u297c',
'lfloor;': '\u230a',
'Lfr;': '\U0001d50f',
'lfr;': '\U0001d529',
'lg;': '\u2276',
'lgE;': '\u2a91',
'lHar;': '\u2962',
'lhard;': '\u21bd',
'lharu;': '\u21bc',
'lharul;': '\u296a',
'lhblk;': '\u2584',
'LJcy;': '\u0409',
'ljcy;': '\u0459',
'Ll;': '\u22d8',
'll;': '\u226a',
'llarr;': '\u21c7',
'llcorner;': '\u231e',
'Lleftarrow;': '\u21da',
'llhard;': '\u296b',
'lltri;': '\u25fa',
'Lmidot;': '\u013f',
'lmidot;': '\u0140',
'lmoust;': '\u23b0',
'lmoustache;': '\u23b0',
'lnap;': '\u2a89',
'lnapprox;': '\u2a89',
'lnE;': '\u2268',
'lne;': '\u2a87',
'lneq;': '\u2a87',
'lneqq;': '\u2268',
'lnsim;': '\u22e6',
'loang;': '\u27ec',
'loarr;': '\u21fd',
'lobrk;': '\u27e6',
'LongLeftArrow;': '\u27f5',
'Longleftarrow;': '\u27f8',
'longleftarrow;': '\u27f5',
'LongLeftRightArrow;': '\u27f7',
'Longleftrightarrow;': '\u27fa',
'longleftrightarrow;': '\u27f7',
'longmapsto;': '\u27fc',
'LongRightArrow;': '\u27f6',
'Longrightarrow;': '\u27f9',
'longrightarrow;': '\u27f6',
'looparrowleft;': '\u21ab',
'looparrowright;': '\u21ac',
'lopar;': '\u2985',
'Lopf;': '\U0001d543',
'lopf;': '\U0001d55d',
'loplus;': '\u2a2d',
'lotimes;': '\u2a34',
'lowast;': '\u2217',
'lowbar;': '_',
'LowerLeftArrow;': '\u2199',
'LowerRightArrow;': '\u2198',
'loz;': '\u25ca',
'lozenge;': '\u25ca',
'lozf;': '\u29eb',
'lpar;': '(',
'lparlt;': '\u2993',
'lrarr;': '\u21c6',
'lrcorner;': '\u231f',
'lrhar;': '\u21cb',
'lrhard;': '\u296d',
'lrm;': '\u200e',
'lrtri;': '\u22bf',
'lsaquo;': '\u2039',
'Lscr;': '\u2112',
'lscr;': '\U0001d4c1',
'Lsh;': '\u21b0',
'lsh;': '\u21b0',
'lsim;': '\u2272',
'lsime;': '\u2a8d',
'lsimg;': '\u2a8f',
'lsqb;': '[',
'lsquo;': '\u2018',
'lsquor;': '\u201a',
'Lstrok;': '\u0141',
'lstrok;': '\u0142',
'LT': '<',
'lt': '<',
'LT;': '<',
'Lt;': '\u226a',
'lt;': '<',
'ltcc;': '\u2aa6',
'ltcir;': '\u2a79',
'ltdot;': '\u22d6',
'lthree;': '\u22cb',
'ltimes;': '\u22c9',
'ltlarr;': '\u2976',
'ltquest;': '\u2a7b',
'ltri;': '\u25c3',
'ltrie;': '\u22b4',
'ltrif;': '\u25c2',
'ltrPar;': '\u2996',
'lurdshar;': '\u294a',
'luruhar;': '\u2966',
'lvertneqq;': '\u2268\ufe00',
'lvnE;': '\u2268\ufe00',
'macr': '\xaf',
'macr;': '\xaf',
'male;': '\u2642',
'malt;': '\u2720',
'maltese;': '\u2720',
'Map;': '\u2905',
'map;': '\u21a6',
'mapsto;': '\u21a6',
'mapstodown;': '\u21a7',
'mapstoleft;': '\u21a4',
'mapstoup;': '\u21a5',
'marker;': '\u25ae',
'mcomma;': '\u2a29',
'Mcy;': '\u041c',
'mcy;': '\u043c',
'mdash;': '\u2014',
'mDDot;': '\u223a',
'measuredangle;': '\u2221',
'MediumSpace;': '\u205f',
'Mellintrf;': '\u2133',
'Mfr;': '\U0001d510',
'mfr;': '\U0001d52a',
'mho;': '\u2127',
'micro': '\xb5',
'micro;': '\xb5',
'mid;': '\u2223',
'midast;': '*',
'midcir;': '\u2af0',
'middot': '\xb7',
'middot;': '\xb7',
'minus;': '\u2212',
'minusb;': '\u229f',
'minusd;': '\u2238',
'minusdu;': '\u2a2a',
'MinusPlus;': '\u2213',
'mlcp;': '\u2adb',
'mldr;': '\u2026',
'mnplus;': '\u2213',
'models;': '\u22a7',
'Mopf;': '\U0001d544',
'mopf;': '\U0001d55e',
'mp;': '\u2213',
'Mscr;': '\u2133',
'mscr;': '\U0001d4c2',
'mstpos;': '\u223e',
'Mu;': '\u039c',
'mu;': '\u03bc',
'multimap;': '\u22b8',
'mumap;': '\u22b8',
'nabla;': '\u2207',
'Nacute;': '\u0143',
'nacute;': '\u0144',
'nang;': '\u2220\u20d2',
'nap;': '\u2249',
'napE;': '\u2a70\u0338',
'napid;': '\u224b\u0338',
'napos;': '\u0149',
'napprox;': '\u2249',
'natur;': '\u266e',
'natural;': '\u266e',
'naturals;': '\u2115',
'nbsp': '\xa0',
'nbsp;': '\xa0',
'nbump;': '\u224e\u0338',
'nbumpe;': '\u224f\u0338',
'ncap;': '\u2a43',
'Ncaron;': '\u0147',
'ncaron;': '\u0148',
'Ncedil;': '\u0145',
'ncedil;': '\u0146',
'ncong;': '\u2247',
'ncongdot;': '\u2a6d\u0338',
'ncup;': '\u2a42',
'Ncy;': '\u041d',
'ncy;': '\u043d',
'ndash;': '\u2013',
'ne;': '\u2260',
'nearhk;': '\u2924',
'neArr;': '\u21d7',
'nearr;': '\u2197',
'nearrow;': '\u2197',
'nedot;': '\u2250\u0338',
'NegativeMediumSpace;': '\u200b',
'NegativeThickSpace;': '\u200b',
'NegativeThinSpace;': '\u200b',
'NegativeVeryThinSpace;': '\u200b',
'nequiv;': '\u2262',
'nesear;': '\u2928',
'nesim;': '\u2242\u0338',
'NestedGreaterGreater;': '\u226b',
'NestedLessLess;': '\u226a',
'NewLine;': '\n',
'nexist;': '\u2204',
'nexists;': '\u2204',
'Nfr;': '\U0001d511',
'nfr;': '\U0001d52b',
'ngE;': '\u2267\u0338',
'nge;': '\u2271',
'ngeq;': '\u2271',
'ngeqq;': '\u2267\u0338',
'ngeqslant;': '\u2a7e\u0338',
'nges;': '\u2a7e\u0338',
'nGg;': '\u22d9\u0338',
'ngsim;': '\u2275',
'nGt;': '\u226b\u20d2',
'ngt;': '\u226f',
'ngtr;': '\u226f',
'nGtv;': '\u226b\u0338',
'nhArr;': '\u21ce',
'nharr;': '\u21ae',
'nhpar;': '\u2af2',
'ni;': '\u220b',
'nis;': '\u22fc',
'nisd;': '\u22fa',
'niv;': '\u220b',
'NJcy;': '\u040a',
'njcy;': '\u045a',
'nlArr;': '\u21cd',
'nlarr;': '\u219a',
'nldr;': '\u2025',
'nlE;': '\u2266\u0338',
'nle;': '\u2270',
'nLeftarrow;': '\u21cd',
'nleftarrow;': '\u219a',
'nLeftrightarrow;': '\u21ce',
'nleftrightarrow;': '\u21ae',
'nleq;': '\u2270',
'nleqq;': '\u2266\u0338',
'nleqslant;': '\u2a7d\u0338',
'nles;': '\u2a7d\u0338',
'nless;': '\u226e',
'nLl;': '\u22d8\u0338',
'nlsim;': '\u2274',
'nLt;': '\u226a\u20d2',
'nlt;': '\u226e',
'nltri;': '\u22ea',
'nltrie;': '\u22ec',
'nLtv;': '\u226a\u0338',
'nmid;': '\u2224',
'NoBreak;': '\u2060',
'NonBreakingSpace;': '\xa0',
'Nopf;': '\u2115',
'nopf;': '\U0001d55f',
'not': '\xac',
'Not;': '\u2aec',
'not;': '\xac',
'NotCongruent;': '\u2262',
'NotCupCap;': '\u226d',
'NotDoubleVerticalBar;': '\u2226',
'NotElement;': '\u2209',
'NotEqual;': '\u2260',
'NotEqualTilde;': '\u2242\u0338',
'NotExists;': '\u2204',
'NotGreater;': '\u226f',
'NotGreaterEqual;': '\u2271',
'NotGreaterFullEqual;': '\u2267\u0338',
'NotGreaterGreater;': '\u226b\u0338',
'NotGreaterLess;': '\u2279',
'NotGreaterSlantEqual;': '\u2a7e\u0338',
'NotGreaterTilde;': '\u2275',
'NotHumpDownHump;': '\u224e\u0338',
'NotHumpEqual;': '\u224f\u0338',
'notin;': '\u2209',
'notindot;': '\u22f5\u0338',
'notinE;': '\u22f9\u0338',
'notinva;': '\u2209',
'notinvb;': '\u22f7',
'notinvc;': '\u22f6',
'NotLeftTriangle;': '\u22ea',
'NotLeftTriangleBar;': '\u29cf\u0338',
'NotLeftTriangleEqual;': '\u22ec',
'NotLess;': '\u226e',
'NotLessEqual;': '\u2270',
'NotLessGreater;': '\u2278',
'NotLessLess;': '\u226a\u0338',
'NotLessSlantEqual;': '\u2a7d\u0338',
'NotLessTilde;': '\u2274',
'NotNestedGreaterGreater;': '\u2aa2\u0338',
'NotNestedLessLess;': '\u2aa1\u0338',
'notni;': '\u220c',
'notniva;': '\u220c',
'notnivb;': '\u22fe',
'notnivc;': '\u22fd',
'NotPrecedes;': '\u2280',
'NotPrecedesEqual;': '\u2aaf\u0338',
'NotPrecedesSlantEqual;': '\u22e0',
'NotReverseElement;': '\u220c',
'NotRightTriangle;': '\u22eb',
'NotRightTriangleBar;': '\u29d0\u0338',
'NotRightTriangleEqual;': '\u22ed',
'NotSquareSubset;': '\u228f\u0338',
'NotSquareSubsetEqual;': '\u22e2',
'NotSquareSuperset;': '\u2290\u0338',
'NotSquareSupersetEqual;': '\u22e3',
'NotSubset;': '\u2282\u20d2',
'NotSubsetEqual;': '\u2288',
'NotSucceeds;': '\u2281',
'NotSucceedsEqual;': '\u2ab0\u0338',
'NotSucceedsSlantEqual;': '\u22e1',
'NotSucceedsTilde;': '\u227f\u0338',
'NotSuperset;': '\u2283\u20d2',
'NotSupersetEqual;': '\u2289',
'NotTilde;': '\u2241',
'NotTildeEqual;': '\u2244',
'NotTildeFullEqual;': '\u2247',
'NotTildeTilde;': '\u2249',
'NotVerticalBar;': '\u2224',
'npar;': '\u2226',
'nparallel;': '\u2226',
'nparsl;': '\u2afd\u20e5',
'npart;': '\u2202\u0338',
'npolint;': '\u2a14',
'npr;': '\u2280',
'nprcue;': '\u22e0',
'npre;': '\u2aaf\u0338',
'nprec;': '\u2280',
'npreceq;': '\u2aaf\u0338',
'nrArr;': '\u21cf',
'nrarr;': '\u219b',
'nrarrc;': '\u2933\u0338',
'nrarrw;': '\u219d\u0338',
'nRightarrow;': '\u21cf',
'nrightarrow;': '\u219b',
'nrtri;': '\u22eb',
'nrtrie;': '\u22ed',
'nsc;': '\u2281',
'nsccue;': '\u22e1',
'nsce;': '\u2ab0\u0338',
'Nscr;': '\U0001d4a9',
'nscr;': '\U0001d4c3',
'nshortmid;': '\u2224',
'nshortparallel;': '\u2226',
'nsim;': '\u2241',
'nsime;': '\u2244',
'nsimeq;': '\u2244',
'nsmid;': '\u2224',
'nspar;': '\u2226',
'nsqsube;': '\u22e2',
'nsqsupe;': '\u22e3',
'nsub;': '\u2284',
'nsubE;': '\u2ac5\u0338',
'nsube;': '\u2288',
'nsubset;': '\u2282\u20d2',
'nsubseteq;': '\u2288',
'nsubseteqq;': '\u2ac5\u0338',
'nsucc;': '\u2281',
'nsucceq;': '\u2ab0\u0338',
'nsup;': '\u2285',
'nsupE;': '\u2ac6\u0338',
'nsupe;': '\u2289',
'nsupset;': '\u2283\u20d2',
'nsupseteq;': '\u2289',
'nsupseteqq;': '\u2ac6\u0338',
'ntgl;': '\u2279',
'Ntilde': '\xd1',
'ntilde': '\xf1',
'Ntilde;': '\xd1',
'ntilde;': '\xf1',
'ntlg;': '\u2278',
'ntriangleleft;': '\u22ea',
'ntrianglelefteq;': '\u22ec',
'ntriangleright;': '\u22eb',
'ntrianglerighteq;': '\u22ed',
'Nu;': '\u039d',
'nu;': '\u03bd',
'num;': '#',
'numero;': '\u2116',
'numsp;': '\u2007',
'nvap;': '\u224d\u20d2',
'nVDash;': '\u22af',
'nVdash;': '\u22ae',
'nvDash;': '\u22ad',
'nvdash;': '\u22ac',
'nvge;': '\u2265\u20d2',
'nvgt;': '>\u20d2',
'nvHarr;': '\u2904',
'nvinfin;': '\u29de',
'nvlArr;': '\u2902',
'nvle;': '\u2264\u20d2',
'nvlt;': '<\u20d2',
'nvltrie;': '\u22b4\u20d2',
'nvrArr;': '\u2903',
'nvrtrie;': '\u22b5\u20d2',
'nvsim;': '\u223c\u20d2',
'nwarhk;': '\u2923',
'nwArr;': '\u21d6',
'nwarr;': '\u2196',
'nwarrow;': '\u2196',
'nwnear;': '\u2927',
'Oacute': '\xd3',
'oacute': '\xf3',
'Oacute;': '\xd3',
'oacute;': '\xf3',
'oast;': '\u229b',
'ocir;': '\u229a',
'Ocirc': '\xd4',
'ocirc': '\xf4',
'Ocirc;': '\xd4',
'ocirc;': '\xf4',
'Ocy;': '\u041e',
'ocy;': '\u043e',
'odash;': '\u229d',
'Odblac;': '\u0150',
'odblac;': '\u0151',
'odiv;': '\u2a38',
'odot;': '\u2299',
'odsold;': '\u29bc',
'OElig;': '\u0152',
'oelig;': '\u0153',
'ofcir;': '\u29bf',
'Ofr;': '\U0001d512',
'ofr;': '\U0001d52c',
'ogon;': '\u02db',
'Ograve': '\xd2',
'ograve': '\xf2',
'Ograve;': '\xd2',
'ograve;': '\xf2',
'ogt;': '\u29c1',
'ohbar;': '\u29b5',
'ohm;': '\u03a9',
'oint;': '\u222e',
'olarr;': '\u21ba',
'olcir;': '\u29be',
'olcross;': '\u29bb',
'oline;': '\u203e',
'olt;': '\u29c0',
'Omacr;': '\u014c',
'omacr;': '\u014d',
'Omega;': '\u03a9',
'omega;': '\u03c9',
'Omicron;': '\u039f',
'omicron;': '\u03bf',
'omid;': '\u29b6',
'ominus;': '\u2296',
'Oopf;': '\U0001d546',
'oopf;': '\U0001d560',
'opar;': '\u29b7',
'OpenCurlyDoubleQuote;': '\u201c',
'OpenCurlyQuote;': '\u2018',
'operp;': '\u29b9',
'oplus;': '\u2295',
'Or;': '\u2a54',
'or;': '\u2228',
'orarr;': '\u21bb',
'ord;': '\u2a5d',
'order;': '\u2134',
'orderof;': '\u2134',
'ordf': '\xaa',
'ordf;': '\xaa',
'ordm': '\xba',
'ordm;': '\xba',
'origof;': '\u22b6',
'oror;': '\u2a56',
'orslope;': '\u2a57',
'orv;': '\u2a5b',
'oS;': '\u24c8',
'Oscr;': '\U0001d4aa',
'oscr;': '\u2134',
'Oslash': '\xd8',
'oslash': '\xf8',
'Oslash;': '\xd8',
'oslash;': '\xf8',
'osol;': '\u2298',
'Otilde': '\xd5',
'otilde': '\xf5',
'Otilde;': '\xd5',
'otilde;': '\xf5',
'Otimes;': '\u2a37',
'otimes;': '\u2297',
'otimesas;': '\u2a36',
'Ouml': '\xd6',
'ouml': '\xf6',
'Ouml;': '\xd6',
'ouml;': '\xf6',
'ovbar;': '\u233d',
'OverBar;': '\u203e',
'OverBrace;': '\u23de',
'OverBracket;': '\u23b4',
'OverParenthesis;': '\u23dc',
'par;': '\u2225',
'para': '\xb6',
'para;': '\xb6',
'parallel;': '\u2225',
'parsim;': '\u2af3',
'parsl;': '\u2afd',
'part;': '\u2202',
'PartialD;': '\u2202',
'Pcy;': '\u041f',
'pcy;': '\u043f',
'percnt;': '%',
'period;': '.',
'permil;': '\u2030',
'perp;': '\u22a5',
'pertenk;': '\u2031',
'Pfr;': '\U0001d513',
'pfr;': '\U0001d52d',
'Phi;': '\u03a6',
'phi;': '\u03c6',
'phiv;': '\u03d5',
'phmmat;': '\u2133',
'phone;': '\u260e',
'Pi;': '\u03a0',
'pi;': '\u03c0',
'pitchfork;': '\u22d4',
'piv;': '\u03d6',
'planck;': '\u210f',
'planckh;': '\u210e',
'plankv;': '\u210f',
'plus;': '+',
'plusacir;': '\u2a23',
'plusb;': '\u229e',
'pluscir;': '\u2a22',
'plusdo;': '\u2214',
'plusdu;': '\u2a25',
'pluse;': '\u2a72',
'PlusMinus;': '\xb1',
'plusmn': '\xb1',
'plusmn;': '\xb1',
'plussim;': '\u2a26',
'plustwo;': '\u2a27',
'pm;': '\xb1',
'Poincareplane;': '\u210c',
'pointint;': '\u2a15',
'Popf;': '\u2119',
'popf;': '\U0001d561',
'pound': '\xa3',
'pound;': '\xa3',
'Pr;': '\u2abb',
'pr;': '\u227a',
'prap;': '\u2ab7',
'prcue;': '\u227c',
'prE;': '\u2ab3',
'pre;': '\u2aaf',
'prec;': '\u227a',
'precapprox;': '\u2ab7',
'preccurlyeq;': '\u227c',
'Precedes;': '\u227a',
'PrecedesEqual;': '\u2aaf',
'PrecedesSlantEqual;': '\u227c',
'PrecedesTilde;': '\u227e',
'preceq;': '\u2aaf',
'precnapprox;': '\u2ab9',
'precneqq;': '\u2ab5',
'precnsim;': '\u22e8',
'precsim;': '\u227e',
'Prime;': '\u2033',
'prime;': '\u2032',
'primes;': '\u2119',
'prnap;': '\u2ab9',
'prnE;': '\u2ab5',
'prnsim;': '\u22e8',
'prod;': '\u220f',
'Product;': '\u220f',
'profalar;': '\u232e',
'profline;': '\u2312',
'profsurf;': '\u2313',
'prop;': '\u221d',
'Proportion;': '\u2237',
'Proportional;': '\u221d',
'propto;': '\u221d',
'prsim;': '\u227e',
'prurel;': '\u22b0',
'Pscr;': '\U0001d4ab',
'pscr;': '\U0001d4c5',
'Psi;': '\u03a8',
'psi;': '\u03c8',
'puncsp;': '\u2008',
'Qfr;': '\U0001d514',
'qfr;': '\U0001d52e',
'qint;': '\u2a0c',
'Qopf;': '\u211a',
'qopf;': '\U0001d562',
'qprime;': '\u2057',
'Qscr;': '\U0001d4ac',
'qscr;': '\U0001d4c6',
'quaternions;': '\u210d',
'quatint;': '\u2a16',
'quest;': '?',
'questeq;': '\u225f',
'QUOT': '"',
'quot': '"',
'QUOT;': '"',
'quot;': '"',
'rAarr;': '\u21db',
'race;': '\u223d\u0331',
'Racute;': '\u0154',
'racute;': '\u0155',
'radic;': '\u221a',
'raemptyv;': '\u29b3',
'Rang;': '\u27eb',
'rang;': '\u27e9',
'rangd;': '\u2992',
'range;': '\u29a5',
'rangle;': '\u27e9',
'raquo': '\xbb',
'raquo;': '\xbb',
'Rarr;': '\u21a0',
'rArr;': '\u21d2',
'rarr;': '\u2192',
'rarrap;': '\u2975',
'rarrb;': '\u21e5',
'rarrbfs;': '\u2920',
'rarrc;': '\u2933',
'rarrfs;': '\u291e',
'rarrhk;': '\u21aa',
'rarrlp;': '\u21ac',
'rarrpl;': '\u2945',
'rarrsim;': '\u2974',
'Rarrtl;': '\u2916',
'rarrtl;': '\u21a3',
'rarrw;': '\u219d',
'rAtail;': '\u291c',
'ratail;': '\u291a',
'ratio;': '\u2236',
'rationals;': '\u211a',
'RBarr;': '\u2910',
'rBarr;': '\u290f',
'rbarr;': '\u290d',
'rbbrk;': '\u2773',
'rbrace;': '}',
'rbrack;': ']',
'rbrke;': '\u298c',
'rbrksld;': '\u298e',
'rbrkslu;': '\u2990',
'Rcaron;': '\u0158',
'rcaron;': '\u0159',
'Rcedil;': '\u0156',
'rcedil;': '\u0157',
'rceil;': '\u2309',
'rcub;': '}',
'Rcy;': '\u0420',
'rcy;': '\u0440',
'rdca;': '\u2937',
'rdldhar;': '\u2969',
'rdquo;': '\u201d',
'rdquor;': '\u201d',
'rdsh;': '\u21b3',
'Re;': '\u211c',
'real;': '\u211c',
'realine;': '\u211b',
'realpart;': '\u211c',
'reals;': '\u211d',
'rect;': '\u25ad',
'REG': '\xae',
'reg': '\xae',
'REG;': '\xae',
'reg;': '\xae',
'ReverseElement;': '\u220b',
'ReverseEquilibrium;': '\u21cb',
'ReverseUpEquilibrium;': '\u296f',
'rfisht;': '\u297d',
'rfloor;': '\u230b',
'Rfr;': '\u211c',
'rfr;': '\U0001d52f',
'rHar;': '\u2964',
'rhard;': '\u21c1',
'rharu;': '\u21c0',
'rharul;': '\u296c',
'Rho;': '\u03a1',
'rho;': '\u03c1',
'rhov;': '\u03f1',
'RightAngleBracket;': '\u27e9',
'RightArrow;': '\u2192',
'Rightarrow;': '\u21d2',
'rightarrow;': '\u2192',
'RightArrowBar;': '\u21e5',
'RightArrowLeftArrow;': '\u21c4',
'rightarrowtail;': '\u21a3',
'RightCeiling;': '\u2309',
'RightDoubleBracket;': '\u27e7',
'RightDownTeeVector;': '\u295d',
'RightDownVector;': '\u21c2',
'RightDownVectorBar;': '\u2955',
'RightFloor;': '\u230b',
'rightharpoondown;': '\u21c1',
'rightharpoonup;': '\u21c0',
'rightleftarrows;': '\u21c4',
'rightleftharpoons;': '\u21cc',
'rightrightarrows;': '\u21c9',
'rightsquigarrow;': '\u219d',
'RightTee;': '\u22a2',
'RightTeeArrow;': '\u21a6',
'RightTeeVector;': '\u295b',
'rightthreetimes;': '\u22cc',
'RightTriangle;': '\u22b3',
'RightTriangleBar;': '\u29d0',
'RightTriangleEqual;': '\u22b5',
'RightUpDownVector;': '\u294f',
'RightUpTeeVector;': '\u295c',
'RightUpVector;': '\u21be',
'RightUpVectorBar;': '\u2954',
'RightVector;': '\u21c0',
'RightVectorBar;': '\u2953',
'ring;': '\u02da',
'risingdotseq;': '\u2253',
'rlarr;': '\u21c4',
'rlhar;': '\u21cc',
'rlm;': '\u200f',
'rmoust;': '\u23b1',
'rmoustache;': '\u23b1',
'rnmid;': '\u2aee',
'roang;': '\u27ed',
'roarr;': '\u21fe',
'robrk;': '\u27e7',
'ropar;': '\u2986',
'Ropf;': '\u211d',
'ropf;': '\U0001d563',
'roplus;': '\u2a2e',
'rotimes;': '\u2a35',
'RoundImplies;': '\u2970',
'rpar;': ')',
'rpargt;': '\u2994',
'rppolint;': '\u2a12',
'rrarr;': '\u21c9',
'Rrightarrow;': '\u21db',
'rsaquo;': '\u203a',
'Rscr;': '\u211b',
'rscr;': '\U0001d4c7',
'Rsh;': '\u21b1',
'rsh;': '\u21b1',
'rsqb;': ']',
'rsquo;': '\u2019',
'rsquor;': '\u2019',
'rthree;': '\u22cc',
'rtimes;': '\u22ca',
'rtri;': '\u25b9',
'rtrie;': '\u22b5',
'rtrif;': '\u25b8',
'rtriltri;': '\u29ce',
'RuleDelayed;': '\u29f4',
'ruluhar;': '\u2968',
'rx;': '\u211e',
'Sacute;': '\u015a',
'sacute;': '\u015b',
'sbquo;': '\u201a',
'Sc;': '\u2abc',
'sc;': '\u227b',
'scap;': '\u2ab8',
'Scaron;': '\u0160',
'scaron;': '\u0161',
'sccue;': '\u227d',
'scE;': '\u2ab4',
'sce;': '\u2ab0',
'Scedil;': '\u015e',
'scedil;': '\u015f',
'Scirc;': '\u015c',
'scirc;': '\u015d',
'scnap;': '\u2aba',
'scnE;': '\u2ab6',
'scnsim;': '\u22e9',
'scpolint;': '\u2a13',
'scsim;': '\u227f',
'Scy;': '\u0421',
'scy;': '\u0441',
'sdot;': '\u22c5',
'sdotb;': '\u22a1',
'sdote;': '\u2a66',
'searhk;': '\u2925',
'seArr;': '\u21d8',
'searr;': '\u2198',
'searrow;': '\u2198',
'sect': '\xa7',
'sect;': '\xa7',
'semi;': ';',
'seswar;': '\u2929',
'setminus;': '\u2216',
'setmn;': '\u2216',
'sext;': '\u2736',
'Sfr;': '\U0001d516',
'sfr;': '\U0001d530',
'sfrown;': '\u2322',
'sharp;': '\u266f',
'SHCHcy;': '\u0429',
'shchcy;': '\u0449',
'SHcy;': '\u0428',
'shcy;': '\u0448',
'ShortDownArrow;': '\u2193',
'ShortLeftArrow;': '\u2190',
'shortmid;': '\u2223',
'shortparallel;': '\u2225',
'ShortRightArrow;': '\u2192',
'ShortUpArrow;': '\u2191',
'shy': '\xad',
'shy;': '\xad',
'Sigma;': '\u03a3',
'sigma;': '\u03c3',
'sigmaf;': '\u03c2',
'sigmav;': '\u03c2',
'sim;': '\u223c',
'simdot;': '\u2a6a',
'sime;': '\u2243',
'simeq;': '\u2243',
'simg;': '\u2a9e',
'simgE;': '\u2aa0',
'siml;': '\u2a9d',
'simlE;': '\u2a9f',
'simne;': '\u2246',
'simplus;': '\u2a24',
'simrarr;': '\u2972',
'slarr;': '\u2190',
'SmallCircle;': '\u2218',
'smallsetminus;': '\u2216',
'smashp;': '\u2a33',
'smeparsl;': '\u29e4',
'smid;': '\u2223',
'smile;': '\u2323',
'smt;': '\u2aaa',
'smte;': '\u2aac',
'smtes;': '\u2aac\ufe00',
'SOFTcy;': '\u042c',
'softcy;': '\u044c',
'sol;': '/',
'solb;': '\u29c4',
'solbar;': '\u233f',
'Sopf;': '\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': '\u2292',
'SquareUnion;': '\u2294',
'squarf;': '\u25aa',
'squf;': '\u25aa',
'srarr;': '\u2192',
'Sscr;': '\U0001d4ae',
'sscr;': '\U0001d4c8',
'ssetmn;': '\u2216',
'ssmile;': '\u2323',
'sstarf;': '\u22c6',
'Star;': '\u22c6',
'star;': '\u2606',
'starf;': '\u2605',
'straightepsilon;': '\u03f5',
'straightphi;': '\u03d5',
'strns;': '\xaf',
'Sub;': '\u22d0',
'sub;': '\u2282',
'subdot;': '\u2abd',
'subE;': '\u2ac5',
'sube;': '\u2286',
'subedot;': '\u2ac3',
'submult;': '\u2ac1',
'subnE;': '\u2acb',
'subne;': '\u228a',
'subplus;': '\u2abf',
'subrarr;': '\u2979',
'Subset;': '\u22d0',
'subset;': '\u2282',
'subseteq;': '\u2286',
'subseteqq;': '\u2ac5',
'SubsetEqual;': '\u2286',
'subsetneq;': '\u228a',
'subsetneqq;': '\u2acb',
'subsim;': '\u2ac7',
'subsub;': '\u2ad5',
'subsup;': '\u2ad3',
'succ;': '\u227b',
'succapprox;': '\u2ab8',
'succcurlyeq;': '\u227d',
'Succeeds;': '\u227b',
'SucceedsEqual;': '\u2ab0',
'SucceedsSlantEqual;': '\u227d',
'SucceedsTilde;': '\u227f',
'succeq;': '\u2ab0',
'succnapprox;': '\u2aba',
'succneqq;': '\u2ab6',
'succnsim;': '\u22e9',
'succsim;': '\u227f',
'SuchThat;': '\u220b',
'Sum;': '\u2211',
'sum;': '\u2211',
'sung;': '\u266a',
'sup1': '\xb9',
'sup1;': '\xb9',
'sup2': '\xb2',
'sup2;': '\xb2',
'sup3': '\xb3',
'sup3;': '\xb3',
'Sup;': '\u22d1',
'sup;': '\u2283',
'supdot;': '\u2abe',
'supdsub;': '\u2ad8',
'supE;': '\u2ac6',
'supe;': '\u2287',
'supedot;': '\u2ac4',
'Superset;': '\u2283',
'SupersetEqual;': '\u2287',
'suphsol;': '\u27c9',
'suphsub;': '\u2ad7',
'suplarr;': '\u297b',
'supmult;': '\u2ac2',
'supnE;': '\u2acc',
'supne;': '\u228b',
'supplus;': '\u2ac0',
'Supset;': '\u22d1',
'supset;': '\u2283',
'supseteq;': '\u2287',
'supseteqq;': '\u2ac6',
'supsetneq;': '\u228b',
'supsetneqq;': '\u2acc',
'supsim;': '\u2ac8',
'supsub;': '\u2ad4',
'supsup;': '\u2ad6',
'swarhk;': '\u2926',
'swArr;': '\u21d9',
'swarr;': '\u2199',
'swarrow;': '\u2199',
'swnwar;': '\u292a',
'szlig': '\xdf',
'szlig;': '\xdf',
'Tab;': '\t',
'target;': '\u2316',
'Tau;': '\u03a4',
'tau;': '\u03c4',
'tbrk;': '\u23b4',
'Tcaron;': '\u0164',
'tcaron;': '\u0165',
'Tcedil;': '\u0162',
'tcedil;': '\u0163',
'Tcy;': '\u0422',
'tcy;': '\u0442',
'tdot;': '\u20db',
'telrec;': '\u2315',
'Tfr;': '\U0001d517',
'tfr;': '\U0001d531',
'there4;': '\u2234',
'Therefore;': '\u2234',
'therefore;': '\u2234',
'Theta;': '\u0398',
'theta;': '\u03b8',
'thetasym;': '\u03d1',
'thetav;': '\u03d1',
'thickapprox;': '\u2248',
'thicksim;': '\u223c',
'ThickSpace;': '\u205f\u200a',
'thinsp;': '\u2009',
'ThinSpace;': '\u2009',
'thkap;': '\u2248',
'thksim;': '\u223c',
'THORN': '\xde',
'thorn': '\xfe',
'THORN;': '\xde',
'thorn;': '\xfe',
'Tilde;': '\u223c',
'tilde;': '\u02dc',
'TildeEqual;': '\u2243',
'TildeFullEqual;': '\u2245',
'TildeTilde;': '\u2248',
'times': '\xd7',
'times;': '\xd7',
'timesb;': '\u22a0',
'timesbar;': '\u2a31',
'timesd;': '\u2a30',
'tint;': '\u222d',
'toea;': '\u2928',
'top;': '\u22a4',
'topbot;': '\u2336',
'topcir;': '\u2af1',
'Topf;': '\U0001d54b',
'topf;': '\U0001d565',
'topfork;': '\u2ada',
'tosa;': '\u2929',
'tprime;': '\u2034',
'TRADE;': '\u2122',
'trade;': '\u2122',
'triangle;': '\u25b5',
'triangledown;': '\u25bf',
'triangleleft;': '\u25c3',
'trianglelefteq;': '\u22b4',
'triangleq;': '\u225c',
'triangleright;': '\u25b9',
'trianglerighteq;': '\u22b5',
'tridot;': '\u25ec',
'trie;': '\u225c',
'triminus;': '\u2a3a',
'TripleDot;': '\u20db',
'triplus;': '\u2a39',
'trisb;': '\u29cd',
'tritime;': '\u2a3b',
'trpezium;': '\u23e2',
'Tscr;': '\U0001d4af',
'tscr;': '\U0001d4c9',
'TScy;': '\u0426',
'tscy;': '\u0446',
'TSHcy;': '\u040b',
'tshcy;': '\u045b',
'Tstrok;': '\u0166',
'tstrok;': '\u0167',
'twixt;': '\u226c',
'twoheadleftarrow;': '\u219e',
'twoheadrightarrow;': '\u21a0',
'Uacute': '\xda',
'uacute': '\xfa',
'Uacute;': '\xda',
'uacute;': '\xfa',
'Uarr;': '\u219f',
'uArr;': '\u21d1',
'uarr;': '\u2191',
'Uarrocir;': '\u2949',
'Ubrcy;': '\u040e',
'ubrcy;': '\u045e',
'Ubreve;': '\u016c',
'ubreve;': '\u016d',
'Ucirc': '\xdb',
'ucirc': '\xfb',
'Ucirc;': '\xdb',
'ucirc;': '\xfb',
'Ucy;': '\u0423',
'ucy;': '\u0443',
'udarr;': '\u21c5',
'Udblac;': '\u0170',
'udblac;': '\u0171',
'udhar;': '\u296e',
'ufisht;': '\u297e',
'Ufr;': '\U0001d518',
'ufr;': '\U0001d532',
'Ugrave': '\xd9',
'ugrave': '\xf9',
'Ugrave;': '\xd9',
'ugrave;': '\xf9',
'uHar;': '\u2963',
'uharl;': '\u21bf',
'uharr;': '\u21be',
'uhblk;': '\u2580',
'ulcorn;': '\u231c',
'ulcorner;': '\u231c',
'ulcrop;': '\u230f',
'ultri;': '\u25f8',
'Umacr;': '\u016a',
'umacr;': '\u016b',
'uml': '\xa8',
'uml;': '\xa8',
'UnderBar;': '_',
'UnderBrace;': '\u23df',
'UnderBracket;': '\u23b5',
'UnderParenthesis;': '\u23dd',
'Union;': '\u22c3',
'UnionPlus;': '\u228e',
'Uogon;': '\u0172',
'uogon;': '\u0173',
'Uopf;': '\U0001d54c',
'uopf;': '\U0001d566',
'UpArrow;': '\u2191',
'Uparrow;': '\u21d1',
'uparrow;': '\u2191',
'UpArrowBar;': '\u2912',
'UpArrowDownArrow;': '\u21c5',
'UpDownArrow;': '\u2195',
'Updownarrow;': '\u21d5',
'updownarrow;': '\u2195',
'UpEquilibrium;': '\u296e',
'upharpoonleft;': '\u21bf',
'upharpoonright;': '\u21be',
'uplus;': '\u228e',
'UpperLeftArrow;': '\u2196',
'UpperRightArrow;': '\u2197',
'Upsi;': '\u03d2',
'upsi;': '\u03c5',
'upsih;': '\u03d2',
'Upsilon;': '\u03a5',
'upsilon;': '\u03c5',
'UpTee;': '\u22a5',
'UpTeeArrow;': '\u21a5',
'upuparrows;': '\u21c8',
'urcorn;': '\u231d',
'urcorner;': '\u231d',
'urcrop;': '\u230e',
'Uring;': '\u016e',
'uring;': '\u016f',
'urtri;': '\u25f9',
'Uscr;': '\U0001d4b0',
'uscr;': '\U0001d4ca',
'utdot;': '\u22f0',
'Utilde;': '\u0168',
'utilde;': '\u0169',
'utri;': '\u25b5',
'utrif;': '\u25b4',
'uuarr;': '\u21c8',
'Uuml': '\xdc',
'uuml': '\xfc',
'Uuml;': '\xdc',
'uuml;': '\xfc',
'uwangle;': '\u29a7',
'vangrt;': '\u299c',
'varepsilon;': '\u03f5',
'varkappa;': '\u03f0',
'varnothing;': '\u2205',
'varphi;': '\u03d5',
'varpi;': '\u03d6',
'varpropto;': '\u221d',
'vArr;': '\u21d5',
'varr;': '\u2195',
'varrho;': '\u03f1',
'varsigma;': '\u03c2',
'varsubsetneq;': '\u228a\ufe00',
'varsubsetneqq;': '\u2acb\ufe00',
'varsupsetneq;': '\u228b\ufe00',
'varsupsetneqq;': '\u2acc\ufe00',
'vartheta;': '\u03d1',
'vartriangleleft;': '\u22b2',
'vartriangleright;': '\u22b3',
'Vbar;': '\u2aeb',
'vBar;': '\u2ae8',
'vBarv;': '\u2ae9',
'Vcy;': '\u0412',
'vcy;': '\u0432',
'VDash;': '\u22ab',
'Vdash;': '\u22a9',
'vDash;': '\u22a8',
'vdash;': '\u22a2',
'Vdashl;': '\u2ae6',
'Vee;': '\u22c1',
'vee;': '\u2228',
'veebar;': '\u22bb',
'veeeq;': '\u225a',
'vellip;': '\u22ee',
'Verbar;': '\u2016',
'verbar;': '|',
'Vert;': '\u2016',
'vert;': '|',
'VerticalBar;': '\u2223',
'VerticalLine;': '|',
'VerticalSeparator;': '\u2758',
'VerticalTilde;': '\u2240',
'VeryThinSpace;': '\u200a',
'Vfr;': '\U0001d519',
'vfr;': '\U0001d533',
'vltri;': '\u22b2',
'vnsub;': '\u2282\u20d2',
'vnsup;': '\u2283\u20d2',
'Vopf;': '\U0001d54d',
'vopf;': '\U0001d567',
'vprop;': '\u221d',
'vrtri;': '\u22b3',
'Vscr;': '\U0001d4b1',
'vscr;': '\U0001d4cb',
'vsubnE;': '\u2acb\ufe00',
'vsubne;': '\u228a\ufe00',
'vsupnE;': '\u2acc\ufe00',
'vsupne;': '\u228b\ufe00',
'Vvdash;': '\u22aa',
'vzigzag;': '\u299a',
'Wcirc;': '\u0174',
'wcirc;': '\u0175',
'wedbar;': '\u2a5f',
'Wedge;': '\u22c0',
'wedge;': '\u2227',
'wedgeq;': '\u2259',
'weierp;': '\u2118',
'Wfr;': '\U0001d51a',
'wfr;': '\U0001d534',
'Wopf;': '\U0001d54e',
'wopf;': '\U0001d568',
'wp;': '\u2118',
'wr;': '\u2240',
'wreath;': '\u2240',
'Wscr;': '\U0001d4b2',
'wscr;': '\U0001d4cc',
'xcap;': '\u22c2',
'xcirc;': '\u25ef',
'xcup;': '\u22c3',
'xdtri;': '\u25bd',
'Xfr;': '\U0001d51b',
'xfr;': '\U0001d535',
'xhArr;': '\u27fa',
'xharr;': '\u27f7',
'Xi;': '\u039e',
'xi;': '\u03be',
'xlArr;': '\u27f8',
'xlarr;': '\u27f5',
'xmap;': '\u27fc',
'xnis;': '\u22fb',
'xodot;': '\u2a00',
'Xopf;': '\U0001d54f',
'xopf;': '\U0001d569',
'xoplus;': '\u2a01',
'xotime;': '\u2a02',
'xrArr;': '\u27f9',
'xrarr;': '\u27f6',
'Xscr;': '\U0001d4b3',
'xscr;': '\U0001d4cd',
'xsqcup;': '\u2a06',
'xuplus;': '\u2a04',
'xutri;': '\u25b3',
'xvee;': '\u22c1',
'xwedge;': '\u22c0',
'Yacute': '\xdd',
'yacute': '\xfd',
'Yacute;': '\xdd',
'yacute;': '\xfd',
'YAcy;': '\u042f',
'yacy;': '\u044f',
'Ycirc;': '\u0176',
'ycirc;': '\u0177',
'Ycy;': '\u042b',
'ycy;': '\u044b',
'yen': '\xa5',
'yen;': '\xa5',
'Yfr;': '\U0001d51c',
'yfr;': '\U0001d536',
'YIcy;': '\u0407',
'yicy;': '\u0457',
'Yopf;': '\U0001d550',
'yopf;': '\U0001d56a',
'Yscr;': '\U0001d4b4',
'yscr;': '\U0001d4ce',
'YUcy;': '\u042e',
'yucy;': '\u044e',
'yuml': '\xff',
'Yuml;': '\u0178',
'yuml;': '\xff',
'Zacute;': '\u0179',
'zacute;': '\u017a',
'Zcaron;': '\u017d',
'zcaron;': '\u017e',
'Zcy;': '\u0417',
'zcy;': '\u0437',
'Zdot;': '\u017b',
'zdot;': '\u017c',
'zeetrf;': '\u2128',
'ZeroWidthSpace;': '\u200b',
'Zeta;': '\u0396',
'zeta;': '\u03b6',
'Zfr;': '\u2128',
'zfr;': '\U0001d537',
'ZHcy;': '\u0416',
'zhcy;': '\u0436',
'zigrarr;': '\u21dd',
'Zopf;': '\u2124',
'zopf;': '\U0001d56b',
'Zscr;': '\U0001d4b5',
'zscr;': '\U0001d4cf',
'zwj;': '\u200d',
'zwnj;': '\u200c',
}
# Cross-version (Python 2/3) aliases for stdlib modules and names that were
# renamed in Python 3.  Each shim tries the Python 3 location first and falls
# back to the Python 2 equivalent on ImportError/NameError.
try:
    import http.client as compat_http_client
except ImportError:  # Python 2
    import httplib as compat_http_client
try:
    from urllib.error import HTTPError as compat_HTTPError
except ImportError:  # Python 2
    from urllib2 import HTTPError as compat_HTTPError
try:
    from urllib.request import urlretrieve as compat_urlretrieve
except ImportError:  # Python 2
    from urllib import urlretrieve as compat_urlretrieve
try:
    from html.parser import HTMLParser as compat_HTMLParser
except ImportError:  # Python 2
    from HTMLParser import HTMLParser as compat_HTMLParser
try:
    from subprocess import DEVNULL
    # subprocess.DEVNULL exists on Python 3.3+; expose it through a callable
    # so the fallback (which must open os.devnull itself) has the same shape.
    compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
    import http.server as compat_http_server
except ImportError:
    import BaseHTTPServer as compat_http_server
try:
    # Text string type: `unicode` on Python 2, `str` on Python 3.
    compat_str = unicode  # Python 2
except NameError:
    compat_str = str
try:
    from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
    from urllib.parse import unquote as compat_urllib_parse_unquote
    from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
except ImportError:  # Python 2
    # Regex matching runs of pure-ASCII characters (with a capture group, so
    # re.split keeps the matched runs); only such runs can hold %xx escapes.
    _asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
                else re.compile(r'([\x00-\x7f]+)'))
    # HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
    # implementations from cpython 3.4.3's stdlib. Python 2's version
    # is apparently broken (see https://github.com/rg3/youtube-dl/pull/6244)
    def compat_urllib_parse_unquote_to_bytes(string):
        """unquote_to_bytes('abc%20def') -> b'abc def'."""
        # Note: strings are encoded as UTF-8. This is only an issue if it contains
        # unescaped non-ASCII characters, which URIs should not.
        if not string:
            # Is it a string-like object?  The attribute access doubles as a
            # cheap duck-type check and raises for non-string input.
            string.split
            return b''
        if isinstance(string, compat_str):
            string = string.encode('utf-8')
        bits = string.split(b'%')
        if len(bits) == 1:
            # No '%' present -> nothing to decode.
            return string
        res = [bits[0]]
        append = res.append
        for item in bits[1:]:
            try:
                # The two chars that followed '%' form the hex escape.
                append(compat_urllib_parse._hextochr[item[:2]])
                append(item[2:])
            except KeyError:
                # Invalid escape: keep the literal '%' and the rest verbatim.
                append(b'%')
                append(item)
        return b''.join(res)
    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
        """Replace %xx escapes by their single-character equivalent. The optional
        encoding and errors parameters specify how to decode percent-encoded
        sequences into Unicode characters, as accepted by the bytes.decode()
        method.
        By default, percent-encoded sequences are decoded with UTF-8, and invalid
        sequences are replaced by a placeholder character.
        unquote('abc%20def') -> 'abc def'.
        """
        if '%' not in string:
            # Duck-type check (raises for non-strings), then fast path out.
            string.split
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'replace'
        # bits alternates non-ASCII runs (even indices) and captured ASCII
        # runs (odd indices); decode escapes only inside the ASCII runs.
        bits = _asciire.split(string)
        res = [bits[0]]
        append = res.append
        for i in range(1, len(bits), 2):
            append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
            append(bits[i + 1])
        return ''.join(res)
    def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
        """Like unquote(), but also replace plus signs by spaces, as required for
        unquoting HTML form values.
        unquote_plus('%7e/abc+def') -> '~/abc def'
        """
        string = string.replace('+', ' ')
        return compat_urllib_parse_unquote(string, encoding, errors)
try:
    from urllib.parse import urlencode as compat_urllib_parse_urlencode
except ImportError:  # Python 2
    # Python 2's urlencode chokes on a mixture of byte and unicode strings.
    # Rather than porting the Python 3 implementation wholesale, we make sure
    # the query contains only byte strings by recursively encoding it first.
    def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
        def _encode(value):
            # Recursively turn every text string inside dicts, lists and
            # tuples into bytes, preserving the container type.
            if isinstance(value, dict):
                return dict(
                    (_encode(k), _encode(v)) for k, v in value.items())
            if isinstance(value, tuple):
                return tuple(_encode(item) for item in value)
            if isinstance(value, list):
                return [_encode(item) for item in value]
            if isinstance(value, compat_str):
                return value.encode(encoding)
            return value
        return compat_urllib_parse.urlencode(_encode(query), doseq=doseq)
try:
    from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError:  # Python < 3.4
    # Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
    class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
        """urllib handler that opens "data:" URLs (RFC 2397)."""
        def data_open(self, req):
            # data URLs as specified in RFC 2397.
            #
            # ignores POSTed data
            #
            # syntax:
            # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
            # mediatype := [ type "/" subtype ] *( ";" parameter )
            # data := *urlchar
            # parameter := attribute "=" value
            url = req.get_full_url()
            # NOTE(review): scheme is unused; presumably always 'data' since
            # urllib dispatches by handler method name -- confirm.
            scheme, data = url.split(':', 1)
            mediatype, data = data.split(',', 1)
            # even base64 encoded data URLs might be quoted so unquote in any case:
            data = compat_urllib_parse_unquote_to_bytes(data)
            if mediatype.endswith(';base64'):
                data = binascii.a2b_base64(data)
                mediatype = mediatype[:-7]
            if not mediatype:
                # Default media type mandated by RFC 2397.
                mediatype = 'text/plain;charset=US-ASCII'
            headers = email.message_from_string(
                'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
            return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
# Common base of all string types: `basestring` on Python 2 (covers both
# str and unicode), plain `str` on Python 3.
try:
    compat_basestring = basestring  # Python 2
except NameError:
    compat_basestring = str
# Code-point -> character: `unichr` on Python 2, `chr` on Python 3.
try:
    compat_chr = unichr  # Python 2
except NameError:
    compat_chr = chr
try:
    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError:  # Python 2.6
    from xml.parsers.expat import ExpatError as compat_xml_parse_error
etree = xml.etree.ElementTree
class _TreeBuilder(etree.TreeBuilder):
    # Override doctype() with a no-op so doctype declarations in the input
    # are silently accepted instead of triggering the default handling.
    def doctype(self, name, pubid, system):
        pass
if sys.version_info[0] >= 3:
    def compat_etree_fromstring(text):
        # Parse an XML string, tolerating a doctype declaration.
        return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))
else:
    # python 2.x tries to encode unicode strings with ascii (see the
    # XMLParser._fixtext method)
    try:
        _etree_iter = etree.Element.iter
    except AttributeError:  # Python <=2.6
        # Recursive fallback for Element.iter (added in Python 2.7):
        # depth-first traversal of all descendant elements.
        def _etree_iter(root):
            for el in root.findall('*'):
                yield el
                for sub in _etree_iter(el):
                    yield sub
    # on 2.6 XML doesn't have a parser argument, function copied from CPython
    # 2.7 source
    def _XML(text, parser=None):
        if not parser:
            parser = etree.XMLParser(target=_TreeBuilder())
        parser.feed(text)
        return parser.close()
    def _element_factory(*args, **kwargs):
        # Element factory that re-decodes byte attribute values to unicode.
        el = etree.Element(*args, **kwargs)
        for k, v in el.items():
            if isinstance(v, bytes):
                el.set(k, v.decode('utf-8'))
        return el
    def compat_etree_fromstring(text):
        # Parse, then walk the tree fixing up any element text the Python 2
        # parser left as bytes so callers always see unicode.
        doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))
        for el in _etree_iter(doc):
            if el.text is not None and isinstance(el.text, bytes):
                el.text = el.text.decode('utf-8')
        return doc
if sys.version_info < (2, 7):
    # Python 2.6 quirk: with a unicode xpath, './/node' fails to match nodes
    # that are direct children of '.', so coerce text xpaths to byte strings.
    def compat_xpath(xpath):
        return xpath.encode('ascii') if isinstance(xpath, compat_str) else xpath
else:
    def compat_xpath(xpath):
        # Later interpreters handle unicode xpaths fine; pass through as-is.
        return xpath
try:
    from urllib.parse import parse_qs as compat_parse_qs
except ImportError:  # Python 2
    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
    # Python 2's version is apparently totally broken
    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
                   encoding='utf-8', errors='replace'):
        qs, _coerce_result = qs, compat_str
        # Field pairs may be separated by either '&' or ';'.
        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
        r = []
        for name_value in pairs:
            if not name_value and not strict_parsing:
                continue
            nv = name_value.split('=', 1)
            if len(nv) != 2:
                if strict_parsing:
                    raise ValueError('bad query field: %r' % (name_value,))
                # Handle case of a control-name with no equal sign
                if keep_blank_values:
                    nv.append('')
                else:
                    continue
            if len(nv[1]) or keep_blank_values:
                # '+' means space in form-encoded data; decode both halves.
                name = nv[0].replace('+', ' ')
                name = compat_urllib_parse_unquote(
                    name, encoding=encoding, errors=errors)
                name = _coerce_result(name)
                value = nv[1].replace('+', ' ')
                value = compat_urllib_parse_unquote(
                    value, encoding=encoding, errors=errors)
                value = _coerce_result(value)
                r.append((name, value))
        return r
    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
                        encoding='utf-8', errors='replace'):
        # Group values of repeated parameter names into lists, matching the
        # Python 3 urllib.parse.parse_qs contract.
        parsed_result = {}
        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
                           encoding=encoding, errors=errors)
        for name, value in pairs:
            if name in parsed_result:
                parsed_result[name].append(value)
            else:
                parsed_result[name] = [value]
        return parsed_result
try:
    from shlex import quote as compat_shlex_quote
except ImportError:  # Python < 3.3
    def compat_shlex_quote(s):
        # Strings made of shell-safe characters need no quoting at all.
        if re.match(r'^[-_\w./]+$', s):
            return s
        # Otherwise wrap in single quotes, escaping any embedded single
        # quote with the '"'"' close-escape-reopen trick.
        return "'" + s.replace("'", "'\"'\"'") + "'"
try:
    # Probe whether this interpreter's shlex handles unicode correctly.
    args = shlex.split('中文')
    assert (isinstance(args, list) and
            isinstance(args[0], compat_str) and
            args[0] == '中文')
    compat_shlex_split = shlex.split
except (AssertionError, UnicodeEncodeError):
    # Work around the shlex unicode bug on some Python 2 versions
    # (see http://bugs.python.org/issue1548891) by round-tripping via UTF-8.
    def compat_shlex_split(s, comments=False, posix=True):
        if isinstance(s, compat_str):
            s = s.encode('utf-8')
        return [token.decode('utf-8') for token in shlex.split(s, comments, posix)]
def compat_ord(c):
    """Return the integer value of *c*: pass ints through unchanged
    (Python 3 bytes indexing yields ints), otherwise apply ord().

    `type(c) is int` is deliberate (not isinstance) to match the
    original's exact behavior for bool and int subclasses.
    """
    return c if type(c) is int else ord(c)
compat_os_name = os._name if os.name == 'java' else os.name
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
def compat_setenv(key, value, env=os.environ):
env[key] = value
else:
# Environment variables should be decoded with filesystem encoding.
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
def compat_setenv(key, value, env=os.environ):
def encode(v):
from .utils import get_filesystem_encoding
return v.encode(get_filesystem_encoding()) if isinstance(v, compat_str) else v
env[encode(key)] = encode(value)
# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variables decoding.
if compat_os_name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif compat_os_name == 'nt' or compat_os_name == 'ce':
def compat_expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
elif 'HOMEPATH' not in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
if i != 1: # ~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert isinstance(s, compat_str)
print(s)
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
from .utils import preferredencoding
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
try:
compat_input = raw_input
except NameError: # Python 3
compat_input = input
# Python < 2.6.5 require kwargs to be bytes
try:
def _testfunc(x):
pass
_testfunc(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
compat_kwargs = lambda kwargs: kwargs
if sys.version_info < (2, 7):
def compat_socket_create_connection(address, timeout, source_address=None):
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error('getaddrinfo returns an empty list')
else:
compat_socket_create_connection = socket.create_connection
# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
og.add_option('-t')
except TypeError:
real_add_option = optparse.OptionGroup.add_option
def _compat_add_option(self, *args, **kwargs):
enc = lambda v: (
v.encode('ascii', 'replace') if isinstance(v, compat_str)
else v)
bargs = [enc(a) for a in args]
bkwargs = dict(
(k, enc(v)) for k, v in kwargs.items())
return real_add_option(self, *bargs, **bkwargs)
optparse.OptionGroup.add_option = _compat_add_option
if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
compat_get_terminal_size = shutil.get_terminal_size
else:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
def compat_get_terminal_size(fallback=(80, 24)):
columns = compat_getenv('COLUMNS')
if columns:
columns = int(columns)
else:
columns = None
lines = compat_getenv('LINES')
if lines:
lines = int(lines)
else:
lines = None
if columns is None or lines is None or columns <= 0 or lines <= 0:
try:
sp = subprocess.Popen(
['stty', 'size'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
_lines, _columns = map(int, out.split())
except Exception:
_columns, _lines = _terminal_size(*fallback)
if columns is None or columns <= 0:
columns = _columns
if lines is None or lines <= 0:
lines = _lines
return _terminal_size(columns, lines)
try:
itertools.count(start=0, step=1)
compat_itertools_count = itertools.count
except TypeError: # Python 2.6
def compat_itertools_count(start=0, step=1):
n = start
while True:
yield n
n += step
if sys.version_info >= (3, 0):
from tokenize import tokenize as compat_tokenize_tokenize
else:
from tokenize import generate_tokens as compat_tokenize_tokenize
try:
struct.pack('!I', 0)
except TypeError:
# In Python 2.6 and 2.7.x < 2.7.7, struct requires a bytes argument
# See https://bugs.python.org/issue19099
def compat_struct_pack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.pack(spec, *args)
def compat_struct_unpack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.unpack(spec, *args)
else:
compat_struct_pack = struct.pack
compat_struct_unpack = struct.unpack
__all__ = [
'compat_HTMLParser',
'compat_HTTPError',
'compat_basestring',
'compat_chr',
'compat_cookiejar',
'compat_cookies',
'compat_etree_fromstring',
'compat_expanduser',
'compat_get_terminal_size',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_html_entities_html5',
'compat_http_client',
'compat_http_server',
'compat_input',
'compat_itertools_count',
'compat_kwargs',
'compat_ord',
'compat_os_name',
'compat_parse_qs',
'compat_print',
'compat_setenv',
'compat_shlex_quote',
'compat_shlex_split',
'compat_socket_create_connection',
'compat_str',
'compat_struct_pack',
'compat_struct_unpack',
'compat_subprocess_get_DEVNULL',
'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
'compat_urllib_parse_unquote_to_bytes',
'compat_urllib_parse_urlencode',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urllib_request_DataHandler',
'compat_urllib_response',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'compat_xpath',
'workaround_optparse_bug9161',
]
|
linglung/ytdl
|
youtube_dl/compat.py
|
Python
|
unlicense
| 88,573
|
[
"Bowtie"
] |
fefa7a78a27158b717a6e6d1f45e88df1c73c8c5596d3978f31344ab76b4228b
|
"""
GPoFM: Gaussian Process Training with
Optimized Feature Maps for Shift-Invariant Kernels
Github: https://github.com/MaxInGaussian/GPoFM
Author: Max W. Y. Lam [maxingaussian@gmail.com]
"""
import numpy as np
from scipy.stats import skew, norm, mstats
from scipy.optimize import minimize
__all__ = [
"Transformer",
]
class Transformer(object):
    """Transformer (Data Preprocessing).

    Fits a column-wise scaling of a data matrix and supports the inverse
    mapping.  Constant columns are dropped at fit time, so ``transform``
    and ``recover`` operate only on the non-constant columns.
    """

    # Supported scaling algorithms.
    algos = [
        "min-max",
        "normal",
        "inv-normal",
        "auto-normal",
        "auto-uniform",
    ]
    data = {}

    def __init__(self, algo):
        """Create a transformer for *algo* (name is case-insensitive)."""
        assert algo.lower() in self.algos, "Invalid Scaling Algorithm!"
        self.algo = algo.lower()
        if self.algo == "min-max":
            self.data = {"cols": None, "min": 0, "max": 0}
        elif self.algo in ("normal", "inv-normal"):
            self.data = {"cols": None, "std": 0, "mu": 0}
        else:  # "auto-normal" / "auto-uniform"
            self.data = {"cols": None, "min": 0, "max": 0,
                         "std": 0, "mu": 0, "boxcox": 0}

    def fit_transform(self, X):
        """Fit the scaling statistics on *X* and return transformed *X*."""
        self.fit(X)
        return self.transform(X)

    @staticmethod
    def _boxcox(x, lm):
        # Signed Box-Cox power transform; the sign trick keeps it defined
        # for non-positive inputs.
        return (np.sign(x) * np.abs(x) ** lm - 1) / lm

    def _fit_boxcox(self, tX):
        """Fit per-column Box-Cox exponents minimizing squared skewness.

        *tX* must already be scaled into [0, 1].  Columns with fewer than
        10 distinct values keep the identity exponent (lambda = 1).
        Stores 'boxcox', 'mu' and 'std' in ``self.data``.

        (This logic was previously duplicated verbatim in fit() for the
        "auto-normal" and "auto-uniform" branches.)
        """
        self.data['boxcox'] = np.zeros(tX.shape[1])
        for d in range(tX.shape[1]):
            Xd = tX[:, d]
            if np.unique(Xd).shape[0] < 10:
                self.data['boxcox'][d] = 1
                continue
            # Optimize the (softplus-mapped, hence positive) exponent so
            # that the transformed column has minimal squared skewness.
            t_lm = lambda lm: np.log(np.exp(lm[0]) + 1)
            obj = lambda lm: skew(self._boxcox(Xd, t_lm(lm)), bias=False) ** 2
            lm = minimize(obj, [0.], method='SLSQP', bounds=[(-5, 5)],
                          options={'ftol': 1e-8, 'maxiter': 100,
                                   'disp': False})['x']
            self.data['boxcox'][d] = t_lm(lm)
        tX = self._boxcox(tX, self.data['boxcox'][None, :])
        self.data['mu'] = np.mean(tX, axis=0)
        self.data['std'] = np.std(tX, axis=0)

    def fit(self, X):
        """Estimate the scaling statistics from *X* (rows = samples)."""
        # Keep only columns that vary across rows; constant columns are
        # uninformative and break division-based scalings.
        self.data["cols"] = list(set(range(X.shape[1])).difference(
            np.where(np.all(X == X[0, :], axis=0))[0]))
        tX = X[:, self.data["cols"]]
        if self.algo == "min-max":
            self.data['min'] = np.min(tX, axis=0)
            self.data['max'] = np.max(tX, axis=0)
        elif self.algo in ("normal", "inv-normal"):
            self.data['mu'] = np.mean(tX, axis=0)
            self.data['std'] = np.std(tX, axis=0)
        elif self.algo in ("auto-normal", "auto-uniform"):
            # Scale to [0, 1] first, then fit the Box-Cox exponents.
            self.data['min'] = np.min(tX, axis=0)
            self.data['max'] = np.max(tX, axis=0)
            tX = (tX - self.data["min"]) / (self.data["max"] - self.data["min"])
            self._fit_boxcox(tX)

    def transform(self, X):
        """Apply the fitted scaling to *X* and return the result."""
        tX = X[:, self.data["cols"]]
        if self.algo == "min-max":
            return (tX - self.data["min"]) / (self.data["max"] - self.data["min"])
        elif self.algo == "normal":
            return (tX - self.data["mu"]) / self.data["std"]
        elif self.algo == "inv-normal":
            return norm.cdf((tX - self.data["mu"]) / self.data["std"])
        elif self.algo == "auto-normal":
            tX = (tX - self.data["min"]) / (self.data["max"] - self.data["min"])
            bX = self._boxcox(tX, self.data['boxcox'][None, :])
            return (bX - self.data["mu"]) / self.data["std"]
        elif self.algo == "auto-uniform":
            tX = (tX - self.data["min"]) / (self.data["max"] - self.data["min"])
            bX = self._boxcox(tX, self.data['boxcox'][None, :])
            return norm.cdf(bX, self.data["mu"], self.data["std"])

    def recover(self, X):
        """Invert ``transform``; *X* must have the fitted column count."""
        assert len(self.data["cols"]) == X.shape[1], "Backward Transform Error"
        if self.algo == "min-max":
            return X * (self.data["max"] - self.data["min"]) + self.data["min"]
        elif self.algo == "normal":
            return X * self.data["std"] + self.data["mu"]
        elif self.algo == "inv-normal":
            # BUGFIX: the inverse of cdf((x-mu)/std) is ppf(X)*std + mu;
            # the previous code computed (ppf(X)-mu)/std, which did not
            # invert transform().
            return norm.ppf(X) * self.data["std"] + self.data["mu"]
        elif self.algo == "auto-normal":
            lm = self.data['boxcox'][None, :]
            # Inverse of the signed Box-Cox transform used in _boxcox().
            inv_boxcox = lambda x: np.sign(x * lm + 1) * np.abs(x * lm + 1) ** (1. / lm)
            tX = X * self.data["std"] + self.data["mu"]
            return inv_boxcox(tX) * (self.data["max"] - self.data["min"]) + self.data["min"]
        elif self.algo == "auto-uniform":
            lm = self.data['boxcox'][None, :]
            inv_boxcox = lambda x: np.sign(x * lm + 1) * np.abs(x * lm + 1) ** (1. / lm)
            tX = norm.ppf(X, self.data["mu"], self.data["std"])
            return inv_boxcox(tX) * (self.data["max"] - self.data["min"]) + self.data["min"]
|
MaxInGaussian/GPoFM
|
GPoFM/utils/Transformer.py
|
Python
|
bsd-3-clause
| 6,332
|
[
"Gaussian"
] |
4118c31370357d6e7f7d1ace624e88349f5507baf99afa528272910f87b64f69
|
import sys

from vtk import vtkGenericDataObjectReader
from vtk.util.numpy_support import vtk_to_numpy
def load_unstructured_vtu_data(input_file):
    """Read an unstructured-grid file and return its geometry and scalars.

    Parameters
    ----------
    input_file : str
        Path to a grid file readable by ``vtkGenericDataObjectReader``.

    Returns
    -------
    tuple of numpy.ndarray
        ``(coordinate_points, scalar_data)`` -- the mesh point
        coordinates and the active point scalars.
    """
    # Original was a syntax error: missing ':' on the def line, and it
    # referenced vtk.vtkGenericDataObjectReader without importing vtk.
    reader = vtkGenericDataObjectReader()
    reader.SetFileName(input_file)
    reader.Update()
    op = reader.GetOutput()
    point_data = vtk_to_numpy(op.GetPoints().GetData())
    # GetScalars() must be *called*; the original passed the bound method
    # object itself, which vtk_to_numpy cannot convert.
    scalar_data = vtk_to_numpy(op.GetPointData().GetScalars())
    print(point_data)
    print(scalar_data)
    return (point_data, scalar_data)
if __name__ == '__main__':
    # First command-line argument is the path of the mesh file to load.
    cli_args = sys.argv[1:]
    load_unstructured_vtu_data(cli_args[0])
|
sohrabtowfighi/py_tri_calc
|
vtk_local_min_max/load_vtk_unstructured_mesh.py
|
Python
|
gpl-3.0
| 755
|
[
"VTK"
] |
6f652b730c1beb4cc20ab046661dd0d11f9d1b28569f2b62a7d62b5b607a87ae
|
import json
import os
import time
from SWParser import *
from SWPlugin import SWPlugin
import threading
# Maps the summon-source id reported by the game to a display name.
sources = {
    1: 'Unknown Scroll',
    2: 'Mystical Scroll',
    3: 'Light & Dark Scroll',
    4: 'Water Scroll',
    5: 'Fire Scroll',
    6: 'Wind Scroll',
    7: 'Legendary Scroll',
    8: 'Exclusive Summons',
    9: "Legendary Pieces",
    10: "Light & Dark Pieces"
}


def identify_scroll(id):
    """Return the human-readable name for a summon-source id.

    Unknown ids fall back to 'Unknown Scroll' instead of raising
    KeyError, so scroll types added to the game after this mapping was
    written cannot crash the logger.
    """
    return sources.get(id, 'Unknown Scroll')
class MonsterLogger(SWPlugin):
    """SWProxy plugin that appends each summon result to a per-wizard CSV."""

    def __init__(self):
        # Plugin configuration is read once from swproxy.config (JSON).
        with open('swproxy.config') as f:
            self.config = json.load(f)

    def process_request(self, req_json, resp_json):
        """Plugin hook: dispatch 'SummonUnit' commands to log_summon()."""
        config = self.config
        if 'log_summon' not in config or not config['log_summon']:
            return

        command = req_json['command']
        if command == 'SummonUnit':
            return self.log_summon(req_json, resp_json, config)

    def log_summon(self, req_json, resp_json, config):
        """Append one CSV row per summoned unit found in *resp_json*.

        Rows go to '<wizard_id>-summons.csv'; a header row is written
        only when the file is first created.
        """
        # NOTE(review): redundant -- process_request() already checked
        # config['log_summon'] before calling this method.
        if not config["log_summon"]:
            return

        wizard_id = str(resp_json['wizard_info']['wizard_id'])
        if 'unit_list' in resp_json:
            # Determine which scroll (or summon mode) produced the units.
            if 'item_info' in resp_json:
                scroll = identify_scroll(resp_json['item_info']['item_master_id'])
            else:
                # presumably 'mode' distinguishes crystal/social summons --
                # verify against the game protocol.
                mode = req_json['mode']
                if mode == 3:
                    scroll = 'Crystal'
                elif mode == 5:
                    scroll = 'Social'
                else:
                    scroll = 'Unidentified'

            filename = "%s-summons.csv" % wizard_id
            is_new_file = not os.path.exists(filename)
            with open(filename, "ab") as log_file:
                field_names = ['date', 'scroll', 'unit_name', 'attribute', 'grade', 'awake']
                header = {'date': 'Date', 'scroll': 'Scroll', 'unit_name': 'Unit', 'attribute': 'Attribute', 'grade': 'Grade',
                          'awake': 'Awakened'}
                # Give other plugins a chance to extend/modify the header row.
                SWPlugin.call_plugins('process_csv_row', ('summon_logger', 'header', (field_names, header)))
                log_writer = DictUnicodeWriter(log_file, fieldnames=field_names)
                if is_new_file:
                    log_writer.writerow(header)
                if 'unit_list' in resp_json and len(resp_json['unit_list']) > 0:
                    for i in range(0, len(resp_json['unit_list'])):
                        # monster_name / monster_attribute come from SWParser.
                        unit_name = monster_name(resp_json['unit_list'][i]['unit_master_id'],'',False)
                        attribute = monster_attribute(resp_json['unit_list'][i]['attribute'])
                        grade = resp_json['unit_list'][i]['class']
                        # The code treats the second-to-last digit of
                        # unit_master_id as the awakened flag (0 = base form).
                        awakened = str(resp_json['unit_list'][i]['unit_master_id'])
                        if int(awakened[-2]) == 0:
                            awake = 'No'
                        else:
                            awake = 'Yes'
                        log_entry = {'date': time.strftime("%Y-%m-%d %H:%M"), 'scroll': scroll, 'unit_name': unit_name,
                                     'attribute': attribute, 'grade': grade, 'awake': awake}
                        # Let other plugins post-process each entry row too.
                        SWPlugin.call_plugins('process_csv_row', ('summon_logger', 'entry', (field_names, log_entry)))
                        log_writer.writerow(log_entry)
        return
|
lstern/SWProxy-plugins
|
plugins/SummonLogger.py
|
Python
|
lgpl-3.0
| 3,115
|
[
"CRYSTAL"
] |
2307d47d603db6e92edac5baf6ba10d175e02a208d56b6117dc338b6ffa7483e
|
#
# Author: Jorg Bornschein <bornschein@fias.uni-frankfurt.de)
# License: Academic Free License (AFL) v3.0
#
"""
Provide simple bar-pattern generator.
"""
from __future__ import division
__author__ = "Joerg Bornschein <bornschein@fias.uni-frankfurt.de>"
import numpy as np
def generate(num, size, p_bar):
    """
    Generate a set of *num* bar-patterns with *size* x *size* pixels.  Each
    of the 2 * *size* possible bars is independently active with
    probability *p_bar*.  The pixels of an active bar are set to 1, all
    other pixels are 0.

    This function returns the set of bar patterns as a (num, size, size)
    shaped numpy array.

    You can easily add noise to the generated patterns
    by using the random number generators in :mod:`numpy.random`.

    Example::

        import numpy as np
        from pulp.utils import bargen

        N = 1000; size = 16
        bars = bargen.generate(N, size, 2./15)
        bars = 10*bars + 5*np.random.randn(N, size, size)
        bars = bars.reshape(N, size*size)

    generates 1000 bar-patterns in the interval [0, 10] with additive
    Gaussian noise of standard deviation 5.

    (Docstring fixed: the example previously called a nonexistent
    ``generate_pattern`` and passed a tuple to ``np.random.randn``.)
    """
    bars = np.zeros((num, size, size))
    # `range` instead of Python-2-only `xrange`: works on both versions.
    for n in range(num):
        for i in range(size):
            # generate horizontal bar i
            if np.random.random() <= p_bar:
                bars[n, i, :] = 1.
            # generate vertical bar i
            if np.random.random() <= p_bar:
                bars[n, :, i] = 1.
    return bars
#For unit-test purposes
def W_gen(H, bar_val):
    """Return an (H, (H//2)**2) array of ground-truth bar weights.

    The first H//2 rows are horizontal bars and the remaining rows are
    vertical bars, each with pixel activity *bar_val*.  Used for
    unit-test purposes.
    """
    D2 = H // 2
    D = D2 ** 2
    W_gt = np.zeros((H, D2, D2))
    # `range` instead of Python-2-only `xrange`: works on both versions.
    for i in range(D2):
        W_gt[i, i, :] = bar_val
        W_gt[D2 + i, :, i] = bar_val
    return W_gt.reshape((H, D))
|
jbornschein/mca-genmodel
|
pulp/utils/bargen.py
|
Python
|
agpl-3.0
| 1,857
|
[
"Gaussian"
] |
1b522cd2647201951d9d1f9df679d47a95179082422fb58a22310134a8bdc304
|
#!/usr/bin/python
# file: formatter.py
# author: Andrea Vedaldi
# description: Utility to format MATLAB comments.
# Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson.
# All rights reserved.
#
# This file is part of the VLFeat library and is made available under
# the terms of the BSD license (see the COPYING file).
"""
MDOC fromats the help block of a MATLAB M-file based on a simple set
of rules. Pharagraphs, verbatim sections, lists and other structures
are automatically instantiated by looking at blank lines, indentation
and a few decoration symbols.
The documentation starts at a conventional indentation level N (by
default 2). A block of non-epmty lines prefixed by N characters is
considered a paragraph. For instance
| Bla bla bla
| bla bla bla.
|
| Bla bla.
generates two pharagraps. If there are more than N white spaces,
then the block is taken verbatim instead (and rendered in <pre> HTML
tags). For instance
| Bla bla bla
| Code Code Code
|
| Code Code Code
generates one paragraph followed by one verbatim section.
"""
import xml.dom.minidom
import sys
import os
import re
__mpname__ = 'MDocFormatter'
__version__ = '0.1'
__date__ = '2008-01-01'
__description__ = 'MDoc formatting module'
__long_description__ = __doc__
__license__ = 'BSD'
__author__ = 'Andrea Vedaldi'
# terminal
class Terminal:
def isa(self, classinfo):
return isinstance(self, classinfo)
# empty terminal
class E (Terminal):
pass
# blank line
class B (Terminal):
content = ""
# non-blank line
class L (Terminal):
indent = 0
# regular line
class PL (L):
pass
# line with bullet
class BL (L):
bullet = None
inner_indent = 0
# line with description
class DL (L):
pass
# --------------------------------------------------------------------
def lex(line):
# --------------------------------------------------------------------
"""
Parse the string LINE to a terminal symbol. Each line corresponds
to exactly one terminal type. However, terminal types are the leaf
of a hierarchy of types.
"""
match = re.match(r"\s*\n?$", line) ;
if match: return B()
match = re.match(r"(\s*)(.*)::(.*)\n?$", line)
if match:
x = DL()
x.indent = len(match.group(1))
x.content = match.group(2)
x.inner_content = match.group(3)
return x
match = re.match(r"(\s*)([-\*#]\s*)(\S.*)\n?$", line)
if match:
x = BL()
x.indent = len(match.group(1))
x.inner_content = match.group(3)
x.bullet = match.group(2)
x.inner_indent = x.indent + len(x.bullet)
x.content = x.bullet + x.inner_content
return x
match = re.match(r"(\s*)(\S.*)\n?$", line)
if match:
x = PL()
x.indent = len(match.group(1))
x.content = match.group(2)
return x
# --------------------------------------------------------------------
class Lexer(object):
# --------------------------------------------------------------------
"""
l = Lexer(LINES) parses the array of strings LINES. Lexer has a
head pointing to the current line. The head can be controlled by
the following methods:
l.next() advances the head and fetches the next terminal.
l.back() moves back the head.
l.getpos() returns the head position.
l.seek(POS) sets the head position to POS.
"""
def __init__(self, lines):
self.tokens = []
self.pos = -1
for line in lines:
self.tokens.append(lex(line))
def next(self):
self.pos = self.pos + 1
if self.pos >= len(self.tokens):
return E()
else:
return self.tokens [self.pos]
def seek(self, pos):
self.pos = pos
def back(self):
if self.pos >=0: self.pos -= 1
def rewrite(self, str):
self.tokens [self.pos] = str ;
def getpos(self):
return self.pos
def __str__(self):
str = ""
for i,t in enumerate(self.tokens):
str += "%5d) %s %s\n" % (i, t.__class__.__name__,t.content)
return str
# --------------------------------------------------------------------
class Formatter:
# --------------------------------------------------------------------
"""
f = Formatter(LINES) parse the array of strings LINES.
f = Formatter(LINES, FUNCS) takes the dictionary of functions
FUNCS. Function names must be uppercase. The dictionary entries
are used to cross link functions in the generated documentation.
Formatter(LINES, FUNCS, LINKTYPE) produces links of the specified
type. Use 'a' for HTML anchors and 'wiki' for MediaWiki style
links.
f.toDOM() process the data to construct an XML (HTML) representation
of them.
"""
def __init__ (self, lines, funcs={}, linktype='a'):
self.indentinit = 0
lineone = lines[0]
while lineone.startswith(' '):
lineone = lineone[1:]
self.indentinit += 1
self.tokens = Lexer(lines)
self.xmldoc = xml.dom.minidom.Document()
self.funcs = funcs
self.linktype = linktype
#print self.tokens
def toTextNode(self,s):
return self.xmldoc.createTextNode(unicode(s, 'iso-8859-1'))
def addAttr(self, tag, attr, val):
x = self.xmldoc.createAttribute(attr)
x.nodeValue = val
tag.setAttributeNode(x)
def addText(self, tag, s):
txt = self.toTextNode(s)
tag.appendChild(txt)
def addFancyText(self, tag, s):
"Adds text while transforming function references to links."
xs = []
iter = re.finditer('([A-Z][A-Z0-9_]*)\([^\)]*\)', s)
last = -1
for i in iter:
func_name = i.group(1)
# lookup function name in dictionary
if self.funcs.has_key(func_name):
# retrieve function HTML location
func_href = self.funcs[func_name]
# add text so far
xs.append(self.toTextNode(s[last+1:i.start()]))
if self.linktype == 'a':
# add link to function
atag = self.xmldoc.createElement(u"a")
self.addText(atag, i.group(1))
atag.setAttribute(u"href", u"%s" % (func_href))
xs.append(atag)
elif self.linktype == 'wiki':
linktxt = "[[%s|%s]]" % (func_href, i.group(1))
xs.append(self.toTextNode(linktxt))
# set head
last = i.start()+len(i.group(1))-1
#else:
# print "function: %s not found" % func_name
xs.append(self.toTextNode(s[last+1:]))
for x in xs:
tag.appendChild(x)
# ................................................................
# E, B, L, PL, BL, DL, ...
def parse_Terminal(self, T):
"If the next terminal on the stream is of type T, the terminal"
"is extracted and returned. Otherwise the function returns None"
pos = self.tokens.getpos()
t = self.tokens.next()
if t.isa(T):
return t
self.tokens.seek(pos)
return None
# ................................................................
# DIV(N) -> (B | P(N) | BL(N) | DL(N) | V(N))+
def parse_DIV(self, indent):
"Parse a DIV(N) symbol. A DIV(N) a sequence of blank"
"lines (B or other blocks at indentation level N, such as"
"pharagraphs P(N), bullet lists BL(N), description lists DN(N)"
pos = self.tokens.getpos()
xs = []
while True:
x = self.parse_Terminal(B)
if x: continue
x = self.parse_P(indent)
if x:
xs.append(x)
continue
x = self.parse_V(indent)
if x:
xs.append(x)
continue
x = self.parse_UL(indent)
if x:
xs.append(x)
continue
x = self.parse_DL(indent)
if x:
xs.append(x)
continue
break
if len(xs) == 0: return None
return xs
# ................................................................
# P(N) -> PL(N) L(N)*
def parse_P(self, indent):
content = "\n"
good = False
pos = self.tokens.getpos()
# Introduced by PL
x = self.parse_Terminal(PL)
if x:
if x.indent == indent:
content += x.content + "\n"
good = True
else:
self.tokens.back()
if not good:
return None
# Continued by zero or more L
while True:
x = self.parse_Terminal(L)
if x:
if x.indent == indent:
content += x.content + "\n"
good = True
continue
else:
self.tokens.back()
break
ptag = self.xmldoc.createElement("p")
self.addFancyText(ptag, content)
return ptag
# ................................................................
# V(N) -> L(M)+, M > N
def parse_V(self, indent):
content = "\n"
good = False
pos = self.tokens.getpos()
while True:
x = self.parse_Terminal(L)
if x:
if x.indent > indent:
content += " "*(x.indent - indent) + x.content + "\n"
good = True
continue
else:
self.tokens.back()
x = self.parse_Terminal(B)
if x:
content += "\n"
continue
break
if good:
ptag = self.xmldoc.createElement("pre")
# remove potential blank line at the end
if content[-2:] == "\n\n":
content= content[:-1]
self.addText(ptag, content)
return ptag
self.tokens.seek(pos)
return None
# ................................................................
# UL(N) -> ULI(N)+
def parse_UL(self, indent):
xs = []
while True:
x = self.parse_ULI(indent)
if x:
xs.append(x)
continue
break
if len(xs) == 0: return None
ultag = self.xmldoc.createElement("ul")
for x in xs:
ultag.appendChild(x)
return ultag
# ................................................................
# ULI(N) -> UL(N,M) L(M)* DIV(M), M > N
def parse_ULI(self, indent):
content = "\n"
good = False
pos = self.tokens.getpos()
# Introduced by UL
x = self.parse_Terminal(BL)
if x:
if x.indent == indent:
content += x.inner_content + "\n"
indent = x.inner_indent
good = True
else:
self.tokens.back()
if not good:
return None
# Continued by zero or more L
while True:
x = self.parse_Terminal(L)
if x:
if x.indent == indent:
content += x.content + "\n"
good = True
continue
else:
self.tokens.back()
break
litag = self.xmldoc.createElement(u"li")
ptag = self.xmldoc.createElement(u"p")
self.addFancyText(ptag, content)
litag.appendChild(ptag)
# Continued by DIV
xs = self.parse_DIV(indent)
if xs:
for x in xs:
litag.appendChild(x)
return litag
# ................................................................
# DL(N) -> DI(N)+
def parse_DL(self, indent):
xs = []
while True:
x = self.parse_DI(indent)
if x:
xs += x
continue
break
if len(xs) == 0: return None
dltag = self.xmldoc.createElement(u"dl")
for x in xs:
dltag.appendChild(x)
return dltag
# ................................................................
# DI(N) -> DL(N) DIV(M)?, M > N
def parse_DI(self, indent):
content = "\n"
good = False
pos = self.tokens.getpos()
xs = []
# Introduced by DL
x = self.parse_Terminal(DL)
if x:
if x.indent == indent:
content += x.content + "\n"
good = True
else:
self.tokens.back()
if not good:
return None
if False:
# adds text after :: as part of the description dd
dttag = self.xmldoc.createElement(u"dt")
dttxt = self.toTextNode(content)
dttag.appendChild(dttxt)
xs.append(dttag)
# Inject inner_content
c = x.inner_content.strip()
if len(c) > 0:
tk = PL()
tk.content = x.inner_content
t = self.tokens.next()
self.tokens.back()
if t.isa(L) and t.indent > indent:
tk.indent = t.indent
else:
tk.indent = indent+1 ;
self.tokens.rewrite(tk)
self.tokens.back()
else:
# adds text after :: as part of the description term dt
dttag = self.xmldoc.createElement(u"dt")
dttxt = self.toTextNode(content)
dttag.appendChild(dttxt)
c = x.inner_content.strip()
if len(c) > 0:
deftag = self.xmldoc.createElement(u"span")
self.addAttr(deftag, "class", "defaults")
self.addText(deftag, c)
dttag.appendChild(deftag)
xs.append(dttag)
# Continued by DIV
t = self.tokens.next()
self.tokens.back()
if t.isa(L) and t.indent > indent:
xs_ = self.parse_DIV(t.indent)
if len(xs_) > 0:
ddtag = self.xmldoc.createElement(u"dd")
for x in xs_:
ddtag.appendChild(x)
xs.append(ddtag)
return xs
# ................................................................
def toDOM(self):
# write <mfile></mfile>
xmf = self.xmldoc.createElement("div")
xmf.setAttribute(u"class", u"documentation")
self.xmldoc.appendChild(xmf)
# parse documentation
xs = self.parse_DIV(self.indentinit)
for x in xs: xmf.appendChild(x)
return self.xmldoc
if __name__ == '__main__':
text="""
Lorem Ipsum is simply dummy text of the printing and typesetting
industry. Lorem Ipsum has been the industry's standard dummy text
ever since the 1500s, when an unknown printer took a galley of type
and scrambled it to make a type specimen book. It has survived not
only five centuries, but also the leap into electronic typesetting,
remaining essentially unchanged. It was popularised in the 1960s with
the release of Letraset sheets containing Lorem Ipsum passages, and
more recently with desktop publishing software like Aldus PageMaker
including versions of Lorem Ipsum.
These are links BL(), BL(A,B) and BLA(A,A) (as long as the dictionary
cites them).
Mimamama
verbatim1
verbatim2
verbatim3
verbatim4
verbatim5
Lorem Ipsum is simply dummy text of the printing and typesetting
industry. Lorem Ipsum has been the industry's standard dummy text
ever since the 1500s, when an unknown printer took a galley of type
and scrambled it to make a type specimen book. It has survived not
only five centuries, but also the leap into electronic typesetting,
remaining essentially unchanged. It was popularised in the 1960s with
the release of Letraset sheets containing Lorem Ipsum passages, and
more recently with desktop publishing software like Aldus PageMaker
including versions of Lorem Ipsum.
- outer1 /
outer1 line 2 /
outer1 line 3 /
outer1 new paragarph
- inner1
- inner2
- inner3
continued on next line
continued with verbatim
more verbatim after blank
- inner4
- outer again
- outer
bla
- list2
- list4
- BL()
- BL(A,B)
Test descrition::
Lorem Ipsum is simply dummy text of the printing
and typesetting industry. Lorem Ipsum has been the industry's
standard dummy text ever since the 1500s, when an unknown printer
took a galley of type and scrambled it to make a type specimen
book. It has survived not only five centuries, but also the leap
into electronic typesetting, remaining essentially unchanged. It
was popularised in the 1960s with the release of Letraset sheets
containing Lorem Ipsum passages, and more recently with desktop
publishing software like Aldus PageMaker including versions of
Lorem Ipsum.
Ancora::
Bli bli bli
Blu blu blu
- list
- lust
- last
Bli bla
Verbatimmo
"""
lines = text.splitlines()
formatter = Formatter(lines, {'BL':'http://www.google.com'}, 'a')
print formatter.toDOM().toxml("UTF-8")
|
hustcalm/whatsprogrammer
|
computer-vision/SFMedu/SFMedu/lib/vlfeat/docsrc/formatter.py
|
Python
|
mit
| 17,497
|
[
"Brian"
] |
ca5926921261081e16c7ea7ceacacfb18bd650f6b5c0788a206ef4d266db5f13
|
from __future__ import absolute_import, division, print_function
from operator import add
from itertools import chain
def inc(x):
    """Return *x* incremented by one."""
    return add(x, 1)
def ishashable(x):
    """ Is x hashable?

    Example
    -------

    >>> ishashable(1)
    True
    >>> ishashable([1])
    False
    """
    # hash() raises TypeError for unhashable objects (lists, dicts, ...).
    try:
        hash(x)
    except TypeError:
        return False
    return True
def istask(x):
    """ Is x a runnable task?

    A task is a tuple with a callable first argument

    Example
    -------

    >>> inc = lambda x: x + 1
    >>> istask((inc, 1))
    True
    >>> istask(1)
    False
    """
    # Guard clause, then short-circuit exactly like the original
    # `isinstance(x, tuple) and x and callable(x[0])` expression
    # (an empty tuple yields the tuple itself, i.e. a falsy value).
    if not isinstance(x, tuple):
        return False
    return x and callable(x[0])
def _get_task(d, task, maxdepth=1000):
    """Evaluate a single ``task`` against dask ``d`` without Python recursion.

    An explicit stack of frames replaces the call stack a recursive
    implementation would build (frame layout described in the comments
    below).  ``maxdepth`` acts as a cycle guard: once the stack grows past
    it, the graph is checked and a ``RuntimeError`` is raised if a cycle is
    found; otherwise the guard is disabled for the rest of the evaluation.
    """
    # non-recursive.  DAG property is checked upon reaching maxdepth.
    _iter = lambda *args: iter(args)

    # We construct a nested heirarchy of tuples to mimic the execution stack
    # of frames that Python would maintain for a recursive implementation.
    # A frame is associated with a single task from a Dask.
    # A frame tuple has three elements:
    #    1) The function for the task.
    #    2) The arguments for the task (typically keys in the Dask).
    #       Arguments are stored in reverse order, and elements are popped
    #       as they are evaluated.
    #    3) The calculated results of the arguments from (2).
    stack = [(task[0], list(task[:0:-1]), [])]
    while True:
        func, args, results = stack[-1]
        if not args:
            # All arguments evaluated: apply the function.
            val = func(*results)
            if len(stack) == 1:
                return val
            # Hand the value up to the parent frame's results.
            stack.pop()
            stack[-1][2].append(val)
            continue
        elif maxdepth and len(stack) > maxdepth:
            cycle = getcycle(d, list(task[1:]))
            if cycle:
                cycle = '->'.join(cycle)
                raise RuntimeError('Cycle detected in Dask: %s' % cycle)
            # Graph is acyclic, just deep: stop checking depth from here on.
            maxdepth = None

        key = args.pop()
        if isinstance(key, list):
            # v = (get(d, k, concrete=False) for k in key)  # recursive
            # Fake being lazy
            stack.append((_iter, key[::-1], []))
            continue
        elif ishashable(key) and key in d:
            v = d[key]
        else:
            # Literal value, not a key in the graph.
            v = key
        if istask(v):
            stack.append((v[0], list(v[:0:-1]), []))
        else:
            results.append(v)
def get(d, key, get=None, concrete=True, **kwargs):
    """ Get value from Dask

    Example
    -------

    >>> inc = lambda x: x + 1
    >>> d = {'x': 1, 'y': (inc, 'x')}

    >>> get(d, 'x')
    1
    >>> get(d, 'y')
    2

    Parameters
    ----------
    d : dict
        Dask graph mapping keys to values or tasks.
    key : object
        A key in ``d``, a task, a list of keys, or a literal value.
    get : callable, optional
        Scheduler used for nested evaluation; defaults to this function.
    concrete : bool, optional
        When ``key`` is a list, materialize the results into a list
        (True) or leave them as a lazy generator (False).

    See Also
    --------
    set
    """
    get = get or _get
    if isinstance(key, list):
        # A list of keys: evaluate each element (lazily unless concrete).
        v = (get(d, k, get=get, concrete=concrete) for k in key)
        if concrete:
            v = list(v)
    elif ishashable(key) and key in d:
        v = d[key]
    elif istask(key):
        v = key
    else:
        # Literal value that is neither a key nor a task: return as-is.
        return key
    if istask(v):
        if get is _get:
            # use non-recursive method by default
            return _get_task(d, v)
        func, args = v[0], v[1:]
        # First resolve args non-concretely, then fully, with the
        # caller-supplied scheduler.
        args2 = [get(d, arg, get=get, concrete=False) for arg in args]
        return func(*[get(d, arg, get=get) for arg in args2])
    else:
        return v
# Alias for the default (reference) scheduler.  ``get`` compares its
# ``get=`` argument against this alias to decide whether the fast
# non-recursive ``_get_task`` path may be used.
_get = get
def _deps(dsk, arg):
    """ Get dependencies from keys or tasks

    Helper function for get_dependencies.

    >>> dsk = {'x': 1, 'y': 2}

    >>> _deps(dsk, 'x')
    ['x']
    >>> _deps(dsk, (add, 'x', 1))
    ['x']
    >>> _deps(dsk, (add, 'x', (inc, 'y')))  # doctest: +SKIP
    ['x', 'y']
    """
    if istask(arg):
        # A task: collect dependencies from each of its arguments.
        out = []
        for part in arg[1:]:
            out.extend(_deps(dsk, part))
        return out
    try:
        found = arg in dsk
    except TypeError:
        # Unhashable values can never be graph keys.
        return []
    return [arg] if found else []
def get_dependencies(dsk, task, as_list=False):
    """ Get the immediate tasks on which this task depends

    >>> dsk = {'x': 1,
    ...        'y': (inc, 'x'),
    ...        'z': (add, 'x', 'y'),
    ...        'w': (inc, 'z'),
    ...        'a': (add, (inc, 'x'), 1)}

    >>> get_dependencies(dsk, 'x')
    set([])

    >>> get_dependencies(dsk, 'y')
    set(['x'])

    >>> get_dependencies(dsk, 'z')  # doctest: +SKIP
    set(['x', 'y'])

    >>> get_dependencies(dsk, 'w')  # Only direct dependencies
    set(['z'])

    >>> get_dependencies(dsk, 'a')  # Ignore non-keys
    set(['x'])
    """
    # Walk the task's value, unwrapping nested tasks and lists, collecting
    # every leaf value as a candidate key.
    pending = [dsk[task]]
    leaves = []
    while pending:
        item = pending.pop()
        if istask(item):
            pending.extend(item[1:])
        elif isinstance(item, list):
            pending.extend(item)
        else:
            leaves.append(item)
    if not leaves:
        return [] if as_list else set()
    # Keep only leaves that are actual keys of the graph.
    deps = []
    for leaf in leaves:
        deps.extend(_deps(dsk, leaf))
    return deps if as_list else set(deps)
def flatten(seq):
    """ Flatten nested lists, leaving every other type untouched.

    >>> list(flatten([1]))
    [1]

    >>> list(flatten([[1, 2], [1, 2]]))
    [1, 2, 1, 2]

    >>> list(flatten([[[1], [2]], [[1], [2]]]))
    [1, 2, 1, 2]

    >>> list(flatten(((1, 2), (1, 2)))) # Don't flatten tuples
    [(1, 2), (1, 2)]

    >>> list(flatten((1, 2, [3, 4]))) # support heterogeneous
    [1, 2, 3, 4]
    """
    for element in seq:
        if not isinstance(element, list):
            yield element
        else:
            # Recurse only into lists; tuples and other iterables pass through.
            for sub in flatten(element):
                yield sub
def reverse_dict(d):
    """ Invert a dependency mapping.

    Every term appearing as a key or as a value element gets an entry; the
    entry holds the set of keys that reference it.

    >>> a, b, c = 'abc'
    >>> d = {a: [b, c], b: [c]}
    >>> reverse_dict(d)  # doctest: +SKIP
    {'a': set([]), 'b': set(['a']}, 'c': set(['a', 'b'])}
    """
    # Seed every known term (keys and referenced values) with an empty set.
    result = {}
    for term in d:
        result.setdefault(term, set())
    for term in chain.from_iterable(d.values()):
        result.setdefault(term, set())
    # Record, for each term, which keys point at it.
    for key, vals in d.items():
        for val in vals:
            result[val].add(key)
    return result
def subs(task, key, val):
    """ Perform a substitution on a task

    Example
    -------

    >>> subs((inc, 'x'), 'x', 1)  # doctest: +SKIP
    (inc, 1)
    """
    if not istask(task):
        # Not a task: substitute directly, recursing into lists.
        # NOTE: the equality test comes first, matching the original
        # precedence when ``task`` is a list equal to ``key``.
        if task == key:
            return val
        if isinstance(task, list):
            return [subs(item, key, val) for item in task]
        return task
    replaced = []
    for arg in task[1:]:
        if istask(arg):
            replaced.append(subs(arg, key, val))
        elif isinstance(arg, list):
            replaced.append([subs(item, key, val) for item in arg])
        else:
            replaced.append(val if arg == key else arg)
    return task[:1] + tuple(replaced)
def _toposort(dsk, keys=None, returncycle=False):
    """Depth-first topological traversal of ``dsk``.

    With ``returncycle=False`` (default), return keys in topological order
    and raise ``RuntimeError`` if a cycle is found.  With
    ``returncycle=True``, return the first cycle found as a list of keys
    (empty list if the graph is acyclic) and never raise.
    ``keys`` restricts the traversal to the given key(s); default is the
    whole graph.
    """
    # Stack-based depth-first search traversal.  This is based on Tarjan's
    # method for topological sorting (see wikipedia for pseudocode)
    if keys is None:
        keys = dsk
    elif not isinstance(keys, list):
        keys = [keys]
    if not returncycle:
        ordered = []

    # Nodes whose descendents have been completely explored.
    # These nodes are guaranteed to not be part of a cycle.
    completed = set()

    # All nodes that have been visited in the current traversal.  Because
    # we are doing depth-first search, going "deeper" should never result
    # in visiting a node that has already been seen.  The `seen` and
    # `completed` sets are mutually exclusive; it is okay to visit a node
    # that has already been added to `completed`.
    seen = set()

    for key in keys:
        if key in completed:
            continue
        nodes = [key]
        while nodes:
            # Keep current node on the stack until all descendants are visited
            cur = nodes[-1]
            if cur in completed:
                # Already fully traversed descendants of cur
                nodes.pop()
                continue
            seen.add(cur)

            # Add direct descendants of cur to nodes stack
            next_nodes = []
            for nxt in get_dependencies(dsk, cur):
                if nxt not in completed:
                    if nxt in seen:
                        # Cycle detected!  Reconstruct it by unwinding the
                        # stack back to the repeated node.
                        cycle = [nxt]
                        while nodes[-1] != nxt:
                            cycle.append(nodes.pop())
                        cycle.append(nodes.pop())
                        cycle.reverse()
                        if returncycle:
                            return cycle
                        else:
                            cycle = '->'.join(cycle)
                            raise RuntimeError('Cycle detected in Dask: %s' % cycle)
                    next_nodes.append(nxt)

            if next_nodes:
                nodes.extend(next_nodes)
            else:
                # cur has no more descendants to explore, so we're done with it
                if not returncycle:
                    ordered.append(cur)
                completed.add(cur)
                seen.remove(cur)
                nodes.pop()
    if returncycle:
        return []
    return ordered
def toposort(dsk):
    """ Return a list of keys of dask sorted in topological order.

    Raises ``RuntimeError`` if the graph contains a cycle.
    """
    return _toposort(dsk)
def getcycle(d, keys):
    """ Return a list of nodes that form a cycle if Dask is not a DAG.

    Returns an empty list if no cycle is found.

    ``keys`` may be a single key or list of keys.

    Example
    -------

    >>> d = {'x': (inc, 'z'), 'y': (inc, 'x'), 'z': (inc, 'y')}
    >>> getcycle(d, 'x')
    ['x', 'z', 'y', 'x']

    See Also
    --------
    isdag
    """
    # Delegates to _toposort in cycle-reporting mode (never raises).
    return _toposort(d, keys=keys, returncycle=True)
def isdag(d, keys):
    """ Does Dask form a directed acyclic graph when calculating keys?

    ``keys`` may be a single key or list of keys.

    Example
    -------

    >>> inc = lambda x: x + 1
    >>> isdag({'x': 0, 'y': (inc, 'x')}, 'y')
    True
    >>> isdag({'x': (inc, 'y'), 'y': (inc, 'x')}, 'y')
    False

    See Also
    --------
    getcycle
    """
    # True exactly when getcycle finds no cycle (empty list is falsy).
    return not getcycle(d, keys)
|
PeterDSteinberg/dask
|
dask/core.py
|
Python
|
bsd-3-clause
| 9,803
|
[
"VisIt"
] |
5f67454967ae72eaa72f5c90a2c28f2e91f645080fa91899da2910779086acd8
|
#!/usr/bin/env python
'''
Master loader for all Coordinated Canyon Experiment data
from October 2015 through 2016
Mike McCann
MBARI 26 January March 2016
'''
import os
import sys
parent_dir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parent_dir) # So that CCE is found
from CCE import CCELoader
from collections import namedtuple
from DAPloaders import NoValidData
from datetime import datetime
import numpy as np
import timing
# CCE event start and end times for loading timeseriesprofile mooring (ADCP) data
# An Event bounds one canyon event with inclusive start/end datetimes.
Event = namedtuple('Event', ['start', 'end'])

# Multi-day windows around each event; loaded with the low-resolution strides.
lores_event_times = [
    Event(datetime(2016, 1, 15, 0, 0), datetime(2016, 1, 18, 0, 0)),
    Event(datetime(2016, 3, 5, 0, 0), datetime(2016, 3, 8, 0, 0)),
]
# Hours-long windows inside the events above; loaded with high-resolution strides.
hires_event_times = [
    Event(datetime(2016, 1, 15, 21, 0), datetime(2016, 1, 16, 2, 0)),
    Event(datetime(2016, 3, 6, 0, 0), datetime(2016, 3, 7, 0, 0)),
]
# Overall time period for the whole campaign
#campaign_start_datetime = datetime(2015, 10, 13, 0, 0)
#campaign_end_datetime = datetime(2017, 4, 11, 0, 0)
class CCE_2015_Campaign:
    """Configure and run STOQS loads for the Coordinated Canyon Experiment.

    The constructor builds a :class:`CCELoader` and populates it with the
    data-source attributes for BEDs (Benthic Event Detectors), the SIN
    (Seafloor Instrument Node), and moorings MS1-MS5.  The ``load_*``
    methods then load the full time series plus low- and high-resolution
    windows around each canyon event.
    """

    def __init__(self, db_alias='stoqs_cce2015', campaign_name='Coordinated Canyon Experiment'):
        # All load configuration lives as attributes on self.cl; the
        # remainder of __init__ only assigns those attributes.
        self.cl = CCELoader(db_alias, campaign_name,
                        description = 'Coordinated Canyon Experiment - Measuring turbidity flows in Monterey Submarine Canyon',
                        x3dTerrains = {
                            'https://stoqs.mbari.org/x3d/MontereyCanyonBeds_1m+5m_1x_src/MontereyCanyonBeds_1m+5m_1x_src_scene.x3d': {
                                'name': 'MontereyCanyonBeds_1m+5m_1x',
                                'position': '2232.80938 10346.25515 3543.76722',
                                'orientation': '-0.98394 0.16804 -0.06017 1.25033',
                                'centerOfRotation': '0 0 0',
                                'VerticalExaggeration': '1',
                                'geoOrigin': '36.80, -121.87, -400',
                                'speed': '1.0',
                                'zNear': '100.0',
                                'zFar': '30000.0',
                                'selected': '1'
                            },
                            'https://stoqs.mbari.org/x3d/Monterey25_1x/Monterey25_1x_src_scene.x3d': {
                                'name': 'Monterey25_1x',
                                'position': '-32985.28634 88026.90417 22334.02600',
                                'orientation': '-0.99875 -0.04772 0.01482 1.31683',
                                'centerOfRotation': '-20564.015827789044 -1956.065669754069 14112.954469753739',
                                'VerticalExaggeration': '1',
                                'geoOrigin': '36.80, -121.87, -400',
                                'speed': '1.0',
                                'zNear': '-1',
                                'zFar': '-1',
                            },
                        },
                        # Do not check in .grd files to the repository, keep them in the loaders directory
                        grdTerrain=os.path.join(parent_dir, 'MontereyCanyonBeds_1m+5m.grd'),
                        ##grdTerrain=os.path.join(parent_dir, 'Monterey25.grd'),
                   )

        # Base OPeNDAP server
        self.cl.bed_base = 'http://dods.mbari.org/opendap/data/CCE_Processed/BEDs/'

        # Copied from ProjectLibrary to BEDs SVN working dir for netCDF conversion, and then copied to elvis.
        # See BEDs/BEDs/Visualization/py/makeBEDNetCDF_CCE.sh
        self.cl.bed_parms = ['XA', 'YA', 'ZA', 'A', 'XR', 'YR', 'ZR', 'ROT_RATE', 'ROT_COUNT', 'P', 'P_ADJUSTED',
                             'P_RATE', 'P_SPLINE', 'P_SPLINE_RATE', 'ROT_DIST', 'IMPLIED_VELOCITY', 'BED_DEPTH_CSI',
                             'BED_DEPTH', 'BED_DEPTH_LI', 'DIST_TOPO', 'TUMBLE_RATE', 'TUMBLE_COUNT', 'TIDE',
                             'ROT_X', 'ROT_Y', 'ROT_Z', 'AXIS_X', 'AXIS_Y', 'AXIS_Z', 'ANGLE']

        # Several BED files: 30200078 to 3020080
        # bed_files, bed_platforms, bed_depths must have same number of items; they are zipped together in the load
        ##self.cl.bed_files = [(f'CanyonEvents/BED3/20151001_20160115/{n}.nc') for n in range(30200078, 30200081)]
        ##self.cl.bed_platforms = ['BED03'] * len(self.cl.bed_files)
        ##self.cl.bed_depths = [201] * len(self.cl.bed_files)

        # Just the event files for the CCE
        # Each entry pairs a BED trajectory netCDF path with an (optional) ROV framegrab URL.
        self.cl.bed_files_framegrabs_2015 = [
                            ('BED04/MBCCE_BED04_20151004_Event20151201/netcdf/40100037_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2015/vnta3872/00_17_50_24.html'),
                            ('BED05/MBCCE_BED05_20151027_Event20151201/netcdf/50200024_decim_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2015/vnta3873/00_29_56_03.html'),
                            ]
        self.cl.bed_files_framegrabs_2016 = [
                            ('BED03/20151001_20160115/netcdf/30200078_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2015/vnta3874/00_21_23_28.html'),
                            ('BED06/20151001_20160115/netcdf/60100068_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2015/vnta3870/00_15_38_23.html'),
                            ('BED03/MBCCE_BED03_20160212_Event20160217/netcdf/30300004_decim_traj.nc',
                             ''),
                            ('BED05/MBCCE_BED05_20151027_Event20160115/netcdf/50200054_decim_traj.nc',
                             ''),
                            ('BED05/MBCCE_BED05_20151027_Event20160115/netcdf/50200055_decim_traj.nc',
                             ''),
                            ('BED05/MBCCE_BED05_20151027_Event20160115/netcdf/50200056_decim_traj.nc',
                             ''),
                            ('BED05/MBCCE_BED05_20151027_Event20160115/netcdf/50200057_decim_traj.nc',
                             ''),
                            ('BED03/MBCCE_BED03_20160212_Event20160306/netcdf/30300016_decim_traj.nc',
                             ''),
                            ('BED06/MBCCE_BED06_20160222_Event20160306/netcdf/60200011_decim_traj.nc',
                             ''),
                            ('BED06/MBCCE_BED06_20160222_Event20160306/netcdf/60200012_decim_traj.nc',
                             ''),
                            ('BED06/MBCCE_BED06_20160222_Event20160901/netcdf/60200130_decim_traj.nc',
                             ''),
                            ('BED09/MBCCE_BED09_20160408_Event20160901/netcdf/90100096_decim_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3922/02_55_51_27.html'),
                            ('BED10/MBCCE_BED10_20160408_Event20160901/netcdf/A0100096_decim_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3921/02_05_37_16.html'),
                            # Uncomment to test 3D replay of BED motion - it should rotate only around the X-Axis
                            ##('BED00/Simulated/netcdf/BED00_SIM_rolling_trajectory.nc',
                            ## ''),
                            ] + [
                            (f'BED09/MBCCE_BED09_20160408_Event20161124/netcdf/901001{n}_full_traj.nc', '') for n in (
                                list(range(56, 63)) + list(range(64, 65)))
                            ] + [
                            ('BED09/MBCCE_BED09_20160408_Event20161124/netcdf/90100165_full.nc',
                             ''),
                            ('BED03/MBCCE_BED03_20161005_Event20161124/netcdf/30400015_decim_traj.nc',
                             ''),
                            ('BED10/MBCCE_BED10_20160408_Event20161124/netcdf/A0100154_decim_traj.nc',
                             ''),
                            ('BED04/MBCCE_BED04_20151004_Event20161124/netcdf/40200014_decim_traj.nc',
                             ''),
                            ]
        self.cl.bed_files_framegrabs_2017 = [
                            (f'BED09/MBCCE_BED09_20160408_Watch/netcdf/9010000{n}.nc', '') for n in range(4, 8)
                            ] + [
                            ('BED09/MBCCE_BED09_20160408_Event20170109/netcdf/90100196_full_traj.nc',
                             ''),
                            ('BED11/MBCCE_BED11_20161010_Event20170109/netcdf/B0100026_full_traj.nc',
                             ''),
                            ('BED11/MBCCE_BED11_20161010_Event20170109/netcdf/B0100027_full.nc',
                             ''),
                            ('BED11/MBCCE_BED11_20161010_Event20170109/netcdf/B0100028_full_traj.nc',
                             ''),
                            ('BED00/Simulated/netcdf/BED00_cycle_rot_axes_200_202_trajectory.nc',
                             ''),
                            ('BED08/MBCCE_BED08_20161005_Event20161124/netcdf/80200014_decim_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20161124/netcdf/80200014_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20161124/netcdf/80200015_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20161124/netcdf/80200016_decim_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20161124/netcdf/80200016_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20161124/netcdf/80200017_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20161124/netcdf/80200019_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20161124/netcdf/80200020_decim_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20161124/netcdf/80200020_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20170109/netcdf/80200034_decim_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20170109/netcdf/80200039_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20170203/netcdf/80200046_decim_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20170218/netcdf/80200050_decim_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED08/MBCCE_BED08_20161005_Event20170218/netcdf/80200052_full_traj.nc',
                             'http://search.mbari.org/ARCHIVE/frameGrabs/Ventana/stills/2016/vnta3969/00_26_09_00.html'),
                            ('BED11/MBCCE_BED11_20161010_Event20161124/netcdf/B0100012_decim_traj.nc',
                             ''),
                            ('BED06/MBCCE_BED06_20160222_Event20170109/netcdf/60200218_decim_traj.nc',
                             ''),
                            ('BED06/MBCCE_BED06_20160222_Event20170109/netcdf/60200219_decim_traj.nc',
                             ''),
                            ('BED03/MBCCE_BED03_20161005_Event20170203/netcdf/30400034_full_traj.nc',
                             ''),
                            ('BED06/MBCCE_BED06_20160222_Event20170203/netcdf/60200236_decim_traj.nc',
                             ''),
                            ('BED06/MBCCE_BED06_20160222_Event20170218/netcdf/60200246_decim_traj.nc',
                             ''),
                            ('BED11/MBCCE_BED11_20161010_Event20170203/netcdf/B0100036_full_traj.nc',
                             ''),
                            ('BED11/MBCCE_BED11_20161010_Event20170203/netcdf/B0100037_full.nc',
                             ''),
                            ]
        # Combine the per-year lists; derive parallel files/framegrab/platform lists.
        self.cl.bed_files_framegrabs = self.cl.bed_files_framegrabs_2015 + self.cl.bed_files_framegrabs_2016 + self.cl.bed_files_framegrabs_2017
        self.cl.bed_files = [ffg[0] for ffg in self.cl.bed_files_framegrabs]
        self.cl.bed_framegrabs = [ffg[1] for ffg in self.cl.bed_files_framegrabs]
        # Platform name is the leading path component, e.g. 'BED04'.
        self.cl.bed_platforms = [f.split('/')[0] for f in self.cl.bed_files ]
        # Execute just before loading BEDs data, as this delays the start of loading mooring data
        ##self.cl.bed_depths = np.round(self.cl.get_start_bed_depths(), 1)

        # CCE event start and end times for loading mooring data
        self.lores_event_times = lores_event_times
        self.hires_event_times = hires_event_times

        # CCE SIN (Seafloor Instrument Node) data - all parameters but the timeseriesprofile ADCP data
        # There are 3 categories of loaded data:
        # 1. 10-minute resolution entire timeseries of non-profile parameters
        # 2. Low resolution profile data (ADCP) for a few days around each event
        # 3. High resolution profile data (ADCP) for a several hours around each event
        self.cl.ccesin_nominaldepth = 1836
        self.cl.ccesin_base = 'http://dods.mbari.org/opendap/data/CCE_Processed/SIN/'
        self.cl.ccesin_files = [
                            '20151013/CTDOBSTrans/MBCCE_SIN_CTDOBSTrans_20151013_timecorrected.nc',
                            '20151013/OX/MBCCE_SIN_OX_20151013_timecorrected.nc',
                            '20151013/FLNTU/MBCCE_SIN_FLNTU_20151013_timecorrected.nc',
                            '20151013/ADCP300/MBCCE_SIN_ADCP300_20151013.nc',
                            '20151013/ADCP600/MBCCE_SIN_ADCP600_20151013.nc',
                            '20151013/ADCP1200/MBCCE_SIN_ADCP1200_20151013.nc',
                            '20160417/CTDOBSTrans/MBCCE_SIN_CTDOBSTrans_20160417_timecorrected.nc',
                            '20160417/OX/MBCCE_SIN_OX_20160417_timecorrected.nc',
                            '20160417/FLNTU/MBCCE_SIN_FLNTU_20160417_timecorrected.nc',
                            '20161018/CTDOBSTrans/MBCCE_SIN_CTDOBSTrans_20161018_timecorrected.nc',
                            '20161018/OX/MBCCE_SIN_OX_20161018_timecorrected.nc',
                            '20161018/FLNTU/MBCCE_SIN_FLNTU_20161018_timecorrected.nc',
                          ]
        self.cl.ccesin_parms = [ 'pressure', 'temperature', 'conductivity', 'turbidity', 'optical_backscatter',
                                 'oxygen', 'saturation', 'optode_temperature',
                                 'chlor', 'ntu1', 'ntu2',
                                 'Hdg_1215', 'Ptch_1216', 'Roll_1217']

        # CCE SIN (Seafloor Instrument Node) data - files and parameters to load for just the events:
        # Just the timeseriesprofile ADCP data
        self.cl.ccesin_nominaldepth_ev = self.cl.ccesin_nominaldepth
        self.cl.ccesin_base_ev = self.cl.ccesin_base
        self.cl.ccesin_files_ev = [
                            '20151013/ADCP300/MBCCE_SIN_ADCP300_20151013.nc',
                            '20151013/ADCP600/MBCCE_SIN_ADCP600_20151013.nc',
                            '20151013/ADCP1200/MBCCE_SIN_ADCP1200_20151013.nc',
                          ]
        self.cl.ccesin_parms_ev = [ 'u_1205', 'v_1206', 'w_1204', 'AGC_1202' ]

        # Just the timeseries data for the highres period
        self.cl.ccesin_files_ev_hires = [
                            '20151013/CTDOBSTrans/MBCCE_SIN_CTDOBSTrans_20151013_timecorrected.nc',
                            '20151013/OX/MBCCE_SIN_OX_20151013_timecorrected.nc',
                            '20151013/FLNTU/MBCCE_SIN_FLNTU_20151013_timecorrected.nc',
                            '20160417/CTDOBSTrans/MBCCE_SIN_CTDOBSTrans_20160417_timecorrected.nc',
                            '20160417/OX/MBCCE_SIN_OX_20160417_timecorrected.nc',
                            '20160417/FLNTU/MBCCE_SIN_FLNTU_20160417_timecorrected.nc',
                            '20161018/CTDOBSTrans/MBCCE_SIN_CTDOBSTrans_20161018_timecorrected.nc',
                            '20161018/OX/MBCCE_SIN_OX_20161018_timecorrected.nc',
                            '20161018/FLNTU/MBCCE_SIN_FLNTU_20161018_timecorrected.nc',
                          ]
        self.cl.ccesin_parms_ev_hires = [ 'pressure', 'temperature', 'conductivity', 'turbidity', 'optical_backscatter',
                                 'oxygen', 'saturation', 'optode_temperature',
                                 'chlor', 'ntu1', 'ntu2',
                                 'Hdg_1215', 'Ptch_1216', 'Roll_1217']

        # MS1 ADCP data - timeseries data
        self.cl.ccems1_nominal_depth = 225
        self.cl.ccems1_base = 'http://dods.mbari.org/opendap/data/CCE_Archive/MS1/'
        self.cl.ccems1_files = [
                            '20151006/ADCP300/MBCCE_MS1_ADCP300_20151006.nc',
                            '20151006/Aquadopp2000/MBCCE_MS1_Aquadopp2000_20151006.nc',
                            '20151006/CTOBSTrans9m/MBCCE_MS1_CTOBSTrans9m_20151006.nc',
                            '20151006/TD65m/MBCCE_MS1_TD65m_20151006.nc',
                            '20151006/TU35m/MBCCE_MS1_TU35m_20151006.nc',
                            '20151006/TU65m/MBCCE_MS1_TU65m_20151006.nc',
                          ]
        self.cl.ccems1_parms = [
                            'Hdg_1215', 'Ptch_1216', 'Roll_1217',
                            'P_1', 'T_1211',
                            'T_28', 'S_41', 'ST_70', 'tran_4010', 'ATTN_55', 'NEP_56', 'Trb_980',
                          ]
        # MS1 ADCP data - timeseriesprofile (ADCP) data
        self.cl.ccems1_nominal_depth_ev = self.cl.ccems1_nominal_depth
        self.cl.ccems1_base_ev = self.cl.ccems1_base
        self.cl.ccems1_files_ev = [
                            '20151006/ADCP300/MBCCE_MS1_ADCP300_20151006.nc',
                            '20151006/Aquadopp2000/MBCCE_MS1_Aquadopp2000_20151006.nc',
                          ]
        self.cl.ccems1_parms_ev = [ 'u_1205', 'v_1206', 'w_1204', 'AGC_1202' ]

        # MS2 ADCP data - timeseries data
        self.cl.ccems2_nominal_depth = 462
        self.cl.ccems2_base = 'http://dods.mbari.org/opendap/data/CCE_Archive/MS2/'
        self.cl.ccems2_files = [
                            '20151005/ADCP300/MBCCE_MS2_ADCP300_20151005.nc',
                            '20151005/CTD9m/MBCCE_MS2_CTD9m_20151005.nc',
                            '20151005/TU9m/MBCCE_MS2_TU9m_20151005.nc',
                          ]
        self.cl.ccems2_parms = [
                            'Hdg_1215', 'Ptch_1216', 'Roll_1217',
                            'D_3', 'P_1', 'T_28', 'S_41',
                            'NEP_56', 'Trb_980',
                          ]
        # MS2 ADCP data - timeseriesprofile (ADCP) data
        self.cl.ccems2_nominal_depth_ev = self.cl.ccems2_nominal_depth
        self.cl.ccems2_base_ev = self.cl.ccems2_base
        self.cl.ccems2_files_ev = [
                            '20151005/ADCP300/MBCCE_MS2_ADCP300_20151005.nc',
                          ]
        self.cl.ccems2_parms_ev = [ 'u_1205', 'v_1206', 'w_1204', 'AGC_1202' ]

        # MS3 ADCP and CTD data - timeseries data
        self.cl.ccems3_nominal_depth = 764
        self.cl.ccems3_base = 'http://dods.mbari.org/opendap/data/CCE_Archive/MS3/'
        self.cl.ccems3_files = [
                            '20151005/ADCP300/MBCCE_MS3_ADCP300_20151005.nc',
                            '20151005/Aquadopp2000/MBCCE_MS3_Aquadopp2000_20151005.nc',
                            '20151005/CT9m/MBCCE_MS3_CT9m_20151005.nc',
                            '20151005/TU9m/MBCCE_MS3_TU9m_20151005.nc',
                          ]
        self.cl.ccems3_parms = [
                            'Hdg_1215', 'Ptch_1216', 'Roll_1217',
                            'P_1', 'T_1211', 'NEP1_56',
                            'T_28', 'S_41', 'ST_70',
                            'tran_4010', 'ATTN_55',
                          ]
        # MS3 ADCP data - timeseriesprofile (ADCP) data
        self.cl.ccems3_nominal_depth_ev = self.cl.ccems3_nominal_depth
        self.cl.ccems3_base_ev = self.cl.ccems3_base
        self.cl.ccems3_files_ev = [
                            '20151005/ADCP300/MBCCE_MS3_ADCP300_20151005.nc',
                            '20151005/Aquadopp2000/MBCCE_MS3_Aquadopp2000_20151005.nc',
                          ]
        self.cl.ccems3_parms_ev = [ 'u_1205', 'v_1206', 'w_1204', 'AGC_1202' ]

        # MS4 ADCP - 20151005 data files are corrupted
        ##self.cl.ccems4_nominal_depth = 462
        ##self.cl.ccems4_base = 'http://dods.mbari.org/opendap/data/CCE_Archive/MS4...'
        ##self.cl.ccems4_files = [ '' ]
        ##self.cl.ccems4_parms = [ 'u_1205', 'v_1206', 'w_1204', 'AGC_1202', ]
        ##self.cl.ccems4_parms = [ 'u_1205', 'v_1206', 'w_1204', 'AGC_1202', 'Hdg_1215', 'Ptch_1216', 'Roll_1217']

        # MS5 ADCP data - timeseries data
        self.cl.ccems5_nominal_depth = 1315
        self.cl.ccems5_base = 'http://dods.mbari.org/opendap/data/CCE_Archive/MS5/'
        self.cl.ccems5_files = [
                            '20151020/ADCP300/MBCCE_MS5_ADCP300_20151020.nc',
                          ]
        self.cl.ccems5_parms = [ 'Hdg_1215', 'Ptch_1216', 'Roll_1217']
        # MS3 ADCP data - timeseriesprofile (ADCP) data
        self.cl.ccems5_nominal_depth_ev = self.cl.ccems5_nominal_depth
        self.cl.ccems5_base_ev = self.cl.ccems5_base
        self.cl.ccems5_files_ev = [
                            '20151020/ADCP300/MBCCE_MS5_ADCP300_20151020.nc',
                          ]
        self.cl.ccems5_parms_ev = [ 'u_1205', 'v_1206', 'w_1204', 'AGC_1202' ]

        # Full-deployment files, exatracted from SSDS with stride of 60
        ##self.cl.ccesin_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/ccesin2015/201510/'
        ##self.cl.ccesin_files = [
        ##                    'ccesin2015_aanderaaoxy_20151013.nc',
        ##                    'ccesin2015_adcp1825_20151013.nc',
        ##                    'ccesin2015_adcp1827_20151013.nc',
        ##                    'ccesin2015_adcp1828_20151013.nc',
        ##                    'ccesin2015_ecotriplet_20151013.nc',
        ##                    'ccesin2015_sbe16_20151013.nc',
        ##                  ]
        ##self.cl.ccesin_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/ccesin20160115/201601/'
        ##self.cl.ccesin_files = [
        ##                    ##'ccesin20160115_aanderaaoxy_20160115.nc',
        ##                    'ccesin20160115_adcp1825_20160115.nc',
        ##                    'ccesin20160115_adcp1827_20160115.nc',
        ##                    'ccesin20160115_adcp1828_20160115.nc',
        ##                    ##'ccesin20160115_ecotriplet_20160115.nc',
        ##                    ##'ccesin20160115_sbe16_20160115.nc',
        ##                  ]
        ##self.cl.ccesin_parms = [
        ##                    'u_component_uncorrected', 'v_component_uncorrected',
        ##                    'echo_intensity_beam1',
        ##                    #'echo_intensity_beam2', 'echo_intensity_beam3', 'echo_intensity_beam4',
        ##                    #'std_head', 'std_pitch', 'std_roll', 'xdcr_temperature',
        ##                    ##'Pressure', 'Salinity', 'Temperature',
        ##                    ##'AirSaturation', 'Oxygen',
        ##                    ##'Chlor', 'NTU1', 'NTU2',
        ##                  ]

        # Execute the load for trajectory representation
        self.cl.process_command_line()

    def load_ccemoorings(self, stride=20, start_mooring=1, end_mooring=5):
        """Load full-deployment timeseries for moorings MS1..MS5.

        Only moorings that have a ``ccems<N>_base`` attribute configured
        are loaded; load errors with no valid data are logged and skipped.
        """
        for mooring in range(start_mooring, end_mooring + 1):
            if hasattr(self.cl, f'ccems{mooring:d}_base'):
                try:
                    getattr(self.cl, f'load_ccems{mooring:d}')(stride=stride)
                except NoValidData as e:
                    # NOTE(review): logger.warn is a deprecated alias of
                    # logger.warning -- confirm before changing.
                    self.cl.logger.warn(str(e))

    def load_ccemoorings_ev(self, low_res_stride=20, high_res_stride=1,
                            start_mooring=1, end_mooring=5):
        """Load event-window (ADCP) data for moorings MS1..MS5.

        For each mooring with ``ccems<N>_base_ev`` configured, swap the
        event files/parms into the standard attributes, then load the
        lores windows with ``low_res_stride`` and the hires windows with
        ``high_res_stride``.
        """
        # DRY: for all moorings load all lo res and hi res data that have a .._base attribute
        for mooring in range(start_mooring, end_mooring + 1):
            if not hasattr(self.cl, f'ccems{mooring:d}_base_ev'):
                self.cl.logger.warning(f'Skipping mooring ms{mooring:d}, no ccems{mooring:d}_base_ev attribute')
                continue
            setattr(self.cl, f'ccems{mooring:d}_base', eval(f'self.cl.ccems{mooring:d}_base_ev'))
            setattr(self.cl, f'ccems{mooring:d}_files', eval(f'self.cl.ccems{mooring:d}_files_ev'))
            setattr(self.cl, f'ccems{mooring:d}_parms', eval(f'self.cl.ccems{mooring:d}_parms_ev'))
            if hasattr(self.cl, f'ccems{mooring:d}_base'):
                for event in self.lores_event_times:
                    setattr(self.cl, f'ccems{mooring:d}_start_datetime', event.start)
                    setattr(self.cl, f'ccems{mooring:d}_end_datetime', event.end)
                    try:
                        getattr(self.cl, f'load_ccems{mooring:d}')(stride=low_res_stride)
                    except NoValidData as e:
                        self.cl.logger.warn(str(e))
                for event in self.hires_event_times:
                    setattr(self.cl, f'ccems{mooring:d}_start_datetime', event.start)
                    setattr(self.cl, f'ccems{mooring:d}_end_datetime', event.end)
                    try:
                        getattr(self.cl, f'load_ccems{mooring:d}')(stride=high_res_stride)
                    except NoValidData as e:
                        self.cl.logger.warn(str(e))

    def load_ccesin_ev(self, low_res_stride=300, high_res_stride=1):
        """Load SIN event-window data: ADCP profiles at low resolution for
        the lores windows, then (if configured) the timeseries files at
        high resolution for the hires windows.
        """
        # Assign standard attributes with the data we want loaded just for the events
        setattr(self.cl, 'ccesin_base', self.cl.ccesin_base_ev)
        setattr(self.cl, 'ccesin_files', self.cl.ccesin_files_ev)
        setattr(self.cl, 'ccesin_parms', self.cl.ccesin_parms_ev)
        setattr(self.cl, 'ccesin_nominaldepth', self.cl.ccesin_nominaldepth_ev)

        # SIN: start and end times Low-res with stride for 10 minute intervals
        for event in lores_event_times:
            setattr(self.cl, 'ccesin_start_datetime', event.start)
            setattr(self.cl, 'ccesin_end_datetime', event.end)
            try:
                getattr(self.cl, 'loadCCESIN')(stride=low_res_stride)
            except NoValidData as e:
                self.cl.logger.warn(str(e))

        # SIN: start and end times High-res with stride for 2 seconds intervals
        for event in hires_event_times:
            setattr(self.cl, 'ccesin_start_datetime', event.start)
            setattr(self.cl, 'ccesin_end_datetime', event.end)
            if hasattr(self.cl, 'ccesin_files_ev_hires') and hasattr(self.cl, 'ccesin_parms_ev_hires'):
                setattr(self.cl, 'ccesin_files', self.cl.ccesin_files_ev_hires)
                setattr(self.cl, 'ccesin_parms', self.cl.ccesin_parms_ev_hires)
            try:
                getattr(self.cl, 'loadCCESIN')(stride=high_res_stride)
            except NoValidData as e:
                self.cl.logger.warn(str(e))
if __name__ == '__main__':
    # Entry point: choose stride profile from command-line flags parsed by
    # CCELoader.process_command_line() (run inside the constructor).
    campaign = CCE_2015_Campaign()
    if campaign.cl.args.test:
        # Fast smoke-test load with coarse strides.
        campaign.load_ccemoorings(stride=100, start_mooring=1, end_mooring=5)
        campaign.load_ccemoorings_ev(low_res_stride=10, start_mooring=1, end_mooring=5)
        campaign.cl.loadCCESIN(stride=1000)     # Normal base class loader for entire time series
        campaign.load_ccesin_ev(low_res_stride=1000, high_res_stride=100)
        campaign.cl.bed_depths = [np.round(d, 1) for d in campaign.cl.get_start_bed_depths()]
        campaign.cl.loadBEDS(stride=100, featureType='trajectory')

    elif campaign.cl.args.optimal_stride:
        # Recommended strides for a full but economical load.
        campaign.load_ccemoorings(stride=10)
        campaign.load_ccemoorings_ev(low_res_stride=10, high_res_stride=2)
        campaign.cl.loadCCESIN(stride=1000)
        campaign.load_ccesin_ev(low_res_stride=300, high_res_stride=2)
        campaign.cl.bed_depths = [np.round(d, 1) for d in campaign.cl.get_start_bed_depths()]
        campaign.cl.loadBEDS(stride=1, featureType='trajectory')

    else:
        # User-supplied stride for moorings; fixed strides elsewhere.
        campaign.cl.stride = campaign.cl.args.stride
        campaign.load_ccemoorings()
        campaign.load_ccemoorings_ev()
        campaign.cl.loadCCESIN(stride=300)
        campaign.load_ccesin_ev()
        campaign.cl.bed_depths = [np.round(d, 1) for d in campaign.cl.get_start_bed_depths()]
        campaign.cl.loadBEDS(featureType='trajectory')

    # Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
    campaign.cl.addTerrainResources()

    campaign.cl.logger.info("All Done.")
|
danellecline/stoqs
|
stoqs/loaders/CCE/loadCCE_2015.py
|
Python
|
gpl-3.0
| 29,595
|
[
"NetCDF"
] |
89b6136d6760b013bc8e7af6e89558a750195bcd922fb42759b18decdb6160b8
|
# TODO: to be transformed into a real test
########################################################################
# File: ReplicateAndRegisterTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/05/13 18:38:55
########################################################################
""" :mod: FullChainTest
===================
.. module: FullChainTests
:synopsis: full chain integration test for DMS operation handlers
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
unittest for replicateAndRegister operation handler
"""
import random
import os
import sys
import DIRAC
DIRAC.initialize() # Initialize configuration
from DIRAC import gLogger
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getGroupsForUser, getDNForUsername
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
########################################################################
class FullChainTest(object):
    """
    .. class:: FullChainTest

    creates and puts to the ReqDB full chain tests for RMS and DMS operations

    * RemoveFile
    * PutAndRegister
    * ReplicateAndRegister
    * RemoveReplica
    * RemoveFile
    """

    def buildRequest(self, owner, group, sourceSE, targetSE1, targetSE2):
        """Build a Request chaining remove/put/replicate/remove operations.

        :param str owner: user name used to build the test LFNs
        :param str group: DIRAC group (selects user vs certification LFN path)
        :param str sourceSE: SE used for the initial PutAndRegister
        :param str targetSE1: first replication target SE
        :param str targetSE2: second replication target SE
        :return: a fully populated :class:`Request`
        """
        files = self.files(owner, group)

        putAndRegister = Operation()
        putAndRegister.Type = "PutAndRegister"
        putAndRegister.TargetSE = sourceSE
        for fname, lfn, size, checksum, guid in files:
            putFile = File()
            putFile.LFN = lfn
            putFile.PFN = fname
            putFile.Checksum = checksum
            putFile.ChecksumType = "adler32"
            putFile.Size = size
            putFile.GUID = guid
            putAndRegister.addFile(putFile)

        replicateAndRegister = Operation()
        replicateAndRegister.Type = "ReplicateAndRegister"
        replicateAndRegister.TargetSE = "%s,%s" % (targetSE1, targetSE2)
        for fname, lfn, size, checksum, guid in files:
            repFile = File()
            repFile.LFN = lfn
            repFile.Size = size
            repFile.Checksum = checksum
            repFile.ChecksumType = "adler32"
            replicateAndRegister.addFile(repFile)

        removeReplica = Operation()
        removeReplica.Type = "RemoveReplica"
        removeReplica.TargetSE = sourceSE
        for fname, lfn, size, checksum, guid in files:
            removeReplica.addFile(File({"LFN": lfn}))

        removeFile = Operation()
        removeFile.Type = "RemoveFile"
        for fname, lfn, size, checksum, guid in files:
            removeFile.addFile(File({"LFN": lfn}))

        removeFileInit = Operation()
        removeFileInit.Type = "RemoveFile"
        for fname, lfn, size, checksum, guid in files:
            removeFileInit.addFile(File({"LFN": lfn}))

        # Order matters: clean up first, then put, replicate, and remove.
        req = Request()
        req.addOperation(removeFileInit)
        req.addOperation(putAndRegister)
        req.addOperation(replicateAndRegister)
        req.addOperation(removeReplica)
        req.addOperation(removeFile)

        return req

    def files(self, userName, userGroup):
        """Create ten small files in /tmp filled with random digits.

        :return: list of tuples (local path, LFN, size, adler32 checksum, GUID)
        """
        files = []
        for i in range(10):
            fname = "/tmp/testUserFile-%s" % i
            if userGroup == "dteam_user":
                lfn = "/lhcb/user/%s/%s/%s" % (userName[0], userName, fname.split("/")[-1])
            else:
                lfn = "/lhcb/certification/test/rmsdms/%s" % fname.split("/")[-1]
            # Context manager guarantees the handle is closed even if a
            # write raises (original code leaked the handle on error).
            # Inner loop variable renamed from ``i`` to stop shadowing the
            # outer file index.
            with open(fname, "w+") as fh:
                for j in range(100):
                    fh.write(str(random.randint(0, j)))
            size = os.stat(fname).st_size
            checksum = fileAdler(fname)
            guid = makeGuid(fname)
            files.append((fname, lfn, size, checksum, guid))
        return files

    def putRequest(self, userName, userDN, userGroup, sourceSE, targetSE1, targetSE2):
        """Build the test request, log its contents, and put it to the ReqDB.

        Any existing request with the same name is deleted first.

        :return: the S_OK/S_ERROR dict from deleteRequest or putRequest
        """
        req = self.buildRequest(userName, userGroup, sourceSE, targetSE1, targetSE2)
        req.RequestName = "test%s-%s" % (userName, userGroup)
        req.OwnerDN = userDN
        req.OwnerGroup = userGroup
        gLogger.always("putRequest: request '%s'" % req.RequestName)
        for op in req:
            gLogger.always("putRequest: => %s %s %s" % (op.Order, op.Type, op.TargetSE))
            for f in op:
                gLogger.always("putRequest: ===> file %s" % f.LFN)
        reqClient = ReqClient()
        delete = reqClient.deleteRequest(req.RequestName)
        if not delete["OK"]:
            gLogger.error("putRequest: %s" % delete["Message"])
            return delete
        put = reqClient.putRequest(req)
        if not put["OK"]:
            gLogger.error("putRequest: %s" % put["Message"])
        return put
# # test execution
if __name__ == "__main__":
    if len(sys.argv) != 5:
        # interpolate the script name; the original string contained a bare
        # '%s' placeholder with no argument applied, printing a literal '%s'
        gLogger.error("Usage:\n python %s userGroup SourceSE TargetSE1 TargetSE2\n" % sys.argv[0])
        sys.exit(-1)
    userGroup = sys.argv[1]
    sourceSE = sys.argv[2]
    targetSE1 = sys.argv[3]
    targetSE2 = sys.argv[4]
    gLogger.always("will use '%s' group" % userGroup)

    # resolve the current user from the active proxy
    admin = DiracAdmin()
    userName = admin._getCurrentUser()
    if not userName["OK"]:
        gLogger.error(userName["Message"])
        sys.exit(-1)
    userName = userName["Value"]
    gLogger.always("current user is '%s'" % userName)

    # the requested group must be one the user actually belongs to
    userGroups = getGroupsForUser(userName)
    if not userGroups["OK"]:
        gLogger.error(userGroups["Message"])
        sys.exit(-1)
    userGroups = userGroups["Value"]
    if userGroup not in userGroups:
        gLogger.error("'%s' is not a member of the '%s' group" % (userName, userGroup))
        sys.exit(-1)

    userDN = getDNForUsername(userName)
    if not userDN["OK"]:
        gLogger.error(userDN["Message"])
        sys.exit(-1)
    userDN = userDN["Value"][0]
    gLogger.always("userDN is %s" % userDN)

    fct = FullChainTest()
    put = fct.putRequest(userName, userDN, userGroup, sourceSE, targetSE1, targetSE2)
|
DIRACGrid/DIRAC
|
tests/Integration/RequestManagementSystem/FIXME_IntegrationFCT.py
|
Python
|
gpl-3.0
| 6,376
|
[
"DIRAC"
] |
6f983845d1d70dfe044a6bbaf7314b64bd36dec013b4eb65a8c53af0d53ae25c
|
'''
Usage:
This program creates the NetCDF files holding the 24, 48, ..., 120 hr
forecast rainfall together with the corresponding observed rainfall.

Written by: Dileepkumar R
JRF- IIT DELHI

Date: 23.06.2011;
'''
import numpy.ma
import ctgfunction
import data_access
import cdms2
import cdutil
from regrid2 import Regridder

# accessors for the NCMRWF model (grib) data and the regridded observations
gobj = data_access.Grib_Access(dataPath = '/home/dileep/Desktop/NCMRWF/NCMRWF/Monsoon_2010',modelName = 'NCMRWF2010')
obs = data_access.Rainfall_Access(xmlPath = '/home/dileep/rainfall_regrided/rainfall_regrided.xml', xmlVar = 'pobs')
# 2010 monsoon season and the five forecast lead times (hours)
time_list_grib=('2010-6-1','2010-9-30')
time_fcst=[24, 48, 72, 96, 120]
for i in xrange(len(time_fcst)):
    # observed rainfall and the matching forecast, restricted to the Indian region
    rain,fcst_grib = gobj.getRainfallDataPartners(date = time_list_grib, hour = time_fcst[i], level = 'all',orginData = 1,datePriority = 'o',rainObject = obs, lat=(0,40),lon=(60,100))
    fcst_grib.id='fcst_grib_%d' %(time_fcst[i])
    filename_2='/home/dileep/NCMR_fcst_obs_nc/ncmr_fcst_%d.nc' %(time_fcst[i])
    filename_1='/home/dileep/NCMR_fcst_obs_nc/ncmr_obs.nc'
    if i==0:
        # observations are identical for every lead time, write them only once
        rain.id='rain_obs'
        f=cdms2.open(filename_1, 'w')
        f.write(rain)
        f.close()
    g=cdms2.open(filename_2, 'w')
    g.write(fcst_grib)
    g.close()
|
arulalant/mmDiagnosis
|
diagnosis1/extra/pygrib_code/rainfall_regrid_generating_nc.py
|
Python
|
gpl-3.0
| 1,210
|
[
"NetCDF"
] |
58ecd3b32bd1cb25c3f29c48dd2a7b13befa2980f14e5f8d99035886669ac5af
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Extract neural activity from a fluorescence trace using a constrained deconvolution approach
Created on Tue Sep 1 16:11:25 2015
@author: Eftychios A. Pnevmatikakis, based on an implementation by T. Machado, Andrea Giovannucci & Ben Deverett
"""
from builtins import range
from past.utils import old_div
import numpy as np
import scipy.signal
import scipy.linalg
from warnings import warn
from math import log, sqrt, exp
import sys
#%%
def constrained_foopsi(fluor, bl=None, c1=None, g=None, sn=None, p=None, method_deconvolution='oasis', bas_nonneg=True,
                       noise_range=[.25, .5], noise_method='logmexp', lags=5, fudge_factor=1.,
                       verbosity=False, solvers=None, optimize_g=0, s_min=None, **kwargs):
    """ Infer the most likely discretized spike train underlying a fluorescence trace

    It relies on a noise constrained deconvolution approach

    Args:
        fluor: np.ndarray
            One dimensional array containing the fluorescence intensities with
            one entry per time-bin.

        bl: [optional] float
            Fluorescence baseline value. If no value is given, then bl is estimated
            from the data.

        c1: [optional] float
            value of calcium at time 0

        g: [optional] list,float
            Parameters of the AR process that models the fluorescence impulse response.
            Estimated from the data if no value is given

        sn: float, optional
            Standard deviation of the noise distribution.  If no value is given,
            then sn is estimated from the data.

        p: int
            order of the autoregression model

        method_deconvolution: [optional] string
            solution method for basis projection pursuit 'cvx' or 'cvxpy' or 'oasis'

        bas_nonneg: bool
            baseline strictly non-negative

        noise_range: list of two elms
            frequency range for averaging noise PSD

        noise_method: string
            method of averaging noise PSD

        lags: int
            number of lags for estimating time constants

        fudge_factor: float
            fudge factor for reducing time constant bias

        verbosity: bool
            display optimization details

        solvers: list string
            primary and secondary (if problem unfeasible for approx solution) solvers
            to be used with cvxpy, default is ['ECOS','SCS']

        optimize_g : [optional] int, only applies to method 'oasis'
            Number of large, isolated events to consider for optimizing g.
            If optimize_g=0 (default) the provided or estimated g is not further optimized.

        s_min : float, optional, only applies to method 'oasis'
            Minimal non-zero activity within each bin (minimal 'spike size').
            For negative values the threshold is abs(s_min) * sn * sqrt(1-g)
            If None (default) the standard L1 penalty is used
            If 0 the threshold is determined automatically such that RSS <= sn^2 T

    Returns:
        c: np.ndarray float
            The inferred denoised fluorescence signal at each time-bin.

        bl, c1, g, sn : As explained above

        sp: ndarray of float
            Discretized deconvolved neural activity (spikes)

        lam: float
            Regularization parameter

    Raises:
        Exception("You must specify the value of p")

        Exception('OASIS is currently only implemented for p=1 and p=2')

        Exception('Undefined Deconvolution Method')

    References:
        * Pnevmatikakis et al. 2016. Neuron, in press, http://dx.doi.org/10.1016/j.neuron.2015.11.037
        * Machado et al. 2015. Cell 162(2):338-350

    \image: docs/img/deconvolution.png
    \image: docs/img/evaluationcomponent.png
    """

    if p is None:
        raise Exception("You must specify the value of p")

    if g is None or sn is None:
        # Estimate noise standard deviation and AR coefficients if they are not present
        g, sn = estimate_parameters(fluor, p=p, sn=sn, g=g, range_ff=noise_range,
                                    method=noise_method, lags=lags, fudge_factor=fudge_factor)
    # dual (sparsity) parameter; only the OASIS solvers report it
    lam = None
    if p == 0:
        # no AR dynamics at all: the denoised trace is just the rectified data
        c1 = 0
        g = np.array(0)
        bl = 0
        c = np.maximum(fluor, 0)
        sp = c.copy()
    else:   # choose a source extraction method
        if method_deconvolution == 'cvx':
            c, bl, c1, g, sn, sp = cvxopt_foopsi(
                fluor, b=bl, c1=c1, g=g, sn=sn, p=p, bas_nonneg=bas_nonneg, verbosity=verbosity)

        elif method_deconvolution == 'cvxpy':
            c, bl, c1, g, sn, sp = cvxpy_foopsi(
                fluor, g, sn, b=bl, c1=c1, bas_nonneg=bas_nonneg, solvers=solvers)

        elif method_deconvolution == 'oasis':
            from caiman.source_extraction.cnmf.oasis import constrained_oasisAR1
            # a user-provided s_min switches OASIS to the (thresholded) L0 variant
            penalty = 1 if s_min is None else 0
            if p == 1:
                if bl is None:
                    # Infer the most likely discretized spike train underlying an AR(1) fluorescence trace
                    # Solves the noise constrained sparse non-negative deconvolution problem
                    # min |s|_1 subject to |c-y|^2 = sn^2 T and s_t = c_t-g c_{t-1} >= 0
                    c, sp, bl, g, lam = constrained_oasisAR1(
                        fluor.astype(np.float32), g[0], sn, optimize_b=True, b_nonneg=bas_nonneg,
                        optimize_g=optimize_g, penalty=penalty, s_min=0 if s_min is None else s_min)
                else:
                    c, sp, _, g, lam = constrained_oasisAR1(
                        (fluor - bl).astype(np.float32), g[0], sn, optimize_b=False, penalty=penalty,
                        s_min=0 if s_min is None else s_min)

                c1 = c[0]

                # remove intial calcium to align with the other foopsi methods
                # it is added back in function constrained_foopsi_parallel of temporal.py
                c -= c1 * g**np.arange(len(fluor))
            elif p == 2:
                if bl is None:
                    c, sp, bl, g, lam = constrained_oasisAR2(
                        fluor.astype(np.float32), g, sn, optimize_b=True, b_nonneg=bas_nonneg,
                        optimize_g=optimize_g, penalty=penalty, s_min=s_min)
                else:
                    c, sp, _, g, lam = constrained_oasisAR2(
                        (fluor - bl).astype(np.float32), g, sn, optimize_b=False,
                        penalty=penalty, s_min=s_min)
                c1 = c[0]
                # dominant decay constant of the AR(2) impulse response
                d = (g[0] + sqrt(g[0] * g[0] + 4 * g[1])) / 2
                c -= c1 * d**np.arange(len(fluor))
            else:
                raise Exception(
                    'OASIS is currently only implemented for p=1 and p=2')
            g = np.ravel(g)

        else:
            raise Exception('Undefined Deconvolution Method')

    return c, bl, c1, g, sn, sp, lam
def G_inv_mat(x, mode, NT, gs, gd_vec, bas_flag=True, c1_flag=True):
"""
Fast computation of G^{-1}*x and G^{-T}*x
"""
from scipy.signal import lfilter
if mode == 1:
b = lfilter(np.array([1]), np.concatenate([np.array([1.]), -gs]), x[:NT]
) + bas_flag * x[NT - 1 + bas_flag] + c1_flag * gd_vec * x[-1]
elif mode == 2:
b = np.hstack((np.flipud(lfilter(np.array([1]), np.concatenate([np.array(
[1.]), -gs]), np.flipud(x))), np.ones(bas_flag) * np.sum(x), np.ones(c1_flag) * np.sum(gd_vec * x)))
return b
def cvxopt_foopsi(fluor, b, c1, g, sn, p, bas_nonneg, verbosity):
    """Solve the deconvolution problem using cvxopt and picos packages

    Args:
        fluor: fluorescence trace
        b: baseline value (None -> estimated as an additional variable)
        c1: initial calcium value (None -> estimated as an additional variable)
        g: AR coefficients of length p
        sn: noise standard deviation
        p: order of the AR model
        bas_nonneg: constrain the baseline to be non-negative
        verbosity: verbose solver output

    Returns:
        c, b, c1, g, sn, sp (see constrained_foopsi)
    """
    try:
        from cvxopt import matrix, spmatrix, spdiag, solvers
        import picos
    except ImportError:
        raise ImportError(
            'Constrained Foopsi requires cvxopt and picos packages.')
    T = len(fluor)
    # construct deconvolution matrix  (sp = G*c)
    G = spmatrix(1., list(range(T)), list(range(T)), (T, T))
    for i in range(p):
        G = G + spmatrix(-g[i], np.arange(i + 1, T),
                         np.arange(T - i - 1), (T, T))
    gr = np.roots(np.concatenate([np.array([1]), -g.flatten()]))
    gd_vec = np.max(gr)**np.arange(T)  # decay vector for initial fluorescence
    gen_vec = G * matrix(np.ones(fluor.size))
    # Initialize variables in our problem
    prob = picos.Problem()
    # Define variables
    calcium_fit = prob.add_variable('calcium_fit', fluor.size)
    cnt = 0  # number of extra scalar variables appended (b and/or c1)
    if b is None:
        flag_b = True
        cnt += 1
        b = prob.add_variable('b', 1)
        if bas_nonneg:
            b_lb = 0
        else:
            b_lb = np.min(fluor)
        prob.add_constraint(b >= b_lb)
    else:
        flag_b = False
    if c1 is None:
        flag_c1 = True
        cnt += 1
        c1 = prob.add_variable('c1', 1)
        prob.add_constraint(c1 >= 0)
    else:
        flag_c1 = False
    # Add constraints: non-negative spikes and residual within the noise level
    prob.add_constraint(G * calcium_fit >= 0)
    res = abs(matrix(fluor.astype(float)) - calcium_fit - b *
              matrix(np.ones(fluor.size)) - matrix(gd_vec) * c1)
    prob.add_constraint(res < sn * np.sqrt(fluor.size))
    prob.set_objective('min', calcium_fit.T * gen_vec)
    # solve problem
    try:
        prob.solve(solver='mosek', verbose=verbosity)
    except ImportError:
        warn('MOSEK is not installed. Spike inference may be VERY slow!')
        prob.solver_selection()
        prob.solve(verbose=verbosity)
    # if problem in infeasible due to low noise value then project onto the
    # cone of linear constraints with cvxopt
    if prob.status == 'prim_infeas_cer' or prob.status == 'dual_infeas_cer' or prob.status == 'primal infeasible':
        warn('Original problem infeasible. Adjusting noise level and re-solving')
        # setup quadratic problem with cvxopt
        solvers.options['show_progress'] = verbosity
        ind_rows = list(range(T))
        ind_cols = list(range(T))
        vals = np.ones(T)
        if flag_b:
            ind_rows = ind_rows + list(range(T))
            ind_cols = ind_cols + [T] * T
            vals = np.concatenate((vals, np.ones(T)))
        if flag_c1:
            ind_rows = ind_rows + list(range(T))
            ind_cols = ind_cols + [T + cnt - 1] * T
            vals = np.concatenate((vals, np.squeeze(gd_vec)))
        P = spmatrix(vals, ind_rows, ind_cols, (T, T + cnt))
        H = P.T * P
        Py = P.T * matrix(fluor.astype(float))
        sol = solvers.qp(
            H, -Py, spdiag([-G, -spmatrix(1., list(range(cnt)), list(range(cnt)))]), matrix(0., (T + cnt, 1)))
        xx = sol['x']
        c = np.array(xx[:T])
        sp = np.array(G * matrix(c))
        c = np.squeeze(c)
        if flag_b:
            # NOTE(review): index T+1 looks suspicious -- the baseline should be
            # the first appended variable, i.e. xx[T]; confirm against cvxopt layout
            b = np.array(xx[T + 1]) + b_lb
        if flag_c1:
            c1 = np.array(xx[-1])
        # re-estimate the noise level from the projected solution
        sn = old_div(np.linalg.norm(fluor - c - c1 * gd_vec - b), np.sqrt(T))
    else:  # readout picos solution
        c = np.squeeze(calcium_fit.value)
        sp = np.squeeze(np.asarray(G * calcium_fit.value))
        if flag_b:
            b = np.squeeze(b.value)
        if flag_c1:
            c1 = np.squeeze(c1.value)
    return c, b, c1, g, sn, sp
def cvxpy_foopsi(fluor, g, sn, b=None, c1=None, bas_nonneg=True, solvers=None):
    """Solves the deconvolution problem using the cvxpy package and the ECOS/SCS library.

    Args:
        fluor: ndarray
            fluorescence trace

        g: list of doubles
            parameters of the autoregressive model, cardinality equivalent to p

        sn: double
            estimated noise level

        b: double
            baseline level. If None it is estimated.

        c1: double
            initial value of calcium. If None it is estimated.

        bas_nonneg: boolean
            should the baseline be estimated

        solvers: tuple of two strings
            primary and secondary solvers to be used. Can be choosen between ECOS, SCS, CVXOPT

    Returns:
        c: estimated calcium trace

        b: estimated baseline

        c1: esimtated initial calcium value

        g: esitmated parameters of the autoregressive model

        sn: estimated noise level

        sp: estimated spikes

    Raises:
        ImportError 'cvxpy solver requires installation of cvxpy. Not working in windows at the moment.'

        ValueError 'Problem solved suboptimally or unfeasible'
    """
    # todo: check the result and gen_vector vars
    try:
        import cvxpy as cvx

    except ImportError: # XXX Is the below still true?
        raise ImportError(
            'cvxpy solver requires installation of cvxpy. Not working in windows at the moment.')

    if solvers is None:
        solvers = ['ECOS', 'SCS']

    T = fluor.size

    # construct deconvolution matrix  (sp = G*c)
    G = scipy.sparse.dia_matrix((np.ones((1, T)), [0]), (T, T))

    for i, gi in enumerate(g):
        G = G + \
            scipy.sparse.dia_matrix((-gi * np.ones((1, T)), [-1 - i]), (T, T))

    gr = np.roots(np.concatenate([np.array([1]), -g.flatten()]))
    gd_vec = np.max(gr)**np.arange(T)  # decay vector for initial fluorescence
    gen_vec = G.dot(scipy.sparse.coo_matrix(np.ones((T, 1))))

    c = cvx.Variable(T)  # calcium at each time step
    constraints = []
    cnt = 0
    if b is None:
        flag_b = True
        cnt += 1
        b = cvx.Variable(1)  # baseline value
        if bas_nonneg:
            b_lb = 0
        else:
            b_lb = np.min(fluor)

        constraints.append(b >= b_lb)
    else:
        flag_b = False

    if c1 is None:
        flag_c1 = True
        cnt += 1
        c1 = cvx.Variable(1)  # initial calcium value
        constraints.append(c1 >= 0)
    else:
        flag_c1 = False

    # maximal residual norm allowed by the noise constraint
    thrNoise = sn * np.sqrt(fluor.size)

    try:
        # minimize number of spikes
        objective = cvx.Minimize(cvx.norm(G * c, 1))
        constraints.append(G * c >= 0)
        constraints.append(
            cvx.norm(-c + fluor - b - gd_vec * c1, 2) <= thrNoise)  # constraints
        prob = cvx.Problem(objective, constraints)
        result = prob.solve(solver=solvers[0])

        if not (prob.status == 'optimal' or prob.status == 'optimal_inaccurate'):
            raise ValueError('Problem solved suboptimally or unfeasible')

        print(('PROBLEM STATUS:' + prob.status))
        sys.stdout.flush()
    except (ValueError, cvx.SolverError):  # if solvers fail to solve the problem
        # fall back to a penalized (Lagrangian) formulation without the
        # hard noise constraint
        lam = old_div(sn, 500)
        constraints = constraints[:-1]
        objective = cvx.Minimize(cvx.norm(-c + fluor - b - gd_vec *
                                          c1, 2) + lam * cvx.norm(G * c, 1))
        prob = cvx.Problem(objective, constraints)

        try:  # in case scs was not installed properly
            try:
                print('TRYING AGAIN ECOS')
                sys.stdout.flush()
                result = prob.solve(solver=solvers[0])
            except:
                print((solvers[0] + ' DID NOT WORK TRYING ' + solvers[1]))
                result = prob.solve(solver=solvers[1])
        except:
            sys.stderr.write(
                '***** SCS solver failed, try installing and compiling SCS for much faster performance. '
                'Otherwise set the solvers in tempora_params to ["ECOS","CVXOPT"]')
            sys.stderr.flush()
            raise

    if not (prob.status == 'optimal' or prob.status == 'optimal_inaccurate'):
        # both attempts failed: return the raw trace unchanged
        print(('PROBLEM STATUS:' + prob.status))
        sp = fluor
        c = fluor
        b = 0
        c1 = 0
        return c, b, c1, g, sn, sp

    sp = np.squeeze(np.asarray(G * c.value))
    c = np.squeeze(np.asarray(c.value))
    if flag_b:
        b = np.squeeze(b.value)
    if flag_c1:
        c1 = np.squeeze(c1.value)
    return c, b, c1, g, sn, sp
def _nnls(KK, Ky, s=None, mask=None, tol=1e-9, max_iter=None):
    """
    Solve the non-negative least squares problem
    ``argmin_s || Ks - y ||_2`` for ``s>=0`` given the normal-equation
    quantities KK = K'K and Ky = K'y.

    Active-set method of Lawson & Hanson (1987) with the warm-start
    variant of Bro & DeJong (1997).

    Args:
        KK : array, shape (n, n)
            Dot-product of design matrix K transposed and K, K'K

        Ky : array, shape (n,)
            Dot-product of design matrix K transposed and target vector y, K'y

        s : None or array, shape (n,), optional, default None
            Initialization of deconvolved neural activity.

        mask : array of bool, shape (n,), optional, default (True,)*n
            Mask to restrict potential spike times considered.

        tol : float, optional, default 1e-9
            Tolerance parameter.

        max_iter : None or int, optional, default None
            Maximum number of iterations before termination.
            If None (default), it is set to len(KK).

    Returns:
        s : array, shape (n,)
            Discretized deconvolved neural activity (spikes)
    """
    if mask is None:
        mask = np.ones(len(KK), dtype=bool)
    else:
        # restrict the normal equations to the allowed entries
        KK = KK[mask][:, mask]
        Ky = Ky[mask]
    if s is None:
        # cold start: empty passive set, gradient equals K'y
        s = np.zeros(len(KK))
        grad = Ky.copy()
        passive = np.zeros(len(KK), dtype=bool)
    else:
        # warm start from a previous solution
        s = s[mask]
        passive = s > 0
        grad = Ky - KK[:, passive].dot(s[passive])
    if max_iter is None:
        max_iter = len(KK)

    def solve_on_passive():
        # unconstrained least squares restricted to the passive set, with a
        # small ridge fallback in case the subsystem is singular
        try:
            return np.linalg.solve(KK[passive][:, passive], Ky[passive])
        except:
            sol = np.linalg.solve(KK[passive][:, passive] + tol * np.eye(passive.sum()),
                                  Ky[passive])
            print(r'added $\epsilon$I to avoid singularity')
            return sol

    for _ in range(max_iter):  # grad is re-checked at the loop's end
        # move the most violating coordinate into the passive set
        passive[np.argmax(grad)] = True
        mu = solve_on_passive()
        # inner loop: back off along the segment towards mu until feasible
        while mu.size and mu.min() < 0:
            neg = mu < 0
            step = (s[passive][neg] / (s[passive][neg] - mu[neg])).min()
            s[passive] += step * (mu - s[passive])
            passive[s <= tol] = False
            mu = solve_on_passive()
        s[passive] = mu.copy()
        grad = Ky - KK[:, passive].dot(s[passive])
        if grad.max() < tol:
            break
    out = np.zeros(len(mask))
    out[mask] = s
    return out
def onnls(y, g, lam=0, shift=100, window=None, mask=None, tol=1e-9, max_iter=None):
    """ Infer the most likely discretized spike train underlying an AR(2) fluorescence trace

    Solves the sparse non-negative deconvolution problem
    ``argmin_s 1/2|Ks-y|^2 + lam |s|_1`` for ``s>=0``

    Args:
        y : array of float, shape (T,)
            One dimensional array containing the fluorescence intensities with
            one entry per time-bin.

        g : array, shape (p,)
            if p in (1,2):
                Parameter(s) of the AR(p) process that models the fluorescence impulse response.
            else:
                Kernel that models the fluorescence impulse response.

        lam : float, optional, default 0
            Sparsity penalty parameter lambda.

        shift : int, optional, default 100
            Number of frames by which to shift window from on run of NNLS to the next.

        window : int, optional, default None (200 or larger dependend on g)
            Window size.

        mask : array of bool, shape (n,), optional, default (True,)*n
            Mask to restrict potential spike times considered.

        tol : float, optional, default 1e-9
            Tolerance parameter.

        max_iter : None or int, optional, default None
            Maximum number of iterations before termination.
            If None (default), it is set to window size.

    Returns:
        c : array of float, shape (T,)
            The inferred denoised fluorescence signal at each time-bin.

        s : array of float, shape (T,)
            Discretized deconvolved neural activity (spikes).

    References:
        Friedrich J and Paninski L, NIPS 2016
        Bro R and DeJong S, J Chemometrics 1997
    """
    T = len(y)
    if mask is None:
        mask = np.ones(T, dtype=bool)
    if window is None:
        # choose a window long enough for the kernel to decay away (~5 time constants)
        w = max(200, len(g) if len(g) > 2 else
                int(-5 / log(g[0] if len(g) == 1 else
                             (g[0] + sqrt(g[0] * g[0] + 4 * g[1])) / 2)))
    else:
        w = window
    w = min(T, w)
    shift = min(w, shift)
    # K is the (windowed) convolution matrix of the impulse response h
    K = np.zeros((w, w))
    # the sparsity penalty is folded into the data _y
    if len(g) == 1:  # kernel for AR(1)
        _y = y - lam * (1 - g[0])
        _y[-1] = y[-1] - lam
        h = np.exp(log(g[0]) * np.arange(w))
        for i in range(w):
            K[i:, i] = h[:w - i]
    elif len(g) == 2:  # kernel for AR(2)
        _y = y - lam * (1 - g[0] - g[1])
        _y[-2] = y[-2] - lam * (1 - g[0])
        _y[-1] = y[-1] - lam
        # roots of the AR(2) characteristic polynomial: decay and rise constants
        d = (g[0] + sqrt(g[0] * g[0] + 4 * g[1])) / 2
        r = (g[0] - sqrt(g[0] * g[0] + 4 * g[1])) / 2
        if d == r:
            h = np.exp(log(d) * np.arange(1, w + 1)) * np.arange(1, w + 1)
        else:
            h = (np.exp(log(d) * np.arange(1, w + 1)) -
                 np.exp(log(r) * np.arange(1, w + 1))) / (d - r)
        for i in range(w):
            K[i:, i] = h[:w - i]
    else:  # arbitrary kernel
        h = g
        for i in range(w):
            K[i:, i] = h[:w - i]
        a = np.linalg.inv(K).sum(0)
        _y = y - lam * a[0]
        _y[-w:] = y[-w:] - lam * a
    s = np.zeros(T)
    KK = K.T.dot(K)
    # slide a window of size w across the trace, warm-starting NNLS each time
    for i in range(0, max(1, T - w), shift):
        s[i:i + w] = _nnls(KK, K.T.dot(_y[i:i + w]), s[i:i + w], mask=mask[i:i + w],
                           tol=tol, max_iter=max_iter)[:w]
        # subtract contribution of spikes already committed to
        _y[i:i + w] -= K[:, :shift].dot(s[i:i + shift])
    # final solve on the remaining tail of the trace
    s[i + shift:] = _nnls(KK[-(T - i - shift):, -(T - i - shift):],
                          K[:T - i - shift, :T - i -
                            shift].T.dot(_y[i + shift:]),
                          s[i + shift:], mask=mask[i + shift:])
    # reconstruct the denoised trace by convolving the spikes with the kernel
    c = np.zeros_like(s)
    for t in np.where(s > tol)[0]:
        c[t:t + w] += s[t] * h[:min(w, T - t)]
    return c, s
def constrained_oasisAR2(y, g, sn, optimize_b=True, b_nonneg=True, optimize_g=0, decimate=5,
                         shift=100, window=None, tol=1e-9, max_iter=1, penalty=1, s_min=0):
    """ Infer the most likely discretized spike train underlying an AR(2) fluorescence trace

    Solves the noise constrained sparse non-negative deconvolution problem
    min (s)_1 subject to (c-y)^2 = sn^2 T and s_t = c_t-g1 c_{t-1}-g2 c_{t-2} >= 0

    Args:
        y : array of float
            One dimensional array containing the fluorescence intensities (with baseline
            already subtracted) with one entry per time-bin.

        g : (float, float)
            Parameters of the AR(2) process that models the fluorescence impulse response.

        sn : float
            Standard deviation of the noise distribution.

        optimize_b : bool, optional, default True
            Optimize baseline if True else it is set to 0, see y.

        b_nonneg: bool, optional, default True
            Enforce strictly non-negative baseline if True.

        optimize_g : int, optional, default 0
            Number of large, isolated events to consider for optimizing g.
            No optimization if optimize_g=0.

        decimate : int, optional, default 5
            Decimation factor for estimating hyper-parameters faster on decimated data.

        shift : int, optional, default 100
            Number of frames by which to shift window from on run of NNLS to the next.

        window : int, optional, default None (200 or larger dependend on g)
            Window size.

        tol : float, optional, default 1e-9
            Tolerance parameter.

        max_iter : int, optional, default 1
            Maximal number of iterations.

        penalty : int, optional, default 1
            Sparsity penalty. 1: min (s)_1  0: min (s)_0

        s_min : float, optional, default 0
            Minimal non-zero activity within each bin (minimal 'spike size').
            For negative values the threshold is |s_min| * sn * sqrt(1-decay_constant)
            If 0 the threshold is determined automatically such that RSS <= sn^2 T

    Returns:
        c : array of float
            The inferred denoised fluorescence signal at each time-bin.

        s : array of float
            Discretized deconvolved neural activity (spikes).

        b : float
            Fluorescence baseline value.

        (g1, g2) : tuple of float
            Parameters of the AR(2) process that models the fluorescence impulse response.

        lam : float
            Sparsity penalty parameter lambda of dual problem.

    References:
        Friedrich J and Paninski L, NIPS 2016
        Friedrich J, Zhou P, and Paninski L, arXiv 2016
    """
    T = len(y)
    # d and r: decay and rise constants (roots of the AR(2) polynomial)
    d = (g[0] + sqrt(g[0] * g[0] + 4 * g[1])) / 2
    r = (g[0] - sqrt(g[0] * g[0] + 4 * g[1])) / 2
    if window is None:
        window = int(min(T, max(200, -5 / log(d))))
    if not optimize_g:
        # precompute kernel components and their running sums used below
        g11 = (np.exp(log(d) * np.arange(1, T + 1)) * np.arange(1, T + 1)) if d == r else \
            (np.exp(log(d) * np.arange(1, T + 1)) -
             np.exp(log(r) * np.arange(1, T + 1))) / (d - r)
        g12 = np.append(0, g[1] * g11[:-1])
        g11g11 = np.cumsum(g11 * g11)
        g11g12 = np.cumsum(g11 * g12)
        Sg11 = np.cumsum(g11)
        f_lam = 1 - g[0] - g[1]
    elif decimate == 0:  # need to run AR1 anyways for estimating AR coeffs
        decimate = 1
    # RSS target imposed by the noise constraint
    thresh = sn * sn * T
    # get initial estimate of b and lam on downsampled data using AR1 model
    if decimate > 0:
        from caiman.source_extraction.cnmf.oasis import oasisAR1, constrained_oasisAR1
        _, s, b, aa, lam = constrained_oasisAR1(
            y[:len(y) // decimate * decimate].reshape(-1, decimate).mean(1),
            d**decimate, sn / sqrt(decimate),
            optimize_b=optimize_b, b_nonneg=b_nonneg, optimize_g=optimize_g)
        if optimize_g:
            from scipy.optimize import minimize
            d = aa**(1. / decimate)
            if decimate > 1:
                s = oasisAR1(y - b, d, lam=lam * (1 - aa) / (1 - d))[1]
            r = estimate_time_constant(s, 1, fudge_factor=.98)[0]
            g[0] = d + r
            g[1] = -d * r
            g11 = (np.exp(log(d) * np.arange(1, T + 1)) -
                   np.exp(log(r) * np.arange(1, T + 1))) / (d - r)
            g12 = np.append(0, g[1] * g11[:-1])
            g11g11 = np.cumsum(g11 * g11)
            g11g12 = np.cumsum(g11 * g12)
            Sg11 = np.cumsum(g11)
            f_lam = 1 - g[0] - g[1]
        elif decimate > 1:
            s = oasisAR1(y - b, d, lam=lam * (1 - aa) / (1 - d))[1]
        lam *= (1 - d**decimate) / f_lam
        # this window size seems necessary and sufficient
        possible_spikes = [x + np.arange(-2, 3)
                           for x in np.where(s > s.max() / 10.)[0]]
        # NOTE(review): np.int was removed in NumPy >= 1.24; int or np.int_
        # may be needed here -- confirm against the supported NumPy versions
        ff = np.array(possible_spikes, dtype=np.int).ravel()
        ff = np.unique(ff[(ff >= 0) * (ff < T)])
        mask = np.zeros(T, dtype=bool)
        mask[ff] = True
    else:
        b = np.percentile(y, 15) if optimize_b else 0
        lam = 2 * sn * np.linalg.norm(g11)
        mask = None
    if b_nonneg:
        b = max(b, 0)
    # run ONNLS
    c, s = onnls(y - b, g, lam=lam, mask=mask,
                 shift=shift, window=window, tol=tol)

    if not optimize_b:  # don't optimize b, just the dual variable lambda
        for _ in range(max_iter - 1):
            res = y - c
            RSS = res.dot(res)
            if np.abs(RSS - thresh) < 1e-4 * thresh:
                break
            # calc shift dlam, here attributed to sparsity penalty
            tmp = np.empty(T)
            ls = np.append(np.where(s > 1e-6)[0], T)
            l = ls[0]
            tmp[:l] = (1 + d) / (1 + d**l) * \
                np.exp(log(d) * np.arange(l))  # first pool
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f - 1
                # if and elif correct last 2 time points for |s|_1 instead |c|_1
                if i == len(ls) - 2:  # last pool
                    tmp[f] = (1. / f_lam if l == 0 else
                              (Sg11[l] + g[1] / f_lam * g11[l - 1]
                               + (g[0] + g[1]) / f_lam * g11[l]
                               - g11g12[l] * tmp[f - 1]) / g11g11[l])
                # secondlast pool if last one has length 1
                elif i == len(ls) - 3 and ls[-2] == T - 1:
                    tmp[f] = (Sg11[l] + g[1] / f_lam * g11[l]
                              - g11g12[l] * tmp[f - 1]) / g11g11[l]
                else:  # all other pools
                    tmp[f] = (Sg11[l] - g11g12[l] * tmp[f - 1]) / g11g11[l]
                l += 1
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]
            aa = tmp.dot(tmp)
            bb = res.dot(tmp)
            cc = RSS - thresh
            try:
                db = (-bb + sqrt(bb * bb - aa * cc)) / aa
            except:
                db = -bb / aa
            # perform shift
            b += db
            c, s = onnls(y - b, g, lam=lam, mask=mask,
                         shift=shift, window=window, tol=tol)
            db = np.mean(y - c) - b
            b += db
            lam -= db / f_lam
    else:  # optimize b
        db = max(np.mean(y - c), 0 if b_nonneg else -np.inf) - b
        b += db
        lam -= db / (1 - g[0] - g[1])
        g_converged = False
        for _ in range(max_iter - 1):
            res = y - c - b
            RSS = res.dot(res)
            if np.abs(RSS - thresh) < 1e-4 * thresh:
                break
            # calc shift db, here attributed to baseline
            tmp = np.empty(T)
            ls = np.append(np.where(s > 1e-6)[0], T)
            l = ls[0]
            tmp[:l] = (1 + d) / (1 + d**l) * \
                np.exp(log(d) * np.arange(l))  # first pool
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f
                tmp[f] = (Sg11[l - 1] - g11g12[l - 1]
                          * tmp[f - 1]) / g11g11[l - 1]
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]
            tmp -= tmp.mean()
            aa = tmp.dot(tmp)
            bb = res.dot(tmp)
            cc = RSS - thresh
            try:
                db = (-bb + sqrt(bb * bb - aa * cc)) / aa
            except:
                db = -bb / aa
            # perform shift
            if b_nonneg:
                db = max(db, -b)
            b += db
            c, s = onnls(y - b, g, lam=lam, mask=mask,
                         shift=shift, window=window, tol=tol)
            # update b and lam
            db = max(np.mean(y - c), 0 if b_nonneg else -np.inf) - b
            b += db
            lam -= db / f_lam
            # update g and b
            if optimize_g and (not g_converged):
                def getRSS(y, opt):
                    # residual sum of squares for a candidate (b, log d, log r)
                    b, ld, lr = opt
                    if ld < lr:
                        return 1e3 * thresh
                    d, r = exp(ld), exp(lr)
                    g1, g2 = d + r, -d * r
                    tmp = b + onnls(y - b, [g1, g2], lam,
                                    mask=(s > 1e-2 * s.max()))[0] - y
                    return tmp.dot(tmp)

                result = minimize(lambda x: getRSS(y, x), (b, log(d), log(r)),
                                  bounds=((0 if b_nonneg else None, None),
                                          (None, -1e-4), (None, -1e-3)), method='L-BFGS-B',
                                  options={'gtol': 1e-04, 'maxiter': 10, 'ftol': 1e-05})
                if abs(result['x'][1] - log(d)) < 1e-3:
                    g_converged = True
                b, ld, lr = result['x']
                d, r = exp(ld), exp(lr)
                g = (d + r, -d * r)
                c, s = onnls(y - b, g, lam=lam, mask=mask,
                             shift=shift, window=window, tol=tol)
                # update b and lam
                db = max(np.mean(y - c), 0 if b_nonneg else -np.inf) - b
                b += db
                lam -= db

    if penalty == 0:  # get (locally optimal) L0 solution
        def c4smin(y, s, s_min):
            # denoised trace when only spikes larger than s_min are kept
            ls = np.append(np.where(s > s_min)[0], T)
            tmp = np.zeros_like(s)
            l = ls[0]  # first pool
            tmp[:l] = max(0, np.exp(log(d) * np.arange(l)).dot(y[:l]) * (1 - d * d)
                          / (1 - d**(2 * l))) * np.exp(log(d) * np.arange(l))
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f
                tmp[f] = (g11[:l].dot(y[f:f + l]) - g11g12[l - 1]
                          * tmp[f - 1]) / g11g11[l - 1]
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]
            return tmp
        if s_min == 0:
            # binary search over observed spike sizes for the largest
            # threshold that still satisfies the noise constraint
            spikesizes = np.sort(s[s > 1e-6])
            l = 0
            u = len(spikesizes) - 1
            i = u // 2
            if u >= 0:
                while True:
                    s_min = spikesizes[i]
                    tmp = c4smin(y - b, s, s_min)
                    res = y - b - tmp
                    RSS = res.dot(res)
                    if RSS < thresh:
                        res0 = tmp
                        if i == u:
                            break
                        l = i
                        i = (l + u + 1) // 2
                    else:
                        if i == u or i == 0:
                            break
                        u = i
                        i = (l + u) // 2
                if i > 0:
                    c = res0
                    s = np.append([0, 0], c[2:] - g[0] * c[1:-1] - g[1] * c[:-2])
        else:
            if s_min < 0:
                # negative s_min encodes a threshold relative to the noise level
                s_min = -s_min * sn * np.sqrt(1 - d)
            for factor in (.7, .8, .9, 1):
                c = c4smin(y - b, s, factor * s_min)
                s = np.append([0, 0], c[2:] - g[0] * c[1:-1] - g[1] * c[:-2])
                s[s < np.finfo(np.float32).eps] = 0
    return c, s, b, g, lam
def estimate_parameters(fluor, p=2, sn=None, g=None, range_ff=(0.25, 0.5),
                        method='logmexp', lags=5, fudge_factor=1.):
    """
    Estimate noise standard deviation and AR coefficients if they are not present

    Args:
        fluor: nparray
            one dimensional array containing the fluorescence intensities with
            one entry per time-bin.

        p: positive integer
            order of AR system

        sn: float
            noise standard deviation, estimated if not provided.

        g: array, optional
            AR coefficients; estimated from the data if not provided.

        range_ff : (1,2) array, nonnegative, max value <= 0.5
            range of frequency (x Nyquist rate) over which the spectrum is averaged

        method: string
            method of averaging: Mean, median, exponentiated mean of logvalues (default)

        lags: positive integer
            number of additional lags where the autocovariance is computed

        fudge_factor: float (0< fudge_factor <= 1)
            shrinkage factor to reduce bias

    Returns:
        g : AR coefficients (np.array(0) when p == 0)
        sn : noise standard deviation
    """
    # NOTE: default for range_ff changed from a mutable list to a tuple;
    # it is only read (indexed) downstream, so callers are unaffected.
    if sn is None:
        sn = GetSn(fluor, range_ff, method)
    if g is None:
        # p == 0 means no AR dynamics at all
        g = np.array(0) if p == 0 else estimate_time_constant(fluor, p, sn, lags, fudge_factor)
    return g, sn
def estimate_time_constant(fluor, p=2, sn=None, lags=5, fudge_factor=1.):
    """
    Estimate AR model parameters through the autocovariance function

    Args:
        fluor        : nparray
            One dimensional array containing the fluorescence intensities with
            one entry per time-bin.

        p            : positive integer
            order of AR system

        sn           : float
            noise standard deviation, estimated if not provided.

        lags         : positive integer
            number of additional lags where he autocovariance is computed

        fudge_factor : float (0< fudge_factor <= 1)
            shrinkage factor to reduce bias

    Returns:
        g       : estimated coefficients of the AR process
    """
    if sn is None:
        sn = GetSn(fluor)
    # compute a few extra lags beyond the model order
    lags += p
    xc = axcov(fluor, lags)
    xc = xc[:, np.newaxis]
    # Yule-Walker-type system; the noise variance is subtracted from the diagonal
    A = scipy.linalg.toeplitz(xc[lags + np.arange(lags)],
                              xc[lags + np.arange(p)]) - sn**2 * np.eye(lags, p)
    g = np.linalg.lstsq(A, xc[lags + 1:], rcond=None)[0]
    gr = np.roots(np.concatenate([np.array([1]), -g.flatten()]))
    # keep only the real part of the AR roots
    gr = old_div((gr + gr.conjugate()), 2.)
    np.random.seed(45)  # We want some variability below, but it doesn't have to be random at
    # runtime. A static seed captures our intent, while still not disrupting
    # the desired identical results from runs.
    # clamp unstable (>1) or negative roots into a plausible stable range
    gr[gr > 1] = 0.95 + np.random.normal(0, 0.01, np.sum(gr > 1))
    gr[gr < 0] = 0.15 + np.random.normal(0, 0.01, np.sum(gr < 0))
    # rebuild AR coefficients from the (shrunk) roots
    g = np.poly(fudge_factor * gr)
    g = -g[1:]
    return g.flatten()
def GetSn(fluor, range_ff=[0.25, 0.5], method='logmexp'):
"""
Estimate noise power through the power spectral density over the range of large frequencies
Args:
fluor : nparray
One dimensional array containing the fluorescence intensities with
one entry per time-bin.
range_ff : (1,2) array, nonnegative, max value <= 0.5
range of frequency (x Nyquist rate) over which the spectrum is averaged
method : string
method of averaging: Mean, median, exponentiated mean of logvalues (default)
Returns:
sn : noise standard deviation
"""
ff, Pxx = scipy.signal.welch(fluor)
ind1 = ff > range_ff[0]
ind2 = ff < range_ff[1]
ind = np.logical_and(ind1, ind2)
Pxx_ind = Pxx[ind]
sn = {
'mean': lambda Pxx_ind: np.sqrt(np.mean(old_div(Pxx_ind, 2))),
'median': lambda Pxx_ind: np.sqrt(np.median(old_div(Pxx_ind, 2))),
'logmexp': lambda Pxx_ind: np.sqrt(np.exp(np.mean(np.log(old_div(Pxx_ind, 2)))))
}[method](Pxx_ind)
return sn
def axcov(data, maxlag=5):
    """
    Compute the autocovariance of data at lags -maxlag, ..., 0, ..., maxlag.

    Args:
        data : array
            Array containing fluorescence data (treated as one dimensional).
        maxlag : int
            Number of lags to use in the autocovariance calculation.

    Returns:
        axcov : array
            Autocovariances computed from -maxlag:0:maxlag, normalized by the
            number of samples (biased estimator).
    """
    data = data - np.mean(data)
    T = len(data)
    bins = np.size(data)
    # Zero-pad to the smallest power of two >= 2*bins - 1 so the circular
    # correlation computed via FFT equals the linear autocorrelation for all
    # lags of interest. (Power-of-two search inlined from nextpow2 so this
    # function is self-contained; old_div replaced by '/', identical on
    # float/complex arrays.)
    fft_len = 1
    while fft_len < 2 * bins - 1:
        fft_len *= 2
    spec = np.fft.fft(data, fft_len)
    xcov = np.fft.ifft(np.square(np.abs(spec)))
    # Reorder so lags run -maxlag..0..maxlag (negative lags wrap at the end).
    xcov = np.concatenate([xcov[xcov.size - maxlag:xcov.size],
                           xcov[0:maxlag + 1]])
    # Entries are real up to round-off; normalize by T for the biased estimate.
    return np.real(xcov / T)
def nextpow2(value):
    """
    Return the smallest exponent such that 2 ** exponent >= abs(value).

    Args:
        value : int

    Returns:
        exponent : int
    """
    magnitude = np.abs(value)
    exponent = 0
    # Grow the exponent until the power of two reaches the magnitude.
    while np.power(2, exponent) < magnitude:
        exponent += 1
    return exponent
|
simonsfoundation/CaImAn
|
caiman/source_extraction/cnmf/deconvolution.py
|
Python
|
gpl-2.0
| 39,506
|
[
"NEURON"
] |
62b35dc5dff5b7f4aa999f67fbc31c9032c23f815325eefc460e04bf18e2d781
|
# NOTE(review): small NEST + pylab demo. Creates iaf ("integrate-and-fire")
# neuron nodes, drives two of them with constant current, connects one to the
# other with a negative (inhibitory) synaptic weight, simulates 1000 ms and
# plots the recorded membrane potentials. Statement order matters: NEST
# assigns node ids in creation order.
#import sys, os
#sys.path.insert(0, '/media/nmsutton/Ext3Drive/General/NEST/NEST/lib64/python3.4/site-packages')
#print (os.path.dirname(sys.executable))
#print (os.environ)
import pylab
import nest

print ("test")

# Three iaf_neuron nodes; neuron3 is created but never connected or driven
# below (it still consumes a node id).
neuron = nest.Create("iaf_neuron")
neuron2 = nest.Create("iaf_neuron")
neuron3 = nest.Create("iaf_neuron")

# One multimeter per recorded neuron, sampling membrane potential V_m.
multimeter = nest.Create("multimeter")
nest.SetStatus(multimeter, {"withtime":True, "record_from":["V_m"]})
multimeter2 = nest.Create("multimeter")
nest.SetStatus(multimeter2, {"withtime":True, "record_from":["V_m"]})

# Constant input currents I_e (presumably pA, NEST's convention — confirm).
nest.SetStatus(neuron, {"I_e": 396.0})
nest.SetStatus(neuron2, {"I_e": 376.0})
#nest.SetStatus(neuron2, {"V_m": 376.0})

# Negative weight: spikes from neuron inhibit neuron2.
nest.Connect(neuron, neuron2, syn_spec = {"weight":-150.0})
#nest.SetStatus(neuron, {"V_m": 376.0})
#print (nest.GetStatus(neuron, "V_m"))

nest.Connect(multimeter, neuron)
nest.Connect(multimeter2, neuron2)

# Spike detector wired to the presynaptic neuron (its events are not
# plotted below; the plotting code for it is commented out).
spikedetector = nest.Create("spike_detector",
    params={"withgid": True, "withtime": True})
nest.Connect(neuron, spikedetector)

nest.Simulate(1000.0)

# Figure 1: membrane potential trace of the presynaptic neuron.
dmm = nest.GetStatus(multimeter)[0]
Vms = dmm["events"]["V_m"]
ts = dmm["events"]["times"]
pylab.figure(1)
pylab.plot(ts, Vms)

# Figure 2: membrane potential trace of the inhibited neuron.
dmb = nest.GetStatus(multimeter2)[0]
Vmsb = dmb["events"]["V_m"]
tsb = dmb["events"]["times"]
pylab.figure(2)
pylab.plot(tsb, Vmsb)

'''dSD = nest.GetStatus(spikedetector,keys='events')[0]
evs = dSD["senders"]
ts = dSD["times"]
pylab.figure(2)
pylab.plot(ts, evs, ".")'''

pylab.show()
|
nmsutton/MemoryModule
|
python_version/examples/nestTestSynapses3.py
|
Python
|
mit
| 1,457
|
[
"NEURON"
] |
4ee823b883dee44ca832d6497e9d000ad765cb41158653a04799939c3c7c05b3
|
# try using distribute or setuptools or distutils.
try:
    import distribute_setup
    distribute_setup.use_setuptools()
except ImportError:
    pass

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

import sys
import re

# parse version from package/module without importing or evaluating the code
with open('scripts_bin/__init__.py') as fh:
    for line in fh:
        m = re.search(r"^__version__ = '(?P<version>[^']+)'$", line)
        if m:
            version = m.group('version')
            break

# The error message promises Python 2.7+, so reject anything older than 2.7.
# (Fixed: the previous check used <= (2, 5), which wrongly let 2.6 through.)
if sys.version_info < (2, 7):
    sys.stderr.write("ERROR: santi_script requires Python Version 2.7 " +
                     "or above...exiting.\n")
    sys.exit(1)

setup(
    name="santi_script",
    version=version,
    author="Peter Thorpe",
    author_email="please_dont_email_me@hutton.ac.uk",
    description=''.join(["This script runs ",
                         "metabarcoding pipeline ",
                         "to identify Phytophthora species "]),
    license="MIT",
    keywords="genome bioinformatics sequence sequencing metabarcoding",
    platforms="Linux; MacOS X",
    url="http://widdowquinn.github.io/THAPBI",  # project home
    download_url="https://github.com/widdowquinn/THAPBI/releases",
    scripts=['Identify_species.py'],
    # Fixed: was [' '] — a single space is not a valid package name.
    packages=[],
    # Fixed: the two requirement strings were missing a comma (they silently
    # concatenated into the bogus requirement 'biopythonmatoplotlib') and
    # 'matplotlib' was misspelt.
    install_requires=['biopython', 'matplotlib'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
    ],
)
|
widdowquinn/THAPBI
|
Phy_ITS_ident_pipline_DRAFT/setup.py
|
Python
|
mit
| 1,969
|
[
"Biopython"
] |
7f536b04b6cc91c97babd87ed4e20981ad61ae2eb83ca6f50167e33ab8461f20
|
# Copyright 2006-2007 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
#Nice link:
# http://www.ebi.ac.uk/help/formats_frame.html
"""Sequence input/output designed to look similar to the bioperl design.
The Bio.SeqIO module is also documented by a whole chapter in the Biopython
tutorial, and by this wiki on the webpage, http://biopython.org/wiki/SeqIO
Input
=====
The main function is Bio.SeqIO.parse(...) which takes an input file handle,
and format string. This returns an iterator giving SeqRecord objects.
from Bio import SeqIO
handle = open("example.fasta", "rU")
for record in SeqIO.parse(handle, "fasta") :
print record
handle.close()
Note that the parse() function will invoke the relevant parser for
the format with its default settings. You may want more control, in which case
you need to create a format specific sequence iterator directly.
For non-interlaced files (e.g. Fasta, GenBank, EMBL) with multiple records
using a sequence iterator can save you a lot of memory (RAM). There is less
benefit for interlaced file formats (e.g. most multiple alignment file formats).
However, an iterator only lets you access the records one by one.
If you want random access to the records by number, turn this into a list:
from Bio import SeqIO
handle = open("example.fasta", "rU")
records = list(SeqIO.parse(handle, "fasta"))
handle.close()
print records[0]
If you want random access to the records by a key such as the record id, turn
the iterator into a dictionary:
from Bio import SeqIO
handle = open("example.fasta", "rU")
record_dict = SeqIO.to_dict(SeqIO.parse(handle, "fasta"))
handle.close()
print record["gi:12345678"]
If you expect your file to contain one-and-only-one record, then we provide
the following 'helper' function which will return a single SeqRecord, or raise
an exception if there are no records or more than one record:
from Bio import SeqIO
handle = open("example.fasta", "rU")
record = SeqIO.read(handle, "fasta")
handle.close()
print record
This style is useful when you expect a single record only (and would consider
multiple records an error). For example, when dealing with GenBank files for
bacterial genomes or chromosomes, there is normally only a single record.
Alternatively, use this with a handle when download a single record from the
internet.
However, if you just want the first record from a file containing multiple
record, use the iterator's next() method:
from Bio import SeqIO
handle = open("example.fasta", "rU")
record = SeqIO.parse(handle, "fasta").next()
handle.close()
print record
The above code will work as long as the file contains at least one record.
Note that if there is more than one record, the remaining records will be
silently ignored.
Input - Alignments
==================
Currently an alignment class cannot be created from SeqRecord objects.
Instead, use the to_alignment(...) function, like so:
from Bio import SeqIO
handle = open("example.aln", "rU")
alignment = SeqIO.to_alignment(SeqIO.parse(handle, "clustal"))
handle.close()
This function may be removed in future once alignments can be created
directly from SeqRecord objects.
Output
======
Use the function Bio.SeqIO.write(...), which takes a complete set of SeqRecord
objects (either as a list, or an iterator), an output file handle and of course
the file format.
from Bio import SeqIO
records = ...
handle = open("example.faa", "w")
SeqIO.write(records, handle, "fasta")
handle.close()
In general, you are expected to call this function once (with all your records)
and then close the file handle.
Output - Advanced
=================
The effect of calling write() multiple times on a single file will vary
depending on the file format, and is best avoided unless you have a strong reason
to do so.
Trying this for certain alignment formats (e.g. phylip, clustal, stockholm) would
have the effect of concatenating several multiple sequence alignments together.
Such files are created by the PHYLIP suite of programs for bootstrap analysis.
For sequential files formats (e.g. fasta, genbank) each "record block" holds a
single sequence. For these files it would probably be safe to call write()
multiple times.
File Formats
============
When specifying formats, use lowercase strings.
Old Files
=========
The modules Bio.SeqIO.FASTA and Bio.SeqIO.generic are deprecated and may be
removed.
"""
#TODO
# - define policy on reading aligned sequences with gaps in
# (e.g. - and . characters) including how the alphabet interacts
#
# - Can we build the to_alignment(...) functionality
# into the generic Alignment class instead?
#
# - How best to handle unique/non unique record.id when writing.
# For most file formats reading such files is fine; The stockholm
# parser would fail.
#
# - MSF multiple alignment format, aka GCG, aka PileUp format (*.msf)
# http://www.bioperl.org/wiki/MSF_multiple_alignment_format
#
# - Writing NEXUS multiple alignment format (*.nxs)
# http://www.bioperl.org/wiki/NEXUS_multiple_alignment_format
# Can be simply offload to Bio.Nexus for this?
"""
FAO BioPython Developers
========================
The way I envision this SeqIO system working is that for any sequence file format
we have an iterator that returns SeqRecord objects.
This also applies to interlaced fileformats (like clustal) where the file cannot
be read record by record. You should still return an iterator!
These file format specific sequence iterators may be implemented as:
* Classes which take a handle for __init__ and provide the __iter__ method
* Functions that take a handle, and return an iterator object
* Generator functions that take a handle, and yield SeqRecord objects
It is then trivial to turn this iterator into a list of SeqRecord objects, an in
memory dictionary, or a multiple sequence alignment object.
For building the dictionary by default the id property of each SeqRecord is used
as the key. You should always populate the id property, and it should be unique.
For some file formats the accession number is a good choice.
When adding a new file format, please use the same lower case format name as
BioPerl, or if they have not defined one, try the names used by EMBOSS.
"""
import os
#from cStringIO import StringIO
from StringIO import StringIO
from Bio.Alphabet import generic_alphabet, generic_protein
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align.Generic import Alignment
import FastaIO
import InsdcIO #EMBL and GenBank
import StockholmIO
import ClustalIO
import PhylipIO
import NexusIO
import SwissIO
#Convention for format names is "mainname-subtype" in lower case.
#Please use the same names as BioPerl where possible.
#
#Note that this simple system copes with defining
#multiple possible iterators for a given format/extension
#with the -subtype suffix
# Maps lower case format name -> callable that takes a handle and returns
# an iterator of SeqRecord objects.
_FormatToIterator = {"fasta" : FastaIO.FastaIterator,
                     "genbank" : InsdcIO.GenBankIterator,
                     "genbank-cds" : InsdcIO.GenBankCdsFeatureIterator,
                     "embl" : InsdcIO.EmblIterator,
                     "embl-cds" : InsdcIO.EmblCdsFeatureIterator,
                     "clustal" : ClustalIO.ClustalIterator,
                     "phylip" : PhylipIO.PhylipIterator,
                     "nexus" : NexusIO.NexusIterator,
                     "stockholm" : StockholmIO.StockholmIterator,
                     "swiss" : SwissIO.SwissIterator,
                     }

# Maps lower case format name -> writer class; write() instantiates the
# class with a handle and calls its write_file(sequences) method.
# Only a subset of the readable formats support output.
_FormatToWriter = {"fasta" : FastaIO.FastaWriter,
                   "phylip" : PhylipIO.PhylipWriter,
                   "stockholm" : StockholmIO.StockholmWriter,
                   "clustal" : ClustalIO.ClustalWriter,
                   }
def write(sequences, handle, format):
    """Write a complete set of sequences to a handle in the given format.

    sequences - A list (or iterator) of SeqRecord objects
    handle    - File handle object to write to
    format    - Lower case string naming the output format

    You should close the handle after calling this function.
    There is no return value.
    """
    # Catch the common argument mix-ups first, with helpful messages.
    if isinstance(handle, basestring):
        raise TypeError("Need a file handle, not a string (i.e. not a filename)")
    if not isinstance(format, basestring):
        raise TypeError("Need a string for the file format (lower case)")
    if not format:
        raise ValueError("Format required (lower case string)")
    if format != format.lower():
        raise ValueError("Format string '%s' should be lower case" % format)
    if isinstance(sequences, SeqRecord):
        raise ValueError("Use a SeqRecord list/iterator, not just a single SeqRecord")

    # Look the writer class up before using it, so unknown formats give a
    # clear error.
    if format not in _FormatToWriter:
        raise ValueError("Unknown format '%s'" % format)
    writer_class = _FormatToWriter[format]

    # Deliberately leave the handle open: some callers write repeatedly,
    # e.g. concatenated phylip files for bootstrapping.
    writer_class(handle).write_file(sequences)
    return
def parse(handle, format):
    """Turn a sequence file into an iterator of SeqRecord objects.

    handle - handle to the file.
    format - lower case string describing the file format.

    If you have a filename, use:

    from Bio import SeqIO
    my_iterator = SeqIO.parse(open(filename,"rU"), format)

    If you have the data in a string, wrap it in a StringIO first.

    The file is parsed with default settings, which may give a generic
    alphabet or other non-ideal behaviour; for finer control call the
    format specific iterator directly. Use Bio.SeqIO.read(handle, format)
    when you expect exactly one record.
    """
    # Catch the common argument mix-ups first, with helpful messages.
    if isinstance(handle, basestring):
        raise TypeError("Need a file handle, not a string (i.e. not a filename)")
    if not isinstance(format, basestring):
        raise TypeError("Need a string for the file format (lower case)")
    if not format:
        raise ValueError("Format required (lower case string)")
    if format != format.lower():
        raise ValueError("Format string '%s' should be lower case" % format)

    # Look the iterator factory up before using it, so unknown formats give
    # a clear error.
    if format not in _FormatToIterator:
        raise ValueError("Unknown format '%s'" % format)
    iterator_generator = _FormatToIterator[format]
    # The caller opened this handle, so the caller closes it.
    return iterator_generator(handle)
def read(handle, format):
    """Return the one-and-only SeqRecord from a sequence file.

    handle - handle to the file.
    format - lower case string describing the file format.

    Raises ValueError if the handle holds no records, or more than one.
    Use Bio.SeqIO.parse(handle, format) if you want multiple records, or
    parse(...).next() if you just want the first of several.
    """
    stream = parse(handle, format)
    # Pull the first record, treating iterator exhaustion as "no records".
    try:
        first = stream.next()
    except StopIteration:
        first = None
    if first is None:
        raise ValueError("No records found in handle")
    # Finding a second record means this was the wrong function to call.
    try:
        second = stream.next()
    except StopIteration:
        second = None
    if second is not None:
        raise ValueError("More than one record found in handle")
    return first
def to_dict(sequences, key_function=None):
    """Turn a sequence iterator or list into a dictionary.

    sequences    - An iterator that returns SeqRecord objects, or simply
                   a list of SeqRecord objects.
    key_function - Optional callable which, given a record, returns a
                   unique string to use as the dictionary key, e.g.
                   key_function = lambda rec : rec.name
                   When omitted, record.id is used on the assumption that
                   each record carries a unique id.

    Raises ValueError if two records map to the same key.
    """
    extract_key = key_function
    if extract_key is None:
        extract_key = lambda rec: rec.id
    mapping = {}
    for record in sequences:
        key = extract_key(record)
        # Refuse silent overwrites - duplicate keys indicate bad input or a
        # poorly chosen key_function.
        if key in mapping:
            raise ValueError("Duplicate key '%s'" % key)
        mapping[key] = record
    return mapping
def to_alignment(sequences, alphabet=generic_alphabet, strict=True):
    """Build a multiple sequence alignment from SeqRecord objects.

    sequences - An iterator that returns SeqRecord objects, or simply a
                list of SeqRecord objects. All sequences must share the
                same length.
    alphabet  - Optional alphabet. Strongly recommended.
    strict    - Optional, defaults to True. When True, the record lengths
                and alphabets are checked as they are added.
    """
    alignment = Alignment(alphabet)
    expected_length = None
    for record in sequences:
        if strict:
            # All sequences in an alignment must have identical length;
            # the first record fixes the expected value.
            if expected_length is None:
                expected_length = len(record.seq)
            elif expected_length != len(record.seq):
                raise ValueError("Sequences of different lengths")
            if not isinstance(record.seq.alphabet, alphabet.__class__):
                raise ValueError("Incompatible sequence alphabet")
            # ToDo, additional checks on the specified alignment
            # (e.g. via the alphabet.contains() method?)
        # This abuses the "private" _records list; there is no public
        # add-a-SeqRecord method yet (see Bug 1944).
        alignment._records.append(record)
    return alignment
if __name__ == "__main__" :
#Run some tests...
from Bio.Alphabet import generic_nucleotide
from sets import Set
# Fasta file with unusual layout, from here:
# http://virgil.ruc.dk/kurser/Sekvens/Treedraw.htm
faa_example = \
""">V_Harveyi_PATH
mknwikvava aialsaatvq aatevkvgms gryfpftfvk qdklqgfevd mwdeigkrnd
ykieyvtanf sglfglletg ridtisnqit mtdarkakyl fadpyvvdga qitvrkgnds
iqgvedlagk tvavnlgsnf eqllrdydkd gkiniktydt giehdvalgr adafimdrls
alelikktgl plqlagepfe tiqnawpfvd nekgrklqae vnkalaemra dgtvekisvk
wfgaditk
>B_subtilis_YXEM
mkmkkwtvlv vaallavlsa cgngnssske ddnvlhvgat gqsypfayke ngkltgfdve
vmeavakkid mkldwkllef sglmgelqtg kldtisnqva vtderketyn ftkpyayagt
qivvkkdntd iksvddlkgk tvaavlgsnh aknleskdpd kkiniktyet qegtlkdvay
grvdayvnsr tvliaqikkt glplklagdp ivyeqvafpf akddahdklr kkvnkaldel
rkdgtlkkls ekyfneditv eqkh
>FLIY_ECOLI
mklahlgrqa lmgvmavalv agmsvksfad egllnkvker gtllvglegt yppfsfqgdd
gkltgfevef aqqlakhlgv easlkptkwd gmlasldskr idvvinqvti sderkkkydf
stpytisgiq alvkkgnegt iktaddlkgk kvgvglgtny eewlrqnvqg vdvrtydddp
tkyqdlrvgr idailvdrla aldlvkktnd tlavtgeafs rqesgvalrk gnedllkavn
daiaemqkdg tlqalsekwf gadvtk
>Deinococcus_radiodurans
mkksllslkl sgllvpsvla lslsacssps stlnqgtlki amegtyppft skneqgelvg
fdvdiakava qklnlkpefv ltewsgilag lqankydviv nqvgitperq nsigfsqpya
ysrpeiivak nntfnpqsla dlkgkrvgst lgsnyekqli dtgdikivty pgapeiladl
vagridaayn drlvvnyiin dqklpvrgag qigdaapvgi alkkgnsalk dqidkaltem
rsdgtfekis qkwfgqdvgq p
>B_subtilis_GlnH_homo_YCKK
mkkallalfm vvsiaalaac gagndnqskd nakdgdlwas ikkkgvltvg tegtyepfty
hdkdtdkltg ydveviteva krlglkvdfk etqwgsmfag lnskrfdvva nqvgktdred
kydfsdkytt sravvvtkkd nndikseadv kgktsaqslt snynklatna gakvegvegm
aqalqmiqqa rvdmtyndkl avlnylktsg nknvkiafet gepqstyftf rkgsgevvdq
vnkalkemke dgtlskiskk wfgedvsk
>YA80_HAEIN
mkkllfttal ltgaiafstf shageiadrv ektktllvgt egtyapftfh dksgkltgfd
vevirkvaek lglkvefket qwdamyagln akrfdvianq tnpsperlkk ysfttpynys
ggvivtkssd nsiksfedlk grksaqsats nwgkdakaag aqilvvdgla qslelikqgr
aeatindkla vldyfkqhpn sglkiaydrg dktptafafl qgedalitkf nqvlealrqd
gtlkqisiew fgyditq
>E_coli_GlnH
mksvlkvsla altlafavss haadkklvva tdtafvpfef kqgdkyvgfd vdlwaaiake
lkldyelkpm dfsgiipalq tknvdlalag ititderkka idfsdgyyks gllvmvkann
ndvksvkdld gkvvavksgt gsvdyakani ktkdlrqfpn idnaymelgt nradavlhdt
pnilyfikta gngqfkavgd sleaqqygia fpkgsdelrd kvngalktlr engtyneiyk
kwfgtepk
>HISJ_E_COLI
mkklvlslsl vlafssataa faaipqniri gtdptyapfe sknsqgelvg fdidlakelc
krintqctfv enpldalips lkakkidaim sslsitekrq qeiaftdkly aadsrlvvak
nsdiqptves lkgkrvgvlq gttqetfgne hwapkgieiv syqgqdniys dltagridaa
fqdevaaseg flkqpvgkdy kfggpsvkde klfgvgtgmg lrkednelre alnkafaemr
adgtyeklak kyfdfdvygg"""
# This alignment was created from the fasta example given above
aln_example = \
"""CLUSTAL X (1.83) multiple sequence alignment
V_Harveyi_PATH --MKNWIKVAVAAIA--LSAA------------------TVQAATEVKVG
B_subtilis_YXEM MKMKKWTVLVVAALLAVLSACG------------NGNSSSKEDDNVLHVG
B_subtilis_GlnH_homo_YCKK MKKALLALFMVVSIAALAACGAGNDNQSKDNAKDGDLWASIKKKGVLTVG
YA80_HAEIN MKKLLFTTALLTGAIAFSTF-----------SHAGEIADRVEKTKTLLVG
FLIY_ECOLI MKLAHLGRQALMGVMAVALVAG---MSVKSFADEG-LLNKVKERGTLLVG
E_coli_GlnH --MKSVLKVSLAALTLAFAVS------------------SHAADKKLVVA
Deinococcus_radiodurans -MKKSLLSLKLSGLLVPSVLALS--------LSACSSPSSTLNQGTLKIA
HISJ_E_COLI MKKLVLSLSLVLAFSSATAAF-------------------AAIPQNIRIG
: . : :.
V_Harveyi_PATH MSGRYFPFTFVKQ--DKLQGFEVDMWDEIGKRNDYKIEYVTANFSGLFGL
B_subtilis_YXEM ATGQSYPFAYKEN--GKLTGFDVEVMEAVAKKIDMKLDWKLLEFSGLMGE
B_subtilis_GlnH_homo_YCKK TEGTYEPFTYHDKDTDKLTGYDVEVITEVAKRLGLKVDFKETQWGSMFAG
YA80_HAEIN TEGTYAPFTFHDK-SGKLTGFDVEVIRKVAEKLGLKVEFKETQWDAMYAG
FLIY_ECOLI LEGTYPPFSFQGD-DGKLTGFEVEFAQQLAKHLGVEASLKPTKWDGMLAS
E_coli_GlnH TDTAFVPFEFKQG--DKYVGFDVDLWAAIAKELKLDYELKPMDFSGIIPA
Deinococcus_radiodurans MEGTYPPFTSKNE-QGELVGFDVDIAKAVAQKLNLKPEFVLTEWSGILAG
HISJ_E_COLI TDPTYAPFESKNS-QGELVGFDIDLAKELCKRINTQCTFVENPLDALIPS
** .: *::::. : :. . ..:
V_Harveyi_PATH LETGRIDTISNQITMTDARKAKYLFADPYVVDG-AQITVRKGNDSIQGVE
B_subtilis_YXEM LQTGKLDTISNQVAVTDERKETYNFTKPYAYAG-TQIVVKKDNTDIKSVD
B_subtilis_GlnH_homo_YCKK LNSKRFDVVANQVG-KTDREDKYDFSDKYTTSR-AVVVTKKDNNDIKSEA
YA80_HAEIN LNAKRFDVIANQTNPSPERLKKYSFTTPYNYSG-GVIVTKSSDNSIKSFE
FLIY_ECOLI LDSKRIDVVINQVTISDERKKKYDFSTPYTISGIQALVKKGNEGTIKTAD
E_coli_GlnH LQTKNVDLALAGITITDERKKAIDFSDGYYKSG-LLVMVKANNNDVKSVK
Deinococcus_radiodurans LQANKYDVIVNQVGITPERQNSIGFSQPYAYSRPEIIVAKNNTFNPQSLA
HISJ_E_COLI LKAKKIDAIMSSLSITEKRQQEIAFTDKLYAADSRLVVAKNSDIQP-TVE
*.: . * . * *: : : .
V_Harveyi_PATH DLAGKTVAVNLGSNFEQLLRDYDKDGKINIKTYDT--GIEHDVALGRADA
B_subtilis_YXEM DLKGKTVAAVLGSNHAKNLESKDPDKKINIKTYETQEGTLKDVAYGRVDA
B_subtilis_GlnH_homo_YCKK DVKGKTSAQSLTSNYNKLATN----AGAKVEGVEGMAQALQMIQQARVDM
YA80_HAEIN DLKGRKSAQSATSNWGKDAKA----AGAQILVVDGLAQSLELIKQGRAEA
FLIY_ECOLI DLKGKKVGVGLGTNYEEWLRQNV--QGVDVRTYDDDPTKYQDLRVGRIDA
E_coli_GlnH DLDGKVVAVKSGTGSVDYAKAN--IKTKDLRQFPNIDNAYMELGTNRADA
Deinococcus_radiodurans DLKGKRVGSTLGSNYEKQLIDTG---DIKIVTYPGAPEILADLVAGRIDA
HISJ_E_COLI SLKGKRVGVLQGTTQETFGNEHWAPKGIEIVSYQGQDNIYSDLTAGRIDA
.: *: . : .: : * :
V_Harveyi_PATH FIMDRLSALE-LIKKT-GLPLQLAGEPFETI-----QNAWPFVDNEKGRK
B_subtilis_YXEM YVNSRTVLIA-QIKKT-GLPLKLAGDPIVYE-----QVAFPFAKDDAHDK
B_subtilis_GlnH_homo_YCKK TYNDKLAVLN-YLKTSGNKNVKIAFETGEPQ-----STYFTFRKGS--GE
YA80_HAEIN TINDKLAVLD-YFKQHPNSGLKIAYDRGDKT-----PTAFAFLQGE--DA
FLIY_ECOLI ILVDRLAALD-LVKKT-NDTLAVTGEAFSRQ-----ESGVALRKGN--ED
E_coli_GlnH VLHDTPNILY-FIKTAGNGQFKAVGDSLEAQ-----QYGIAFPKGS--DE
Deinococcus_radiodurans AYNDRLVVNY-IINDQ-KLPVRGAGQIGDAA-----PVGIALKKGN--SA
HISJ_E_COLI AFQDEVAASEGFLKQPVGKDYKFGGPSVKDEKLFGVGTGMGLRKED--NE
. .: : . .
V_Harveyi_PATH LQAEVNKALAEMRADGTVEKISVKWFGADITK----
B_subtilis_YXEM LRKKVNKALDELRKDGTLKKLSEKYFNEDITVEQKH
B_subtilis_GlnH_homo_YCKK VVDQVNKALKEMKEDGTLSKISKKWFGEDVSK----
YA80_HAEIN LITKFNQVLEALRQDGTLKQISIEWFGYDITQ----
FLIY_ECOLI LLKAVNDAIAEMQKDGTLQALSEKWFGADVTK----
E_coli_GlnH LRDKVNGALKTLRENGTYNEIYKKWFGTEPK-----
Deinococcus_radiodurans LKDQIDKALTEMRSDGTFEKISQKWFGQDVGQP---
HISJ_E_COLI LREALNKAFAEMRADGTYEKLAKKYFDFDVYGG---
: .: .: :: :** . : ::*. :
"""
# This is the clustal example (above) but output in phylip format,
# with truncated names. Note there is an ambiguity here: two
# different sequences both called "B_subtilis", originally
# "B_subtilis_YXEM" and "B_subtilis_GlnH_homo_YCKK"
phy_example = \
""" 8 286
V_Harveyi_ --MKNWIKVA VAAIA--LSA A--------- ---------T VQAATEVKVG
B_subtilis MKMKKWTVLV VAALLAVLSA CG-------- ----NGNSSS KEDDNVLHVG
B_subtilis MKKALLALFM VVSIAALAAC GAGNDNQSKD NAKDGDLWAS IKKKGVLTVG
YA80_HAEIN MKKLLFTTAL LTGAIAFSTF ---------- -SHAGEIADR VEKTKTLLVG
FLIY_ECOLI MKLAHLGRQA LMGVMAVALV AG---MSVKS FADEG-LLNK VKERGTLLVG
E_coli_Gln --MKSVLKVS LAALTLAFAV S--------- ---------S HAADKKLVVA
Deinococcu -MKKSLLSLK LSGLLVPSVL ALS------- -LSACSSPSS TLNQGTLKIA
HISJ_E_COL MKKLVLSLSL VLAFSSATAA F--------- ---------- AAIPQNIRIG
MSGRYFPFTF VKQ--DKLQG FEVDMWDEIG KRNDYKIEYV TANFSGLFGL
ATGQSYPFAY KEN--GKLTG FDVEVMEAVA KKIDMKLDWK LLEFSGLMGE
TEGTYEPFTY HDKDTDKLTG YDVEVITEVA KRLGLKVDFK ETQWGSMFAG
TEGTYAPFTF HDK-SGKLTG FDVEVIRKVA EKLGLKVEFK ETQWDAMYAG
LEGTYPPFSF QGD-DGKLTG FEVEFAQQLA KHLGVEASLK PTKWDGMLAS
TDTAFVPFEF KQG--DKYVG FDVDLWAAIA KELKLDYELK PMDFSGIIPA
MEGTYPPFTS KNE-QGELVG FDVDIAKAVA QKLNLKPEFV LTEWSGILAG
TDPTYAPFES KNS-QGELVG FDIDLAKELC KRINTQCTFV ENPLDALIPS
LETGRIDTIS NQITMTDARK AKYLFADPYV VDG-AQITVR KGNDSIQGVE
LQTGKLDTIS NQVAVTDERK ETYNFTKPYA YAG-TQIVVK KDNTDIKSVD
LNSKRFDVVA NQVG-KTDRE DKYDFSDKYT TSR-AVVVTK KDNNDIKSEA
LNAKRFDVIA NQTNPSPERL KKYSFTTPYN YSG-GVIVTK SSDNSIKSFE
LDSKRIDVVI NQVTISDERK KKYDFSTPYT ISGIQALVKK GNEGTIKTAD
LQTKNVDLAL AGITITDERK KAIDFSDGYY KSG-LLVMVK ANNNDVKSVK
LQANKYDVIV NQVGITPERQ NSIGFSQPYA YSRPEIIVAK NNTFNPQSLA
LKAKKIDAIM SSLSITEKRQ QEIAFTDKLY AADSRLVVAK NSDIQP-TVE
DLAGKTVAVN LGSNFEQLLR DYDKDGKINI KTYDT--GIE HDVALGRADA
DLKGKTVAAV LGSNHAKNLE SKDPDKKINI KTYETQEGTL KDVAYGRVDA
DVKGKTSAQS LTSNYNKLAT N----AGAKV EGVEGMAQAL QMIQQARVDM
DLKGRKSAQS ATSNWGKDAK A----AGAQI LVVDGLAQSL ELIKQGRAEA
DLKGKKVGVG LGTNYEEWLR QNV--QGVDV RTYDDDPTKY QDLRVGRIDA
DLDGKVVAVK SGTGSVDYAK AN--IKTKDL RQFPNIDNAY MELGTNRADA
DLKGKRVGST LGSNYEKQLI DTG---DIKI VTYPGAPEIL ADLVAGRIDA
SLKGKRVGVL QGTTQETFGN EHWAPKGIEI VSYQGQDNIY SDLTAGRIDA
FIMDRLSALE -LIKKT-GLP LQLAGEPFET I-----QNAW PFVDNEKGRK
YVNSRTVLIA -QIKKT-GLP LKLAGDPIVY E-----QVAF PFAKDDAHDK
TYNDKLAVLN -YLKTSGNKN VKIAFETGEP Q-----STYF TFRKGS--GE
TINDKLAVLD -YFKQHPNSG LKIAYDRGDK T-----PTAF AFLQGE--DA
ILVDRLAALD -LVKKT-NDT LAVTGEAFSR Q-----ESGV ALRKGN--ED
VLHDTPNILY -FIKTAGNGQ FKAVGDSLEA Q-----QYGI AFPKGS--DE
AYNDRLVVNY -IINDQ-KLP VRGAGQIGDA A-----PVGI ALKKGN--SA
AFQDEVAASE GFLKQPVGKD YKFGGPSVKD EKLFGVGTGM GLRKED--NE
LQAEVNKALA EMRADGTVEK ISVKWFGADI TK----
LRKKVNKALD ELRKDGTLKK LSEKYFNEDI TVEQKH
VVDQVNKALK EMKEDGTLSK ISKKWFGEDV SK----
LITKFNQVLE ALRQDGTLKQ ISIEWFGYDI TQ----
LLKAVNDAIA EMQKDGTLQA LSEKWFGADV TK----
LRDKVNGALK TLRENGTYNE IYKKWFGTEP K-----
LKDQIDKALT EMRSDGTFEK ISQKWFGQDV GQP---
LREALNKAFA EMRADGTYEK LAKKYFDFDV YGG---
"""
# This is the clustal example (above) but output in phylip format,
nxs_example = \
"""#NEXUS
BEGIN DATA;
dimensions ntax=8 nchar=286;
format missing=?
symbols="ABCDEFGHIKLMNPQRSTUVWXYZ"
interleave datatype=PROTEIN gap= -;
matrix
V_Harveyi_PATH --MKNWIKVAVAAIA--LSAA------------------TVQAATEVKVG
B_subtilis_YXEM MKMKKWTVLVVAALLAVLSACG------------NGNSSSKEDDNVLHVG
B_subtilis_GlnH_homo_YCKK MKKALLALFMVVSIAALAACGAGNDNQSKDNAKDGDLWASIKKKGVLTVG
YA80_HAEIN MKKLLFTTALLTGAIAFSTF-----------SHAGEIADRVEKTKTLLVG
FLIY_ECOLI MKLAHLGRQALMGVMAVALVAG---MSVKSFADEG-LLNKVKERGTLLVG
E_coli_GlnH --MKSVLKVSLAALTLAFAVS------------------SHAADKKLVVA
Deinococcus_radiodurans -MKKSLLSLKLSGLLVPSVLALS--------LSACSSPSSTLNQGTLKIA
HISJ_E_COLI MKKLVLSLSLVLAFSSATAAF-------------------AAIPQNIRIG
V_Harveyi_PATH MSGRYFPFTFVKQ--DKLQGFEVDMWDEIGKRNDYKIEYVTANFSGLFGL
B_subtilis_YXEM ATGQSYPFAYKEN--GKLTGFDVEVMEAVAKKIDMKLDWKLLEFSGLMGE
B_subtilis_GlnH_homo_YCKK TEGTYEPFTYHDKDTDKLTGYDVEVITEVAKRLGLKVDFKETQWGSMFAG
YA80_HAEIN TEGTYAPFTFHDK-SGKLTGFDVEVIRKVAEKLGLKVEFKETQWDAMYAG
FLIY_ECOLI LEGTYPPFSFQGD-DGKLTGFEVEFAQQLAKHLGVEASLKPTKWDGMLAS
E_coli_GlnH TDTAFVPFEFKQG--DKYVGFDVDLWAAIAKELKLDYELKPMDFSGIIPA
Deinococcus_radiodurans MEGTYPPFTSKNE-QGELVGFDVDIAKAVAQKLNLKPEFVLTEWSGILAG
HISJ_E_COLI TDPTYAPFESKNS-QGELVGFDIDLAKELCKRINTQCTFVENPLDALIPS
V_Harveyi_PATH LETGRIDTISNQITMTDARKAKYLFADPYVVDG-AQITVRKGNDSIQGVE
B_subtilis_YXEM LQTGKLDTISNQVAVTDERKETYNFTKPYAYAG-TQIVVKKDNTDIKSVD
B_subtilis_GlnH_homo_YCKK LNSKRFDVVANQVG-KTDREDKYDFSDKYTTSR-AVVVTKKDNNDIKSEA
YA80_HAEIN LNAKRFDVIANQTNPSPERLKKYSFTTPYNYSG-GVIVTKSSDNSIKSFE
FLIY_ECOLI LDSKRIDVVINQVTISDERKKKYDFSTPYTISGIQALVKKGNEGTIKTAD
E_coli_GlnH LQTKNVDLALAGITITDERKKAIDFSDGYYKSG-LLVMVKANNNDVKSVK
Deinococcus_radiodurans LQANKYDVIVNQVGITPERQNSIGFSQPYAYSRPEIIVAKNNTFNPQSLA
HISJ_E_COLI LKAKKIDAIMSSLSITEKRQQEIAFTDKLYAADSRLVVAKNSDIQP-TVE
V_Harveyi_PATH DLAGKTVAVNLGSNFEQLLRDYDKDGKINIKTYDT--GIEHDVALGRADA
B_subtilis_YXEM DLKGKTVAAVLGSNHAKNLESKDPDKKINIKTYETQEGTLKDVAYGRVDA
B_subtilis_GlnH_homo_YCKK DVKGKTSAQSLTSNYNKLATN----AGAKVEGVEGMAQALQMIQQARVDM
YA80_HAEIN DLKGRKSAQSATSNWGKDAKA----AGAQILVVDGLAQSLELIKQGRAEA
FLIY_ECOLI DLKGKKVGVGLGTNYEEWLRQNV--QGVDVRTYDDDPTKYQDLRVGRIDA
E_coli_GlnH DLDGKVVAVKSGTGSVDYAKAN--IKTKDLRQFPNIDNAYMELGTNRADA
Deinococcus_radiodurans DLKGKRVGSTLGSNYEKQLIDTG---DIKIVTYPGAPEILADLVAGRIDA
HISJ_E_COLI SLKGKRVGVLQGTTQETFGNEHWAPKGIEIVSYQGQDNIYSDLTAGRIDA
V_Harveyi_PATH FIMDRLSALE-LIKKT-GLPLQLAGEPFETI-----QNAWPFVDNEKGRK
B_subtilis_YXEM YVNSRTVLIA-QIKKT-GLPLKLAGDPIVYE-----QVAFPFAKDDAHDK
B_subtilis_GlnH_homo_YCKK TYNDKLAVLN-YLKTSGNKNVKIAFETGEPQ-----STYFTFRKGS--GE
YA80_HAEIN TINDKLAVLD-YFKQHPNSGLKIAYDRGDKT-----PTAFAFLQGE--DA
FLIY_ECOLI ILVDRLAALD-LVKKT-NDTLAVTGEAFSRQ-----ESGVALRKGN--ED
E_coli_GlnH VLHDTPNILY-FIKTAGNGQFKAVGDSLEAQ-----QYGIAFPKGS--DE
Deinococcus_radiodurans AYNDRLVVNY-IINDQ-KLPVRGAGQIGDAA-----PVGIALKKGN--SA
HISJ_E_COLI AFQDEVAASEGFLKQPVGKDYKFGGPSVKDEKLFGVGTGMGLRKED--NE
V_Harveyi_PATH LQAEVNKALAEMRADGTVEKISVKWFGADITK----
B_subtilis_YXEM LRKKVNKALDELRKDGTLKKLSEKYFNEDITVEQKH
B_subtilis_GlnH_homo_YCKK VVDQVNKALKEMKEDGTLSKISKKWFGEDVSK----
YA80_HAEIN LITKFNQVLEALRQDGTLKQISIEWFGYDITQ----
FLIY_ECOLI LLKAVNDAIAEMQKDGTLQALSEKWFGADVTK----
E_coli_GlnH LRDKVNGALKTLRENGTYNEIYKKWFGTEPK-----
Deinococcus_radiodurans LKDQIDKALTEMRSDGTFEKISQKWFGQDVGQP---
HISJ_E_COLI LREALNKAFAEMRADGTYEKLAKKYFDFDVYGG---
;
end;
"""
# This example uses DNA, from here:
# http://www.molecularevolution.org/resources/fileformats/
nxs_example2 = \
"""#NEXUS
Begin data;
Dimensions ntax=10 nchar=705;
Format datatype=dna interleave=yes gap=- missing=?;
Matrix
Cow ATGGCATATCCCATACAACTAGGATTCCAAGATGCAACATCACCAATCATAGAAGAACTA
Carp ATGGCACACCCAACGCAACTAGGTTTCAAGGACGCGGCCATACCCGTTATAGAGGAACTT
Chicken ATGGCCAACCACTCCCAACTAGGCTTTCAAGACGCCTCATCCCCCATCATAGAAGAGCTC
Human ATGGCACATGCAGCGCAAGTAGGTCTACAAGACGCTACTTCCCCTATCATAGAAGAGCTT
Loach ATGGCACATCCCACACAATTAGGATTCCAAGACGCGGCCTCACCCGTAATAGAAGAACTT
Mouse ATGGCCTACCCATTCCAACTTGGTCTACAAGACGCCACATCCCCTATTATAGAAGAGCTA
Rat ATGGCTTACCCATTTCAACTTGGCTTACAAGACGCTACATCACCTATCATAGAAGAACTT
Seal ATGGCATACCCCCTACAAATAGGCCTACAAGATGCAACCTCTCCCATTATAGAGGAGTTA
Whale ATGGCATATCCATTCCAACTAGGTTTCCAAGATGCAGCATCACCCATCATAGAAGAGCTC
Frog ATGGCACACCCATCACAATTAGGTTTTCAAGACGCAGCCTCTCCAATTATAGAAGAATTA
Cow CTTCACTTTCATGACCACACGCTAATAATTGTCTTCTTAATTAGCTCATTAGTACTTTAC
Carp CTTCACTTCCACGACCACGCATTAATAATTGTGCTCCTAATTAGCACTTTAGTTTTATAT
Chicken GTTGAATTCCACGACCACGCCCTGATAGTCGCACTAGCAATTTGCAGCTTAGTACTCTAC
Human ATCACCTTTCATGATCACGCCCTCATAATCATTTTCCTTATCTGCTTCCTAGTCCTGTAT
Loach CTTCACTTCCATGACCATGCCCTAATAATTGTATTTTTGATTAGCGCCCTAGTACTTTAT
Mouse ATAAATTTCCATGATCACACACTAATAATTGTTTTCCTAATTAGCTCCTTAGTCCTCTAT
Rat ACAAACTTTCATGACCACACCCTAATAATTGTATTCCTCATCAGCTCCCTAGTACTTTAT
Seal CTACACTTCCATGACCACACATTAATAATTGTGTTCCTAATTAGCTCATTAGTACTCTAC
Whale CTACACTTTCACGATCATACACTAATAATCGTTTTTCTAATTAGCTCTTTAGTTCTCTAC
Frog CTTCACTTCCACGACCATACCCTCATAGCCGTTTTTCTTATTAGTACGCTAGTTCTTTAC
Cow ATTATTTCACTAATACTAACGACAAAGCTGACCCATACAAGCACGATAGATGCACAAGAA
Carp ATTATTACTGCAATGGTATCAACTAAACTTACTAATAAATATATTCTAGACTCCCAAGAA
Chicken CTTCTAACTCTTATACTTATAGAAAAACTATCA---TCAAACACCGTAGATGCCCAAGAA
Human GCCCTTTTCCTAACACTCACAACAAAACTAACTAATACTAACATCTCAGACGCTCAGGAA
Loach GTTATTATTACAACCGTCTCAACAAAACTCACTAACATATATATTTTGGACTCACAAGAA
Mouse ATCATCTCGCTAATATTAACAACAAAACTAACACATACAAGCACAATAGATGCACAAGAA
Rat ATTATTTCACTAATACTAACAACAAAACTAACACACACAAGCACAATAGACGCCCAAGAA
Seal ATTATCTCACTTATACTAACCACGAAACTCACCCACACAAGTACAATAGACGCACAAGAA
Whale ATTATTACCCTAATGCTTACAACCAAATTAACACATACTAGTACAATAGACGCCCAAGAA
Frog ATTATTACTATTATAATAACTACTAAACTAACTAATACAAACCTAATGGACGCACAAGAG
Cow GTAGAGACAATCTGAACCATTCTGCCCGCCATCATCTTAATTCTAATTGCTCTTCCTTCT
Carp ATCGAAATCGTATGAACCATTCTACCAGCCGTCATTTTAGTACTAATCGCCCTGCCCTCC
Chicken GTTGAACTAATCTGAACCATCCTACCCGCTATTGTCCTAGTCCTGCTTGCCCTCCCCTCC
Human ATAGAAACCGTCTGAACTATCCTGCCCGCCATCATCCTAGTCCTCATCGCCCTCCCATCC
Loach ATTGAAATCGTATGAACTGTGCTCCCTGCCCTAATCCTCATTTTAATCGCCCTCCCCTCA
Mouse GTTGAAACCATTTGAACTATTCTACCAGCTGTAATCCTTATCATAATTGCTCTCCCCTCT
Rat GTAGAAACAATTTGAACAATTCTCCCAGCTGTCATTCTTATTCTAATTGCCCTTCCCTCC
Seal GTGGAAACGGTGTGAACGATCCTACCCGCTATCATTTTAATTCTCATTGCCCTACCATCA
Whale GTAGAAACTGTCTGAACTATCCTCCCAGCCATTATCTTAATTTTAATTGCCTTGCCTTCA
Frog ATCGAAATAGTGTGAACTATTATACCAGCTATTAGCCTCATCATAATTGCCCTTCCATCC
Cow TTACGAATTCTATACATAATAGATGAAATCAATAACCCATCTCTTACAGTAAAAACCATA
Carp CTACGCATCCTGTACCTTATAGACGAAATTAACGACCCTCACCTGACAATTAAAGCAATA
Chicken CTCCAAATCCTCTACATAATAGACGAAATCGACGAACCTGATCTCACCCTAAAAGCCATC
Human CTACGCATCCTTTACATAACAGACGAGGTCAACGATCCCTCCCTTACCATCAAATCAATT
Loach CTACGAATTCTATATCTTATAGACGAGATTAATGACCCCCACCTAACAATTAAGGCCATG
Mouse CTACGCATTCTATATATAATAGACGAAATCAACAACCCCGTATTAACCGTTAAAACCATA
Rat CTACGAATTCTATACATAATAGACGAGATTAATAACCCAGTTCTAACAGTAAAAACTATA
Seal TTACGAATCCTCTACATAATGGACGAGATCAATAACCCTTCCTTGACCGTAAAAACTATA
Whale TTACGGATCCTTTACATAATAGACGAAGTCAATAACCCCTCCCTCACTGTAAAAACAATA
Frog CTTCGTATCCTATATTTAATAGATGAAGTTAATGATCCACACTTAACAATTAAAGCAATC
Cow GGACATCAGTGATACTGAAGCTATGAGTATACAGATTATGAGGACTTAAGCTTCGACTCC
Carp GGACACCAATGATACTGAAGTTACGAGTATACAGACTATGAAAATCTAGGATTCGACTCC
Chicken GGACACCAATGATACTGAACCTATGAATACACAGACTTCAAGGACCTCTCATTTGACTCC
Human GGCCACCAATGGTACTGAACCTACGAGTACACCGACTACGGCGGACTAATCTTCAACTCC
Loach GGGCACCAATGATACTGAAGCTACGAGTATACTGATTATGAAAACTTAAGTTTTGACTCC
Mouse GGGCACCAATGATACTGAAGCTACGAATATACTGACTATGAAGACCTATGCTTTGATTCA
Rat GGACACCAATGATACTGAAGCTATGAATATACTGACTATGAAGACCTATGCTTTGACTCC
Seal GGACATCAGTGATACTGAAGCTATGAGTACACAGACTACGAAGACCTGAACTTTGACTCA
Whale GGTCACCAATGATATTGAAGCTATGAGTATACCGACTACGAAGACCTAAGCTTCGACTCC
Frog GGCCACCAATGATACTGAAGCTACGAATATACTAACTATGAGGATCTCTCATTTGACTCT
Cow TACATAATTCCAACATCAGAATTAAAGCCAGGGGAGCTACGACTATTAGAAGTCGATAAT
Carp TATATAGTACCAACCCAAGACCTTGCCCCCGGACAATTCCGACTTCTGGAAACAGACCAC
Chicken TACATAACCCCAACAACAGACCTCCCCCTAGGCCACTTCCGCCTACTAGAAGTCGACCAT
Human TACATACTTCCCCCATTATTCCTAGAACCAGGCGACCTGCGACTCCTTGACGTTGACAAT
Loach TACATAATCCCCACCCAGGACCTAACCCCTGGACAATTCCGGCTACTAGAGACAGACCAC
Mouse TATATAATCCCAACAAACGACCTAAAACCTGGTGAACTACGACTGCTAGAAGTTGATAAC
Rat TACATAATCCCAACCAATGACCTAAAACCAGGTGAACTTCGTCTATTAGAAGTTGATAAT
Seal TATATGATCCCCACACAAGAACTAAAGCCCGGAGAACTACGACTGCTAGAAGTAGACAAT
Whale TATATAATCCCAACATCAGACCTAAAGCCAGGAGAACTACGATTATTAGAAGTAGATAAC
Frog TATATAATTCCAACTAATGACCTTACCCCTGGACAATTCCGGCTGCTAGAAGTTGATAAT
Cow CGAGTTGTACTACCAATAGAAATAACAATCCGAATGTTAGTCTCCTCTGAAGACGTATTA
Carp CGAATAGTTGTTCCAATAGAATCCCCAGTCCGTGTCCTAGTATCTGCTGAAGACGTGCTA
Chicken CGCATTGTAATCCCCATAGAATCCCCCATTCGAGTAATCATCACCGCTGATGACGTCCTC
Human CGAGTAGTACTCCCGATTGAAGCCCCCATTCGTATAATAATTACATCACAAGACGTCTTG
Loach CGAATGGTTGTTCCCATAGAATCCCCTATTCGCATTCTTGTTTCCGCCGAAGATGTACTA
Mouse CGAGTCGTTCTGCCAATAGAACTTCCAATCCGTATATTAATTTCATCTGAAGACGTCCTC
Rat CGGGTAGTCTTACCAATAGAACTTCCAATTCGTATACTAATCTCATCCGAAGACGTCCTG
Seal CGAGTAGTCCTCCCAATAGAAATAACAATCCGCATACTAATCTCATCAGAAGATGTACTC
Whale CGAGTTGTCTTACCTATAGAAATAACAATCCGAATATTAGTCTCATCAGAAGACGTACTC
Frog CGAATAGTAGTCCCAATAGAATCTCCAACCCGACTTTTAGTTACAGCCGAAGACGTCCTC
Cow CACTCATGAGCTGTGCCCTCTCTAGGACTAAAAACAGACGCAATCCCAGGCCGTCTAAAC
Carp CATTCTTGAGCTGTTCCATCCCTTGGCGTAAAAATGGACGCAGTCCCAGGACGACTAAAT
Chicken CACTCATGAGCCGTACCCGCCCTCGGGGTAAAAACAGACGCAATCCCTGGACGACTAAAT
Human CACTCATGAGCTGTCCCCACATTAGGCTTAAAAACAGATGCAATTCCCGGACGTCTAAAC
Loach CACTCCTGGGCCCTTCCAGCCATGGGGGTAAAGATAGACGCGGTCCCAGGACGCCTTAAC
Mouse CACTCATGAGCAGTCCCCTCCCTAGGACTTAAAACTGATGCCATCCCAGGCCGACTAAAT
Rat CACTCATGAGCCATCCCTTCACTAGGGTTAAAAACCGACGCAATCCCCGGCCGCCTAAAC
Seal CACTCATGAGCCGTACCGTCCCTAGGACTAAAAACTGATGCTATCCCAGGACGACTAAAC
Whale CACTCATGGGCCGTACCCTCCTTGGGCCTAAAAACAGATGCAATCCCAGGACGCCTAAAC
Frog CACTCGTGAGCTGTACCCTCCTTGGGTGTCAAAACAGATGCAATCCCAGGACGACTTCAT
Cow CAAACAACCCTTATATCGTCCCGTCCAGGCTTATATTACGGTCAATGCTCAGAAATTTGC
Carp CAAGCCGCCTTTATTGCCTCACGCCCAGGGGTCTTTTACGGACAATGCTCTGAAATTTGT
Chicken CAAACCTCCTTCATCACCACTCGACCAGGAGTGTTTTACGGACAATGCTCAGAAATCTGC
Human CAAACCACTTTCACCGCTACACGACCGGGGGTATACTACGGTCAATGCTCTGAAATCTGT
Loach CAAACCGCCTTTATTGCCTCCCGCCCCGGGGTATTCTATGGGCAATGCTCAGAAATCTGT
Mouse CAAGCAACAGTAACATCAAACCGACCAGGGTTATTCTATGGCCAATGCTCTGAAATTTGT
Rat CAAGCTACAGTCACATCAAACCGACCAGGTCTATTCTATGGCCAATGCTCTGAAATTTGC
Seal CAAACAACCCTAATAACCATACGACCAGGACTGTACTACGGTCAATGCTCAGAAATCTGT
Whale CAAACAACCTTAATATCAACACGACCAGGCCTATTTTATGGACAATGCTCAGAGATCTGC
Frog CAAACATCATTTATTGCTACTCGTCCGGGAGTATTTTACGGACAATGTTCAGAAATTTGC
Cow GGGTCAAACCACAGTTTCATACCCATTGTCCTTGAGTTAGTCCCACTAAAGTACTTTGAA
Carp GGAGCTAATCACAGCTTTATACCAATTGTAGTTGAAGCAGTACCTCTCGAACACTTCGAA
Chicken GGAGCTAACCACAGCTACATACCCATTGTAGTAGAGTCTACCCCCCTAAAACACTTTGAA
Human GGAGCAAACCACAGTTTCATGCCCATCGTCCTAGAATTAATTCCCCTAAAAATCTTTGAA
Loach GGAGCAAACCACAGCTTTATACCCATCGTAGTAGAAGCGGTCCCACTATCTCACTTCGAA
Mouse GGATCTAACCATAGCTTTATGCCCATTGTCCTAGAAATGGTTCCACTAAAATATTTCGAA
Rat GGCTCAAATCACAGCTTCATACCCATTGTACTAGAAATAGTGCCTCTAAAATATTTCGAA
Seal GGTTCAAACCACAGCTTCATACCTATTGTCCTCGAATTGGTCCCACTATCCCACTTCGAG
Whale GGCTCAAACCACAGTTTCATACCAATTGTCCTAGAACTAGTACCCCTAGAAGTCTTTGAA
Frog GGAGCAAACCACAGCTTTATACCAATTGTAGTTGAAGCAGTACCGCTAACCGACTTTGAA
Cow AAATGATCTGCGTCAATATTA---------------------TAA
Carp AACTGATCCTCATTAATACTAGAAGACGCCTCGCTAGGAAGCTAA
Chicken GCCTGATCCTCACTA------------------CTGTCATCTTAA
Human ATA---------------------GGGCCCGTATTTACCCTATAG
Loach AACTGGTCCACCCTTATACTAAAAGACGCCTCACTAGGAAGCTAA
Mouse AACTGATCTGCTTCAATAATT---------------------TAA
Rat AACTGATCAGCTTCTATAATT---------------------TAA
Seal AAATGATCTACCTCAATGCTT---------------------TAA
Whale AAATGATCTGTATCAATACTA---------------------TAA
Frog AACTGATCTTCATCAATACTA---GAAGCATCACTA------AGA
;
End;
"""
# This example uses amino acids, from here:
# http://www.molecularevolution.org/resources/fileformats/
nxs_example3 = \
"""#NEXUS
Begin data;
Dimensions ntax=10 nchar=234;
Format datatype=protein gap=- interleave;
Matrix
Cow MAYPMQLGFQDATSPIMEELLHFHDHTLMIVFLISSLVLYIISLMLTTKLTHTSTMDAQE
Carp MAHPTQLGFKDAAMPVMEELLHFHDHALMIVLLISTLVLYIITAMVSTKLTNKYILDSQE
Chicken MANHSQLGFQDASSPIMEELVEFHDHALMVALAICSLVLYLLTLMLMEKLS-SNTVDAQE
Human MAHAAQVGLQDATSPIMEELITFHDHALMIIFLICFLVLYALFLTLTTKLTNTNISDAQE
Loach MAHPTQLGFQDAASPVMEELLHFHDHALMIVFLISALVLYVIITTVSTKLTNMYILDSQE
Mouse MAYPFQLGLQDATSPIMEELMNFHDHTLMIVFLISSLVLYIISLMLTTKLTHTSTMDAQE
Rat MAYPFQLGLQDATSPIMEELTNFHDHTLMIVFLISSLVLYIISLMLTTKLTHTSTMDAQE
Seal MAYPLQMGLQDATSPIMEELLHFHDHTLMIVFLISSLVLYIISLMLTTKLTHTSTMDAQE
Whale MAYPFQLGFQDAASPIMEELLHFHDHTLMIVFLISSLVLYIITLMLTTKLTHTSTMDAQE
Frog MAHPSQLGFQDAASPIMEELLHFHDHTLMAVFLISTLVLYIITIMMTTKLTNTNLMDAQE
Cow VETIWTILPAIILILIALPSLRILYMMDEINNPSLTVKTMGHQWYWSYEYTDYEDLSFDS
Carp IEIVWTILPAVILVLIALPSLRILYLMDEINDPHLTIKAMGHQWYWSYEYTDYENLGFDS
Chicken VELIWTILPAIVLVLLALPSLQILYMMDEIDEPDLTLKAIGHQWYWTYEYTDFKDLSFDS
Human METVWTILPAIILVLIALPSLRILYMTDEVNDPSLTIKSIGHQWYWTYEYTDYGGLIFNS
Loach IEIVWTVLPALILILIALPSLRILYLMDEINDPHLTIKAMGHQWYWSYEYTDYENLSFDS
Mouse VETIWTILPAVILIMIALPSLRILYMMDEINNPVLTVKTMGHQWYWSYEYTDYEDLCFDS
Rat VETIWTILPAVILILIALPSLRILYMMDEINNPVLTVKTMGHQWYWSYEYTDYEDLCFDS
Seal VETVWTILPAIILILIALPSLRILYMMDEINNPSLTVKTMGHQWYWSYEYTDYEDLNFDS
Whale VETVWTILPAIILILIALPSLRILYMMDEVNNPSLTVKTMGHQWYWSYEYTDYEDLSFDS
Frog IEMVWTIMPAISLIMIALPSLRILYLMDEVNDPHLTIKAIGHQWYWSYEYTNYEDLSFDS
Cow YMIPTSELKPGELRLLEVDNRVVLPMEMTIRMLVSSEDVLHSWAVPSLGLKTDAIPGRLN
Carp YMVPTQDLAPGQFRLLETDHRMVVPMESPVRVLVSAEDVLHSWAVPSLGVKMDAVPGRLN
Chicken YMTPTTDLPLGHFRLLEVDHRIVIPMESPIRVIITADDVLHSWAVPALGVKTDAIPGRLN
Human YMLPPLFLEPGDLRLLDVDNRVVLPIEAPIRMMITSQDVLHSWAVPTLGLKTDAIPGRLN
Loach YMIPTQDLTPGQFRLLETDHRMVVPMESPIRILVSAEDVLHSWALPAMGVKMDAVPGRLN
Mouse YMIPTNDLKPGELRLLEVDNRVVLPMELPIRMLISSEDVLHSWAVPSLGLKTDAIPGRLN
Rat YMIPTNDLKPGELRLLEVDNRVVLPMELPIRMLISSEDVLHSWAIPSLGLKTDAIPGRLN
Seal YMIPTQELKPGELRLLEVDNRVVLPMEMTIRMLISSEDVLHSWAVPSLGLKTDAIPGRLN
Whale YMIPTSDLKPGELRLLEVDNRVVLPMEMTIRMLVSSEDVLHSWAVPSLGLKTDAIPGRLN
Frog YMIPTNDLTPGQFRLLEVDNRMVVPMESPTRLLVTAEDVLHSWAVPSLGVKTDAIPGRLH
Cow QTTLMSSRPGLYYGQCSEICGSNHSFMPIVLELVPLKYFEKWSASML-------
Carp QAAFIASRPGVFYGQCSEICGANHSFMPIVVEAVPLEHFENWSSLMLEDASLGS
Chicken QTSFITTRPGVFYGQCSEICGANHSYMPIVVESTPLKHFEAWSSL------LSS
Human QTTFTATRPGVYYGQCSEICGANHSFMPIVLELIPLKIFEM-------GPVFTL
Loach QTAFIASRPGVFYGQCSEICGANHSFMPIVVEAVPLSHFENWSTLMLKDASLGS
Mouse QATVTSNRPGLFYGQCSEICGSNHSFMPIVLEMVPLKYFENWSASMI-------
Rat QATVTSNRPGLFYGQCSEICGSNHSFMPIVLEMVPLKYFENWSASMI-------
Seal QTTLMTMRPGLYYGQCSEICGSNHSFMPIVLELVPLSHFEKWSTSML-------
Whale QTTLMSTRPGLFYGQCSEICGSNHSFMPIVLELVPLEVFEKWSVSML-------
Frog QTSFIATRPGVFYGQCSEICGANHSFMPIVVEAVPLTDFENWSSSML-EASL--
;
End;
"""
# This example with its slightly odd (partial) annotation is from here:
# http://www.cgb.ki.se/cgb/groups/sonnhammer/Stockholm.html
sth_example = \
"""# STOCKHOLM 1.0
#=GF ID CBS
#=GF AC PF00571
#=GF DE CBS domain
#=GF AU Bateman A
#=GF CC CBS domains are small intracellular modules mostly found
#=GF CC in 2 or four copies within a protein.
#=GF SQ 67
#=GS O31698/18-71 AC O31698
#=GS O83071/192-246 AC O83071
#=GS O83071/259-312 AC O83071
#=GS O31698/88-139 AC O31698
#=GS O31698/88-139 OS Bacillus subtilis
O83071/192-246 MTCRAQLIAVPRASSLAE..AIACAQKM....RVSRVPVYERS
#=GR O83071/192-246 SA 999887756453524252..55152525....36463774777
O83071/259-312 MQHVSAPVFVFECTRLAY..VQHKLRAH....SRAVAIVLDEY
#=GR O83071/259-312 SS CCCCCHHHHHHHHHHHHH..EEEEEEEE....EEEEEEEEEEE
O31698/18-71 MIEADKVAHVQVGNNLEH..ALLVLTKT....GYTAIPVLDPS
#=GR O31698/18-71 SS CCCHHHHHHHHHHHHHHH..EEEEEEEE....EEEEEEEEHHH
O31698/88-139 EVMLTDIPRLHINDPIMK..GFGMVINN......GFVCVENDE
#=GR O31698/88-139 SS CCCCCCCHHHHHHHHHHH..HEEEEEEE....EEEEEEEEEEH
#=GC SS_cons CCCCCHHHHHHHHHHHHH..EEEEEEEE....EEEEEEEEEEH
O31699/88-139 EVMLTDIPRLHINDPIMK..GFGMVINN......GFVCVENDE
#=GR O31699/88-139 AS ________________*__________________________
#=GR_O31699/88-139_IN ____________1______________2__________0____
//
"""
# Interlaced example from BioPerl documentation. Also note the blank line.
# http://www.bioperl.org/wiki/Stockholm_multiple_alignment_format
sth_example2 = \
"""# STOCKHOLM 1.0
#=GC SS_cons .................<<<<<<<<...<<<<<<<........>>>>>>>..
AP001509.1 UUAAUCGAGCUCAACACUCUUCGUAUAUCCUC-UCAAUAUGG-GAUGAGGGU
#=GR AP001509.1 SS -----------------<<<<<<<<---..<<-<<-------->>->>..--
AE007476.1 AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGUGAAU-UGG-CACGA-CGU
#=GR AE007476.1 SS -----------------<<<<<<<<-----<<.<<-------->>.>>----
#=GC SS_cons ......<<<<<<<.......>>>>>>>..>>>>>>>>...............
AP001509.1 CUCUAC-AGGUA-CCGUAAA-UACCUAGCUACGAAAAGAAUGCAGUUAAUGU
#=GR AP001509.1 SS -------<<<<<--------->>>>>--->>>>>>>>---------------
AE007476.1 UUCUACAAGGUG-CCGG-AA-CACCUAACAAUAAGUAAGUCAGCAGUGAGAU
#=GR AE007476.1 SS ------.<<<<<--------->>>>>.-->>>>>>>>---------------
//"""
# Sample GenBank record from here:
# http://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html
gbk_example = \
"""LOCUS SCU49845 5028 bp DNA PLN 21-JUN-1999
DEFINITION Saccharomyces cerevisiae TCP1-beta gene, partial cds, and Axl2p
(AXL2) and Rev7p (REV7) genes, complete cds.
ACCESSION U49845
VERSION U49845.1 GI:1293613
KEYWORDS .
SOURCE Saccharomyces cerevisiae (baker's yeast)
ORGANISM Saccharomyces cerevisiae
Eukaryota; Fungi; Ascomycota; Saccharomycotina; Saccharomycetes;
Saccharomycetales; Saccharomycetaceae; Saccharomyces.
REFERENCE 1 (bases 1 to 5028)
AUTHORS Torpey,L.E., Gibbs,P.E., Nelson,J. and Lawrence,C.W.
TITLE Cloning and sequence of REV7, a gene whose function is required for
DNA damage-induced mutagenesis in Saccharomyces cerevisiae
JOURNAL Yeast 10 (11), 1503-1509 (1994)
PUBMED 7871890
REFERENCE 2 (bases 1 to 5028)
AUTHORS Roemer,T., Madden,K., Chang,J. and Snyder,M.
TITLE Selection of axial growth sites in yeast requires Axl2p, a novel
plasma membrane glycoprotein
JOURNAL Genes Dev. 10 (7), 777-793 (1996)
PUBMED 8846915
REFERENCE 3 (bases 1 to 5028)
AUTHORS Roemer,T.
TITLE Direct Submission
JOURNAL Submitted (22-FEB-1996) Terry Roemer, Biology, Yale University, New
Haven, CT, USA
FEATURES Location/Qualifiers
source 1..5028
/organism="Saccharomyces cerevisiae"
/db_xref="taxon:4932"
/chromosome="IX"
/map="9"
CDS <1..206
/codon_start=3
/product="TCP1-beta"
/protein_id="AAA98665.1"
/db_xref="GI:1293614"
/translation="SSIYNGISTSGLDLNNGTIADMRQLGIVESYKLKRAVVSSASEA
AEVLLRVDNIIRARPRTANRQHM"
gene 687..3158
/gene="AXL2"
CDS 687..3158
/gene="AXL2"
/note="plasma membrane glycoprotein"
/codon_start=1
/function="required for axial budding pattern of S.
cerevisiae"
/product="Axl2p"
/protein_id="AAA98666.1"
/db_xref="GI:1293615"
/translation="MTQLQISLLLTATISLLHLVVATPYEAYPIGKQYPPVARVNESF
TFQISNDTYKSSVDKTAQITYNCFDLPSWLSFDSSSRTFSGEPSSDLLSDANTTLYFN
VILEGTDSADSTSLNNTYQFVVTNRPSISLSSDFNLLALLKNYGYTNGKNALKLDPNE
VFNVTFDRSMFTNEESIVSYYGRSQLYNAPLPNWLFFDSGELKFTGTAPVINSAIAPE
TSYSFVIIATDIEGFSAVEVEFELVIGAHQLTTSIQNSLIINVTDTGNVSYDLPLNYV
YLDDDPISSDKLGSINLLDAPDWVALDNATISGSVPDELLGKNSNPANFSVSIYDTYG
DVIYFNFEVVSTTDLFAISSLPNINATRGEWFSYYFLPSQFTDYVNTNVSLEFTNSSQ
DHDWVKFQSSNLTLAGEVPKNFDKLSLGLKANQGSQSQELYFNIIGMDSKITHSNHSA
NATSTRSSHHSTSTSSYTSSTYTAKISSTSAAATSSAPAALPAANKTSSHNKKAVAIA
CGVAIPLGVILVALICFLIFWRRRRENPDDENLPHAISGPDLNNPANKPNQENATPLN
NPFDDDASSYDDTSIARRLAALNTLKLDNHSATESDISSVDEKRDSLSGMNTYNDQFQ
SQSKEELLAKPPVQPPESPFFDPQNRSSSVYMDSEPAVNKSWRYTGNLSPVSDIVRDS
YGSQKTVDTEKLFDLEAPEKEKRTSRDVTMSSLDPWNSNISPSPVRKSVTPSPYNVTK
HRNRHLQNIQDSQSGKNGITPTTMSTSSSDDFVPVKDGENFCWVHSMEPDRRPSKKRL
VDFSNKSNVNVGQVKDIHGRIPEML"
gene complement(3300..4037)
/gene="REV7"
CDS complement(3300..4037)
/gene="REV7"
/codon_start=1
/product="Rev7p"
/protein_id="AAA98667.1"
/db_xref="GI:1293616"
/translation="MNRWVEKWLRVYLKCYINLILFYRNVYPPQSFDYTTYQSFNLPQ
FVPINRHPALIDYIEELILDVLSKLTHVYRFSICIINKKNDLCIEKYVLDFSELQHVD
KDDQIITETEVFDEFRSSLNSLIMHLEKLPKVNDDTITFEAVINAIELELGHKLDRNR
RVDSLEEKAEIERDSNWVKCQEDENLPDNNGFQPPKIKLTSLVGSDVGPLIIHQFSEK
LISGDDKILNGVYSQYEEGESIFGSLF"
ORIGIN
1 gatcctccat atacaacggt atctccacct caggtttaga tctcaacaac ggaaccattg
61 ccgacatgag acagttaggt atcgtcgaga gttacaagct aaaacgagca gtagtcagct
121 ctgcatctga agccgctgaa gttctactaa gggtggataa catcatccgt gcaagaccaa
181 gaaccgccaa tagacaacat atgtaacata tttaggatat acctcgaaaa taataaaccg
241 ccacactgtc attattataa ttagaaacag aacgcaaaaa ttatccacta tataattcaa
301 agacgcgaaa aaaaaagaac aacgcgtcat agaacttttg gcaattcgcg tcacaaataa
361 attttggcaa cttatgtttc ctcttcgagc agtactcgag ccctgtctca agaatgtaat
421 aatacccatc gtaggtatgg ttaaagatag catctccaca acctcaaagc tccttgccga
481 gagtcgccct cctttgtcga gtaattttca cttttcatat gagaacttat tttcttattc
541 tttactctca catcctgtag tgattgacac tgcaacagcc accatcacta gaagaacaga
601 acaattactt aatagaaaaa ttatatcttc ctcgaaacga tttcctgctt ccaacatcta
661 cgtatatcaa gaagcattca cttaccatga cacagcttca gatttcatta ttgctgacag
721 ctactatatc actactccat ctagtagtgg ccacgcccta tgaggcatat cctatcggaa
781 aacaataccc cccagtggca agagtcaatg aatcgtttac atttcaaatt tccaatgata
841 cctataaatc gtctgtagac aagacagctc aaataacata caattgcttc gacttaccga
901 gctggctttc gtttgactct agttctagaa cgttctcagg tgaaccttct tctgacttac
961 tatctgatgc gaacaccacg ttgtatttca atgtaatact cgagggtacg gactctgccg
1021 acagcacgtc tttgaacaat acataccaat ttgttgttac aaaccgtcca tccatctcgc
1081 tatcgtcaga tttcaatcta ttggcgttgt taaaaaacta tggttatact aacggcaaaa
1141 acgctctgaa actagatcct aatgaagtct tcaacgtgac ttttgaccgt tcaatgttca
1201 ctaacgaaga atccattgtg tcgtattacg gacgttctca gttgtataat gcgccgttac
1261 ccaattggct gttcttcgat tctggcgagt tgaagtttac tgggacggca ccggtgataa
1321 actcggcgat tgctccagaa acaagctaca gttttgtcat catcgctaca gacattgaag
1381 gattttctgc cgttgaggta gaattcgaat tagtcatcgg ggctcaccag ttaactacct
1441 ctattcaaaa tagtttgata atcaacgtta ctgacacagg taacgtttca tatgacttac
1501 ctctaaacta tgtttatctc gatgacgatc ctatttcttc tgataaattg ggttctataa
1561 acttattgga tgctccagac tgggtggcat tagataatgc taccatttcc gggtctgtcc
1621 cagatgaatt actcggtaag aactccaatc ctgccaattt ttctgtgtcc atttatgata
1681 cttatggtga tgtgatttat ttcaacttcg aagttgtctc cacaacggat ttgtttgcca
1741 ttagttctct tcccaatatt aacgctacaa ggggtgaatg gttctcctac tattttttgc
1801 cttctcagtt tacagactac gtgaatacaa acgtttcatt agagtttact aattcaagcc
1861 aagaccatga ctgggtgaaa ttccaatcat ctaatttaac attagctgga gaagtgccca
1921 agaatttcga caagctttca ttaggtttga aagcgaacca aggttcacaa tctcaagagc
1981 tatattttaa catcattggc atggattcaa agataactca ctcaaaccac agtgcgaatg
2041 caacgtccac aagaagttct caccactcca cctcaacaag ttcttacaca tcttctactt
2101 acactgcaaa aatttcttct acctccgctg ctgctacttc ttctgctcca gcagcgctgc
2161 cagcagccaa taaaacttca tctcacaata aaaaagcagt agcaattgcg tgcggtgttg
2221 ctatcccatt aggcgttatc ctagtagctc tcatttgctt cctaatattc tggagacgca
2281 gaagggaaaa tccagacgat gaaaacttac cgcatgctat tagtggacct gatttgaata
2341 atcctgcaaa taaaccaaat caagaaaacg ctacaccttt gaacaacccc tttgatgatg
2401 atgcttcctc gtacgatgat acttcaatag caagaagatt ggctgctttg aacactttga
2461 aattggataa ccactctgcc actgaatctg atatttccag cgtggatgaa aagagagatt
2521 ctctatcagg tatgaataca tacaatgatc agttccaatc ccaaagtaaa gaagaattat
2581 tagcaaaacc cccagtacag cctccagaga gcccgttctt tgacccacag aataggtctt
2641 cttctgtgta tatggatagt gaaccagcag taaataaatc ctggcgatat actggcaacc
2701 tgtcaccagt ctctgatatt gtcagagaca gttacggatc acaaaaaact gttgatacag
2761 aaaaactttt cgatttagaa gcaccagaga aggaaaaacg tacgtcaagg gatgtcacta
2821 tgtcttcact ggacccttgg aacagcaata ttagcccttc tcccgtaaga aaatcagtaa
2881 caccatcacc atataacgta acgaagcatc gtaaccgcca cttacaaaat attcaagact
2941 ctcaaagcgg taaaaacgga atcactccca caacaatgtc aacttcatct tctgacgatt
3001 ttgttccggt taaagatggt gaaaattttt gctgggtcca tagcatggaa ccagacagaa
3061 gaccaagtaa gaaaaggtta gtagattttt caaataagag taatgtcaat gttggtcaag
3121 ttaaggacat tcacggacgc atcccagaaa tgctgtgatt atacgcaacg atattttgct
3181 taattttatt ttcctgtttt attttttatt agtggtttac agatacccta tattttattt
3241 agtttttata cttagagaca tttaatttta attccattct tcaaatttca tttttgcact
3301 taaaacaaag atccaaaaat gctctcgccc tcttcatatt gagaatacac tccattcaaa
3361 attttgtcgt caccgctgat taatttttca ctaaactgat gaataatcaa aggccccacg
3421 tcagaaccga ctaaagaagt gagttttatt ttaggaggtt gaaaaccatt attgtctggt
3481 aaattttcat cttcttgaca tttaacccag tttgaatccc tttcaatttc tgctttttcc
3541 tccaaactat cgaccctcct gtttctgtcc aacttatgtc ctagttccaa ttcgatcgca
3601 ttaataactg cttcaaatgt tattgtgtca tcgttgactt taggtaattt ctccaaatgc
3661 ataatcaaac tatttaagga agatcggaat tcgtcgaaca cttcagtttc cgtaatgatc
3721 tgatcgtctt tatccacatg ttgtaattca ctaaaatcta aaacgtattt ttcaatgcat
3781 aaatcgttct ttttattaat aatgcagatg gaaaatctgt aaacgtgcgt taatttagaa
3841 agaacatcca gtataagttc ttctatatag tcaattaaag caggatgcct attaatggga
3901 acgaactgcg gcaagttgaa tgactggtaa gtagtgtagt cgaatgactg aggtgggtat
3961 acatttctat aaaataaaat caaattaatg tagcatttta agtataccct cagccacttc
4021 tctacccatc tattcataaa gctgacgcaa cgattactat tttttttttc ttcttggatc
4081 tcagtcgtcg caaaaacgta taccttcttt ttccgacctt ttttttagct ttctggaaaa
4141 gtttatatta gttaaacagg gtctagtctt agtgtgaaag ctagtggttt cgattgactg
4201 atattaagaa agtggaaatt aaattagtag tgtagacgta tatgcatatg tatttctcgc
4261 ctgtttatgt ttctacgtac ttttgattta tagcaagggg aaaagaaata catactattt
4321 tttggtaaag gtgaaagcat aatgtaaaag ctagaataaa atggacgaaa taaagagagg
4381 cttagttcat cttttttcca aaaagcaccc aatgataata actaaaatga aaaggatttg
4441 ccatctgtca gcaacatcag ttgtgtgagc aataataaaa tcatcacctc cgttgccttt
4501 agcgcgtttg tcgtttgtat cttccgtaat tttagtctta tcaatgggaa tcataaattt
4561 tccaatgaat tagcaatttc gtccaattct ttttgagctt cttcatattt gctttggaat
4621 tcttcgcact tcttttccca ttcatctctt tcttcttcca aagcaacgat ccttctaccc
4681 atttgctcag agttcaaatc ggcctctttc agtttatcca ttgcttcctt cagtttggct
4741 tcactgtctt ctagctgttg ttctagatcc tggtttttct tggtgtagtt ctcattatta
4801 gatctcaagt tattggagtc ttcagccaat tgctttgtat cagacaattg actctctaac
4861 ttctccactt cactgtcgag ttgctcgttt ttagcggaca aagatttaat ctcgttttct
4921 ttttcagtgt tagattgctc taattctttg agctgttctc tcagctcctc atatttttct
4981 tgccatgact cagattctaa ttttaagcta ttcaatttct ctttgatc
//"""
# GenBank format protein (aka GenPept) file from:
# http://www.molecularevolution.org/resources/fileformats/
gbk_example2 = \
"""LOCUS AAD51968 143 aa linear BCT 21-AUG-2001
DEFINITION transcriptional regulator RovA [Yersinia enterocolitica].
ACCESSION AAD51968
VERSION AAD51968.1 GI:5805369
DBSOURCE locus AF171097 accession AF171097.1
KEYWORDS .
SOURCE Yersinia enterocolitica
ORGANISM Yersinia enterocolitica
Bacteria; Proteobacteria; Gammaproteobacteria; Enterobacteriales;
Enterobacteriaceae; Yersinia.
REFERENCE 1 (residues 1 to 143)
AUTHORS Revell,P.A. and Miller,V.L.
TITLE A chromosomally encoded regulator is required for expression of the
Yersinia enterocolitica inv gene and for virulence
JOURNAL Mol. Microbiol. 35 (3), 677-685 (2000)
MEDLINE 20138369
PUBMED 10672189
REFERENCE 2 (residues 1 to 143)
AUTHORS Revell,P.A. and Miller,V.L.
TITLE Direct Submission
JOURNAL Submitted (22-JUL-1999) Molecular Microbiology, Washington
University School of Medicine, Campus Box 8230, 660 South Euclid,
St. Louis, MO 63110, USA
COMMENT Method: conceptual translation.
FEATURES Location/Qualifiers
source 1..143
/organism="Yersinia enterocolitica"
/mol_type="unassigned DNA"
/strain="JB580v"
/serotype="O:8"
/db_xref="taxon:630"
Protein 1..143
/product="transcriptional regulator RovA"
/name="regulates inv expression"
CDS 1..143
/gene="rovA"
/coded_by="AF171097.1:380..811"
/note="regulator of virulence"
/transl_table=11
ORIGIN
1 mestlgsdla rlvrvwrali dhrlkplelt qthwvtlhni nrlppeqsqi qlakaigieq
61 pslvrtldql eekglitrht candrrakri klteqsspii eqvdgvicst rkeilggisp
121 deiellsgli dklerniiql qsk
//"""
swiss_example = \
"""ID 104K_THEAN Reviewed; 893 AA.
AC Q4U9M9;
DT 18-APR-2006, integrated into UniProtKB/Swiss-Prot.
DT 05-JUL-2005, sequence version 1.
DT 31-OCT-2006, entry version 8.
DE 104 kDa microneme-rhoptry antigen precursor (p104).
GN ORFNames=TA08425;
OS Theileria annulata.
OC Eukaryota; Alveolata; Apicomplexa; Piroplasmida; Theileriidae;
OC Theileria.
OX NCBI_TaxID=5874;
RN [1]
RP NUCLEOTIDE SEQUENCE [LARGE SCALE GENOMIC DNA].
RC STRAIN=Ankara;
RX PubMed=15994557; DOI=10.1126/science.1110418;
RA Pain A., Renauld H., Berriman M., Murphy L., Yeats C.A., Weir W.,
RA Kerhornou A., Aslett M., Bishop R., Bouchier C., Cochet M.,
RA Coulson R.M.R., Cronin A., de Villiers E.P., Fraser A., Fosker N.,
RA Gardner M., Goble A., Griffiths-Jones S., Harris D.E., Katzer F.,
RA Larke N., Lord A., Maser P., McKellar S., Mooney P., Morton F.,
RA Nene V., O'Neil S., Price C., Quail M.A., Rabbinowitsch E.,
RA Rawlings N.D., Rutter S., Saunders D., Seeger K., Shah T., Squares R.,
RA Squares S., Tivey A., Walker A.R., Woodward J., Dobbelaere D.A.E.,
RA Langsley G., Rajandream M.A., McKeever D., Shiels B., Tait A.,
RA Barrell B.G., Hall N.;
RT "Genome of the host-cell transforming parasite Theileria annulata
RT compared with T. parva.";
RL Science 309:131-133(2005).
CC -!- SUBCELLULAR LOCATION: Cell membrane; lipid-anchor; GPI-anchor
CC (Potential). In microneme/rhoptry complexes (By similarity).
DR EMBL; CR940353; CAI76474.1; -; Genomic_DNA.
DR InterPro; IPR007480; DUF529.
DR Pfam; PF04385; FAINT; 4.
KW Complete proteome; GPI-anchor; Lipoprotein; Membrane; Repeat; Signal;
KW Sporozoite.
FT SIGNAL 1 19 Potential.
FT CHAIN 20 873 104 kDa microneme-rhoptry antigen.
FT /FTId=PRO_0000232680.
FT PROPEP 874 893 Removed in mature form (Potential).
FT /FTId=PRO_0000232681.
FT COMPBIAS 215 220 Poly-Leu.
FT COMPBIAS 486 683 Lys-rich.
FT COMPBIAS 854 859 Poly-Arg.
FT LIPID 873 873 GPI-anchor amidated aspartate
FT (Potential).
SQ SEQUENCE 893 AA; 101921 MW; 2F67CEB3B02E7AC1 CRC64;
MKFLVLLFNI LCLFPILGAD ELVMSPIPTT DVQPKVTFDI NSEVSSGPLY LNPVEMAGVK
YLQLQRQPGV QVHKVVEGDI VIWENEEMPL YTCAIVTQNE VPYMAYVELL EDPDLIFFLK
EGDQWAPIPE DQYLARLQQL RQQIHTESFF SLNLSFQHEN YKYEMVSSFQ HSIKMVVFTP
KNGHICKMVY DKNIRIFKAL YNEYVTSVIG FFRGLKLLLL NIFVIDDRGM IGNKYFQLLD
DKYAPISVQG YVATIPKLKD FAEPYHPIIL DISDIDYVNF YLGDATYHDP GFKIVPKTPQ
CITKVVDGNE VIYESSNPSV ECVYKVTYYD KKNESMLRLD LNHSPPSYTS YYAKREGVWV
TSTYIDLEEK IEELQDHRST ELDVMFMSDK DLNVVPLTNG NLEYFMVTPK PHRDIIIVFD
GSEVLWYYEG LENHLVCTWI YVTEGAPRLV HLRVKDRIPQ NTDIYMVKFG EYWVRISKTQ
YTQEIKKLIK KSKKKLPSIE EEDSDKHGGP PKGPEPPTGP GHSSSESKEH EDSKESKEPK
EHGSPKETKE GEVTKKPGPA KEHKPSKIPV YTKRPEFPKK SKSPKRPESP KSPKRPVSPQ
RPVSPKSPKR PESLDIPKSP KRPESPKSPK RPVSPQRPVS PRRPESPKSP KSPKSPKSPK
VPFDPKFKEK LYDSYLDKAA KTKETVTLPP VLPTDESFTH TPIGEPTAEQ PDDIEPIEES
VFIKETGILT EEVKTEDIHS ETGEPEEPKR PDSPTKHSPK PTGTHPSMPK KRRRSDGLAL
STTDLESEAG RILRDPTGKI VTMKRSKSFD DLTTVREKEH MGAEIRKIVV DDDGTEADDE
DTHPSKEKHL STVRRRRPRP KKSSKSSKPR KPDSAFVPSI IFIFLVSLIV GIL
//
ID 104K_THEPA Reviewed; 924 AA.
AC P15711; Q4N2B5;
DT 01-APR-1990, integrated into UniProtKB/Swiss-Prot.
DT 01-APR-1990, sequence version 1.
DT 31-OCT-2006, entry version 31.
DE 104 kDa microneme-rhoptry antigen precursor (p104).
GN OrderedLocusNames=TP04_0437;
OS Theileria parva.
OC Eukaryota; Alveolata; Apicomplexa; Piroplasmida; Theileriidae;
OC Theileria.
OX NCBI_TaxID=5875;
RN [1]
RP NUCLEOTIDE SEQUENCE [GENOMIC DNA].
RC STRAIN=Muguga;
RX MEDLINE=90158697; PubMed=1689460; DOI=10.1016/0166-6851(90)90007-9;
RA Iams K.P., Young J.R., Nene V., Desai J., Webster P., Ole-Moiyoi O.K.,
RA Musoke A.J.;
RT "Characterisation of the gene encoding a 104-kilodalton microneme-
RT rhoptry protein of Theileria parva.";
RL Mol. Biochem. Parasitol. 39:47-60(1990).
RN [2]
RP NUCLEOTIDE SEQUENCE [LARGE SCALE GENOMIC DNA].
RC STRAIN=Muguga;
RX PubMed=15994558; DOI=10.1126/science.1110439;
RA Gardner M.J., Bishop R., Shah T., de Villiers E.P., Carlton J.M.,
RA Hall N., Ren Q., Paulsen I.T., Pain A., Berriman M., Wilson R.J.M.,
RA Sato S., Ralph S.A., Mann D.J., Xiong Z., Shallom S.J., Weidman J.,
RA Jiang L., Lynn J., Weaver B., Shoaibi A., Domingo A.R., Wasawo D.,
RA Crabtree J., Wortman J.R., Haas B., Angiuoli S.V., Creasy T.H., Lu C.,
RA Suh B., Silva J.C., Utterback T.R., Feldblyum T.V., Pertea M.,
RA Allen J., Nierman W.C., Taracha E.L.N., Salzberg S.L., White O.R.,
RA Fitzhugh H.A., Morzaria S., Venter J.C., Fraser C.M., Nene V.;
RT "Genome sequence of Theileria parva, a bovine pathogen that transforms
RT lymphocytes.";
RL Science 309:134-137(2005).
CC -!- SUBCELLULAR LOCATION: Cell membrane; lipid-anchor; GPI-anchor
CC (Potential). In microneme/rhoptry complexes.
CC -!- DEVELOPMENTAL STAGE: Sporozoite antigen.
DR EMBL; M29954; AAA18217.1; -; Unassigned_DNA.
DR EMBL; AAGK01000004; EAN31789.1; -; Genomic_DNA.
DR PIR; A44945; A44945.
DR InterPro; IPR007480; DUF529.
DR Pfam; PF04385; FAINT; 4.
KW Complete proteome; GPI-anchor; Lipoprotein; Membrane; Repeat; Signal;
KW Sporozoite.
FT SIGNAL 1 19 Potential.
FT CHAIN 20 904 104 kDa microneme-rhoptry antigen.
FT /FTId=PRO_0000046081.
FT PROPEP 905 924 Removed in mature form (Potential).
FT /FTId=PRO_0000232679.
FT COMPBIAS 508 753 Pro-rich.
FT COMPBIAS 880 883 Poly-Arg.
FT LIPID 904 904 GPI-anchor amidated aspartate
FT (Potential).
SQ SEQUENCE 924 AA; 103626 MW; 289B4B554A61870E CRC64;
MKFLILLFNI LCLFPVLAAD NHGVGPQGAS GVDPITFDIN SNQTGPAFLT AVEMAGVKYL
QVQHGSNVNI HRLVEGNVVI WENASTPLYT GAIVTNNDGP YMAYVEVLGD PNLQFFIKSG
DAWVTLSEHE YLAKLQEIRQ AVHIESVFSL NMAFQLENNK YEVETHAKNG ANMVTFIPRN
GHICKMVYHK NVRIYKATGN DTVTSVVGFF RGLRLLLINV FSIDDNGMMS NRYFQHVDDK
YVPISQKNYE TGIVKLKDYK HAYHPVDLDI KDIDYTMFHL ADATYHEPCF KIIPNTGFCI
TKLFDGDQVL YESFNPLIHC INEVHIYDRN NGSIICLHLN YSPPSYKAYL VLKDTGWEAT
THPLLEEKIE ELQDQRACEL DVNFISDKDL YVAALTNADL NYTMVTPRPH RDVIRVSDGS
EVLWYYEGLD NFLVCAWIYV SDGVASLVHL RIKDRIPANN DIYVLKGDLY WTRITKIQFT
QEIKRLVKKS KKKLAPITEE DSDKHDEPPE GPGASGLPPK APGDKEGSEG HKGPSKGSDS
SKEGKKPGSG KKPGPAREHK PSKIPTLSKK PSGPKDPKHP RDPKEPRKSK SPRTASPTRR
PSPKLPQLSK LPKSTSPRSP PPPTRPSSPE RPEGTKIIKT SKPPSPKPPF DPSFKEKFYD
DYSKAASRSK ETKTTVVLDE SFESILKETL PETPGTPFTT PRPVPPKRPR TPESPFEPPK
DPDSPSTSPS EFFTPPESKR TRFHETPADT PLPDVTAELF KEPDVTAETK SPDEAMKRPR
SPSEYEDTSP GDYPSLPMKR HRLERLRLTT TEMETDPGRM AKDASGKPVK LKRSKSFDDL
TTVELAPEPK ASRIVVDDEG TEADDEETHP PEERQKTEVR RRRPPKKPSK SPRPSKPKKP
KKPDSAYIPS ILAILVVSLI VGIL
//
ID 108_SOLLC Reviewed; 102 AA.
AC Q43495;
DT 15-JUL-1999, integrated into UniProtKB/Swiss-Prot.
DT 01-NOV-1996, sequence version 1.
DT 31-OCT-2006, entry version 37.
DE Protein 108 precursor.
OS Solanum lycopersicum (Tomato) (Lycopersicon esculentum).
OC Eukaryota; Viridiplantae; Streptophyta; Embryophyta; Tracheophyta;
OC Spermatophyta; Magnoliophyta; eudicotyledons; core eudicotyledons;
OC asterids; lamiids; Solanales; Solanaceae; Solanum; Lycopersicon.
OX NCBI_TaxID=4081;
RN [1]
RP NUCLEOTIDE SEQUENCE [MRNA].
RC STRAIN=cv. VF36; TISSUE=Anther;
RX MEDLINE=94143497; PubMed=8310077; DOI=10.1104/pp.101.4.1413;
RA Chen R., Smith A.G.;
RT "Nucleotide sequence of a stamen- and tapetum-specific gene from
RT Lycopersicon esculentum.";
RL Plant Physiol. 101:1413-1413(1993).
CC -!- TISSUE SPECIFICITY: Stamen- and tapetum-specific.
CC -!- SIMILARITY: Belongs to the A9/FIL1 family.
DR EMBL; Z14088; CAA78466.1; -; mRNA.
DR PIR; S26409; S26409.
DR InterPro; IPR013770; LPT_helical.
DR InterPro; IPR003612; LTP/seed_store/tryp_amyl_inhib.
DR Pfam; PF00234; Tryp_alpha_amyl; 1.
DR SMART; SM00499; AAI; 1.
KW Signal.
FT SIGNAL 1 30 Potential.
FT CHAIN 31 102 Protein 108.
FT /FTId=PRO_0000000238.
FT DISULFID 41 77 By similarity.
FT DISULFID 51 66 By similarity.
FT DISULFID 67 92 By similarity.
FT DISULFID 79 99 By similarity.
SQ SEQUENCE 102 AA; 10576 MW; CFBAA1231C3A5E92 CRC64;
MASVKSSSSS SSSSFISLLL LILLVIVLQS QVIECQPQQS CTASLTGLNV CAPFLVPGSP
TASTECCNAV QSINHDCMCN TMRIAAQIPA QCNLPPLSCS AN
//
"""
# Console banner announcing the sequence-input test section
# (Python 2 print statements, matching the rest of this script).
print "#########################################################"
print "# Sequence Input Tests                                  #"
print "#########################################################"
#ToDo - Check alphabet, or at least DNA/amino acid, for those
#       file types that specify it (e.g. Nexus, GenBank)
tests = [
(aln_example, "clustal", 8, "HISJ_E_COLI",
"MKKLVLSLSLVLAFSSATAAF-------------------AAIPQNIRIG" + \
"TDPTYAPFESKNS-QGELVGFDIDLAKELCKRINTQCTFVENPLDALIPS" + \
"LKAKKIDAIMSSLSITEKRQQEIAFTDKLYAADSRLVVAKNSDIQP-TVE" + \
"SLKGKRVGVLQGTTQETFGNEHWAPKGIEIVSYQGQDNIYSDLTAGRIDA" + \
"AFQDEVAASEGFLKQPVGKDYKFGGPSVKDEKLFGVGTGMGLRKED--NE" + \
"LREALNKAFAEMRADGTYEKLAKKYFDFDVYGG---", True),
(phy_example, "phylip", 8, "HISJ_E_COL", None, False),
(nxs_example, "nexus", 8, "HISJ_E_COLI", None, True),
(nxs_example2, "nexus", 10, "Frog",
"ATGGCACACCCATCACAATTAGGTTTTCAAGACGCAGCCTCTCCAATTATAGAAGAATTA" + \
"CTTCACTTCCACGACCATACCCTCATAGCCGTTTTTCTTATTAGTACGCTAGTTCTTTAC" + \
"ATTATTACTATTATAATAACTACTAAACTAACTAATACAAACCTAATGGACGCACAAGAG" + \
"ATCGAAATAGTGTGAACTATTATACCAGCTATTAGCCTCATCATAATTGCCCTTCCATCC" + \
"CTTCGTATCCTATATTTAATAGATGAAGTTAATGATCCACACTTAACAATTAAAGCAATC" + \
"GGCCACCAATGATACTGAAGCTACGAATATACTAACTATGAGGATCTCTCATTTGACTCT" + \
"TATATAATTCCAACTAATGACCTTACCCCTGGACAATTCCGGCTGCTAGAAGTTGATAAT" + \
"CGAATAGTAGTCCCAATAGAATCTCCAACCCGACTTTTAGTTACAGCCGAAGACGTCCTC" + \
"CACTCGTGAGCTGTACCCTCCTTGGGTGTCAAAACAGATGCAATCCCAGGACGACTTCAT" + \
"CAAACATCATTTATTGCTACTCGTCCGGGAGTATTTTACGGACAATGTTCAGAAATTTGC" + \
"GGAGCAAACCACAGCTTTATACCAATTGTAGTTGAAGCAGTACCGCTAACCGACTTTGAA" + \
"AACTGATCTTCATCAATACTA---GAAGCATCACTA------AGA", True),
(nxs_example3, "nexus", 10, "Frog",
'MAHPSQLGFQDAASPIMEELLHFHDHTLMAVFLISTLVLYIITIMMTTKLTNTNLMDAQE' + \
'IEMVWTIMPAISLIMIALPSLRILYLMDEVNDPHLTIKAIGHQWYWSYEYTNYEDLSFDS' + \
'YMIPTNDLTPGQFRLLEVDNRMVVPMESPTRLLVTAEDVLHSWAVPSLGVKTDAIPGRLH' + \
'QTSFIATRPGVFYGQCSEICGANHSFMPIVVEAVPLTDFENWSSSML-EASL--', True),
(faa_example, "fasta", 8, "HISJ_E_COLI",
'mkklvlslslvlafssataafaaipqnirigtdptyapfesknsqgelvgfdidlakelc' + \
'krintqctfvenpldalipslkakkidaimsslsitekrqqeiaftdklyaadsrlvvak' + \
'nsdiqptveslkgkrvgvlqgttqetfgnehwapkgieivsyqgqdniysdltagridaa' + \
'fqdevaasegflkqpvgkdykfggpsvkdeklfgvgtgmglrkednelrealnkafaemr' + \
'adgtyeklakkyfdfdvygg', True),
(sth_example, "stockholm", 5, "O31699/88-139",
'EVMLTDIPRLHINDPIMK--GFGMVINN------GFVCVENDE', True),
(sth_example2, "stockholm", 2, "AE007476.1",
'AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGUGAAU-UGG-CACGA-CGU' + \
'UUCUACAAGGUG-CCGG-AA-CACCUAACAAUAAGUAAGUCAGCAGUGAGAU', True),
(gbk_example, "genbank", 1, "U49845.1", None, True),
(gbk_example2,"genbank", 1, 'AAD51968.1',
"MESTLGSDLARLVRVWRALIDHRLKPLELTQTHWVTLHNINRLPPEQSQIQLAKAIGIEQ" + \
"PSLVRTLDQLEEKGLITRHTCANDRRAKRIKLTEQSSPIIEQVDGVICSTRKEILGGISP" + \
"DEIELLSGLIDKLERNIIQLQSK", True),
(gbk_example, "genbank-cds", 3, "AAA98667.1",
'MNRWVEKWLRVYLKCYINLILFYRNVYPPQSFDYTTYQSFNLPQFVPINRHPALIDYIEE' + \
'LILDVLSKLTHVYRFSICIINKKNDLCIEKYVLDFSELQHVDKDDQIITETEVFDEFRSS' + \
'LNSLIMHLEKLPKVNDDTITFEAVINAIELELGHKLDRNRRVDSLEEKAEIERDSNWVKC' + \
'QEDENLPDNNGFQPPKIKLTSLVGSDVGPLIIHQFSEKLISGDDKILNGVYSQYEEGESI' + \
'FGSLF', True),
(swiss_example,"swiss", 3, "Q43495",
"MASVKSSSSSSSSSFISLLLLILLVIVLQSQVIECQPQQSCTASLTGLNVCAPFLVPGSP" + \
"TASTECCNAVQSINHDCMCNTMRIAAQIPAQCNLPPLSCSAN", True),
]
for (data, format, rec_count, last_id, last_seq, dict_check) in tests:
print "%s file with %i records" % (format, rec_count)
print "Bio.SeqIO.parse(handle)"
#Basic check, turning the iterator into a list...
#This uses "for x in iterator" interally.
iterator = parse(StringIO(data), format=format)
as_list = list(iterator)
assert len(as_list) == rec_count, \
"Expected %i records, found %i" \
% (rec_count, len(as_list))
assert as_list[-1].id == last_id, \
"Expected '%s' as last record ID, found '%s'" \
% (last_id, as_list[-1].id)
if last_seq :
assert as_list[-1].seq.tostring() == last_seq
#Test iteration including use of the next() method and "for x in iterator"
iterator = parse(StringIO(data), format=format)
count = 1
record = iterator.next()
assert record is not None
assert str(record.__class__) == "Bio.SeqRecord.SeqRecord"
#print record
for record in iterator :
assert record.id == as_list[count].id
assert record.seq.tostring() == as_list[count].seq.tostring()
count = count + 1
assert count == rec_count
assert record is not None
assert record.id == last_id
#Test iteration using just next() method
iterator = parse(StringIO(data), format=format)
count = 0
while True :
try :
record = iterator.next()
except StopIteration :
break
if record is None : break
assert record.id == as_list[count].id
assert record.seq.tostring() == as_list[count].seq.tostring()
count=count+1
assert count == rec_count
print "parse(...)"
iterator = parse(StringIO(data), format=format)
for (i, record) in enumerate(iterator) :
assert record.id == as_list[i].id
assert record.seq.tostring() == as_list[i].seq.tostring()
assert i+1 == rec_count
print "parse(handle to empty file)"
iterator = parse(StringIO(""), format=format)
assert len(list(iterator))==0
if dict_check :
print "to_dict(parse(...))"
seq_dict = to_dict(parse(StringIO(data), format=format))
assert Set(seq_dict.keys()) == Set([r.id for r in as_list])
assert last_id in seq_dict
assert seq_dict[last_id].seq.tostring() == as_list[-1].seq.tostring()
if len(Set([len(r.seq) for r in as_list]))==1 :
#All the sequences in the example are the same length,
#so it make sense to try turning this file into an alignment.
print "to_alignment(parse(handle))"
alignment = to_alignment(parse(handle = StringIO(data), format=format))
assert len(alignment._records)==rec_count
assert alignment.get_alignment_length() == len(as_list[0].seq)
for i in range(0, rec_count) :
assert as_list[i].id == alignment._records[i].id
assert as_list[i].id == alignment.get_all_seqs()[i].id
assert as_list[i].seq.tostring() == alignment._records[i].seq.tostring()
assert as_list[i].seq.tostring() == alignment.get_all_seqs()[i].seq.tostring()
print "read(...)"
if rec_count == 1 :
record = read(StringIO(data), format)
assert isinstance(record, SeqRecord)
else :
try :
record = read(StringIO(data), format)
assert False, "Should have failed"
except ValueError :
#Expected to fail
pass
print
print "Checking phy <-> aln examples agree using list(parse(...))"
#Only compare the first 10 characters of the record.id as they
#are truncated in the phylip file. Cannot use to_dict(parse(...))
#on the phylip file as there is a repeared id.
aln_list = list(parse(StringIO(aln_example), format="clustal"))
phy_list = list(parse(StringIO(phy_example), format="phylip"))
assert len(aln_list) == len(phy_list)
assert Set([r.id[0:10] for r in aln_list]) == Set([r.id for r in phy_list])
for i in range(0, len(aln_list)) :
assert aln_list[i].id[0:10] == phy_list[i].id
assert aln_list[i].seq.tostring() == phy_list[i].seq.tostring()
print "Checking nxs <-> aln examples agree using parse"
#Only compare the first 10 characters of the record.id as they
#are truncated in the phylip file. Cannot use to_dict(parse(...))
#on the phylip file as there is a repeared id.
aln_iter = parse(StringIO(aln_example), format="clustal")
nxs_iter = parse(StringIO(nxs_example), format="nexus")
while True :
try :
aln_record = aln_iter.next()
except StopIteration :
aln_record = None
try :
nxs_record = nxs_iter.next()
except StopIteration :
nxs_record = None
if aln_record is None or nxs_record is None :
assert aln_record is None
assert nxs_record is None
break
assert aln_record.id == nxs_record.id
assert aln_record.seq.tostring() == nxs_record.seq.tostring()
print "Checking faa <-> aln examples agree using to_dict(parse(...)"
#In my examples, aln_example is an alignment of faa_example
aln_dict = to_dict(parse(StringIO(aln_example), format="clustal"))
faa_dict = to_dict(parse(StringIO(faa_example), format="fasta"))
ids = Set(aln_dict.keys())
assert ids == Set(faa_dict.keys())
for id in ids :
#The aln file contains gaps as "-", and this fasta file does not
assert aln_dict[id].seq.tostring().upper().replace("-","") == \
faa_dict[id].seq.tostring().upper()
print
print "#########################################################"
print "# Sequence Output Tests #"
print "#########################################################"
print
general_output_formats = ["fasta"]
alignment_formats = ["phylip","stockholm","clustal"]
for (in_data, in_format, rec_count, last_id, last_seq, unique_ids) in tests:
if unique_ids :
in_list = list(parse(StringIO(in_data), format=in_format))
seq_lengths = [len(r.seq) for r in in_list]
output_formats = general_output_formats[:]
if min(seq_lengths)==max(seq_lengths) :
output_formats.extend(alignment_formats)
print "Checking conversion from %s (including to alignment formats)" % in_format
else :
print "Checking conversion from %s (excluding alignment formats)" % in_format
for out_format in output_formats :
print "Converting %s iterator -> %s" % (in_format, out_format)
output = open("temp.txt","w")
iterator = parse(StringIO(in_data), format=in_format)
#I am using an iterator here deliberately, as some format
#writers (e.g. phylip and stockholm) will have to cope with
#this and get the record count.
try :
write(iterator, output, out_format)
except ValueError, e:
print "FAILED: %s" % str(e)
#Try next format instead...
continue
output.close()
print "Checking %s <-> %s" % (in_format, out_format)
out_list = list(parse(open("temp.txt","rU"), format=out_format))
assert rec_count == len(out_list)
if last_seq :
assert last_seq == out_list[-1].seq.tostring()
if out_format=="phylip" :
assert last_id[0:10] == out_list[-1].id
else :
assert last_id == out_list[-1].id
for i in range(0, rec_count) :
assert in_list[-1].seq.tostring() == out_list[-1].seq.tostring()
if out_format=="phylip" :
assert in_list[i].id[0:10] == out_list[i].id
else :
assert in_list[i].id == out_list[i].id
print
print "#########################################################"
print "# SeqIO Tests finished #"
print "#########################################################"
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/SeqIO/__init__.py
|
Python
|
apache-2.0
| 76,925
|
[
"BioPerl",
"Biopython"
] |
bc2f02b5c80fc29a59afaaf537b7c096fddc599c3e06f39c94d952d28fa8904a
|
#!/usr/bin/env python
"""
Complete pipeline for motion correction, source extraction, and deconvolution
of one photon microendoscopic calcium imaging data using the CaImAn package.
The demo demonstrates how to use the params, MotionCorrect and cnmf objects
for processing 1p microendoscopic data. The analysis pipeline is similar as in
the case of 2p data processing with core difference being the usage of the
CNMF-E algorithm for source extraction (as opposed to plain CNMF). Check
the companion paper for more details.
You can also run a large part of the pipeline with a single method
(cnmf.fit_file) See inside for details.
Demo is also available as a jupyter notebook (see demo_pipeline_cnmfE.ipynb)
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
# Enable live module reloading when this script is run inside IPython/Jupyter.
try:
    if __IPYTHON__:
        # this is used for debugging purposes only. allows to reload classes when changed
        get_ipython().magic('load_ext autoreload')
        get_ipython().magic('autoreload 2')
except NameError:
    # __IPYTHON__ is only defined inside an IPython session; a plain Python
    # interpreter raises NameError and simply skips the magics.
    pass
import caiman as cm
from caiman.source_extraction import cnmf
from caiman.utils.utils import download_demo
from caiman.utils.visualization import inspect_correlation_pnr
from caiman.motion_correction import MotionCorrect
from caiman.source_extraction.cnmf import params as params
#%%
# Set up the logger; change this if you like.
# You can log to a file using the filename parameter, or make the output more or less
# verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR
logging.basicConfig(format=
                    "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]"
                    "[%(process)d] %(message)s",
                    level=logging.WARNING)
# filename="/tmp/caiman.log"   (add this kwarg to basicConfig to log to a file)
#%%
def main():
    """Run the full CaImAn 1p pipeline on the demo movie.

    Steps: cluster startup, (optional) motion correction, memory mapping,
    CNMF-E source extraction, component evaluation, and visualization.
    All parameters are set inside the function; the demo movie is fetched
    on first use via ``download_demo``.
    """
    pass  # For compatibility between running under Spyder and the CLI

    # %% start the cluster
    try:
        cm.stop_server()  # stop it if it was running
    except Exception:
        # BUG FIX: the original `except():` caught an *empty tuple* of
        # exception types, i.e. nothing at all, so a failing stop_server()
        # would have aborted the demo.
        pass

    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local',
        n_processes=24,  # number of processes to use; reduce if you run out of memory
        single_thread=False)

    # %% First setup some parameters for motion correction
    # dataset dependent parameters
    fnames = ['data_endoscope.tif']      # filename to be processed
    fnames = [download_demo(fnames[0])]  # download file if not already present
    filename_reorder = fnames
    fr = 10                              # movie frame rate
    decay_time = 0.4                     # length of a typical transient in seconds

    # motion correction parameters
    motion_correct = True    # flag for motion correction
    pw_rigid = False         # flag for pw-rigid motion correction
    gSig_filt = (3, 3)       # size of filter, in general gSig (see below);
    #                          change this one if the algorithm does not work
    max_shifts = (5, 5)      # maximum allowed rigid shift
    strides = (48, 48)       # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)      # overlap between patches (size of patch strides+overlaps)
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3
    border_nan = 'copy'

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'gSig_filt': gSig_filt,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': border_nan
    }

    opts = params.CNMFParams(params_dict=mc_dict)

    # %% MOTION CORRECTION
    # The pw_rigid flag set above determines whether to use rigid or pw-rigid
    # motion correction
    if motion_correct:
        # do motion correction rigid
        mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
        mc.motion_correct(save_movie=True)
        fname_mc = mc.fname_tot_els if pw_rigid else mc.fname_tot_rig
        if pw_rigid:
            # BUG FIX: np.int was deprecated and removed in NumPy >= 1.24;
            # the builtin int is the documented replacement.
            bord_px = np.ceil(np.maximum(np.max(np.abs(mc.x_shifts_els)),
                                         np.max(np.abs(mc.y_shifts_els)))).astype(int)
        else:
            bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)
            plt.subplot(1, 2, 1); plt.imshow(mc.total_template_rig)  # % plot template
            plt.subplot(1, 2, 2); plt.plot(mc.shifts_rig)            # % plot rigid shifts
            plt.legend(['x shifts', 'y shifts'])
            plt.xlabel('frames')
            plt.ylabel('pixels')

        # BUG FIX: string comparison must use ==, not the identity operator
        # `is` (identity of interned literals is an implementation detail and
        # raises SyntaxWarning on modern CPython).
        bord_px = 0 if border_nan == 'copy' else bord_px
        fname_new = cm.save_memmap(fname_mc, base_name='memmap_', order='C',
                                   border_to_0=bord_px)
    else:  # if no motion correction just memory map the file
        # BUG FIX: bord_px was previously unbound on this branch but is used
        # below ('border_pix' and the movie bpx argument) -> NameError.
        bord_px = 0
        fname_new = cm.save_memmap(filename_reorder, base_name='memmap_',
                                   order='C', border_to_0=0, dview=dview)

    # load memory mappable file
    Yr, dims, T = cm.load_memmap(fname_new)
    images = Yr.T.reshape((T,) + dims, order='F')

    # %% Parameters for source extraction and deconvolution (CNMF-E algorithm)
    p = 1               # order of the autoregressive system
    K = None            # upper bound on number of components per patch, in general None for 1p data
    gSig = (3, 3)       # gaussian width of a 2D gaussian kernel, which approximates a neuron
    gSiz = (13, 13)     # average diameter of a neuron, in general 4*gSig+1
    Ain = None          # possibility to seed with predetermined binary masks
    merge_thr = .7      # merging threshold, max correlation allowed
    rf = 40             # half-size of the patches in pixels. e.g., if rf=40, patches are 80x80
    stride_cnmf = 20    # amount of overlap between the patches in pixels
    #                     (keep it at least large as gSiz, i.e 4 times the neuron size gSig)
    tsub = 2            # downsampling factor in time for initialization,
    #                     increase if you have memory problems
    ssub = 1            # downsampling factor in space for initialization,
    #                     increase if you have memory problems
    # you can pass them here as boolean vectors
    low_rank_background = None  # None leaves background of each patch intact,
    #                             True performs global low-rank approximation if gnb>0
    gnb = 0             # number of background components (rank) if positive,
    #                     else exact ring model with following settings
    #                         gnb= 0: Return background as b and W
    #                         gnb=-1: Return full rank background B
    #                         gnb<-1: Don't return background
    nb_patch = 0        # number of background components (rank) per patch if gnb>0,
    #                     else it is set automatically
    min_corr = .8       # min peak value from correlation image
    min_pnr = 10        # min peak to noise ration from PNR image
    ssub_B = 2          # additional downsampling factor in space for background
    ring_size_factor = 1.4  # radius of ring is gSiz*ring_size_factor

    opts.change_params(params_dict={'dims': dims,
                                    'method_init': 'corr_pnr',  # use this for 1 photon
                                    'K': K,
                                    'gSig': gSig,
                                    'gSiz': gSiz,
                                    'merge_thr': merge_thr,
                                    'p': p,
                                    'tsub': tsub,
                                    'ssub': ssub,
                                    'rf': rf,
                                    'stride': stride_cnmf,
                                    'only_init': True,    # set it to True to run CNMF-E
                                    'nb': gnb,
                                    'nb_patch': nb_patch,
                                    'method_deconvolution': 'oasis',       # could use 'cvxpy' alternatively
                                    'low_rank_background': low_rank_background,
                                    'update_background_components': True,  # sometimes setting to False improve the results
                                    'min_corr': min_corr,
                                    'min_pnr': min_pnr,
                                    'normalize_init': False,               # just leave as is
                                    'center_psf': True,                    # leave as is for 1 photon
                                    'ssub_B': ssub_B,
                                    'ring_size_factor': ring_size_factor,
                                    'del_duplicates': True,                # whether to remove duplicates from initialization
                                    'border_pix': bord_px})                # number of pixels to not consider in the borders

    # %% compute some summary images (correlation and peak to noise)
    # change swap dim if output looks weird, it is a problem with tiffile
    cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig=gSig[0], swap_dim=False)
    # if your images file is too long this computation will take unnecessarily
    # long time and consume a lot of memory. Consider changing images[::1] to
    # images[::5] or something similar to compute on a subset of the data

    # inspect the summary images and set the parameters
    inspect_correlation_pnr(cn_filter, pnr)
    # print parameters set above, modify them if necessary based on summary images
    print(min_corr)  # min correlation of peak (from correlation image)
    print(min_pnr)   # min peak to noise ratio

    # %% RUN CNMF ON PATCHES
    cnm = cnmf.CNMF(n_processes=n_processes, dview=dview, Ain=Ain, params=opts)
    cnm.fit(images)

    # %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE
    #   you can also perform the motion correction plus cnmf fitting steps
    #   simultaneously after defining your parameters object using
    #  cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #  cnm1.fit_file(motion_correct=True)

    # %% DISCARD LOW QUALITY COMPONENTS
    min_SNR = 2.5        # adaptive way to set threshold on the transient size
    r_values_min = 0.85  # threshold on space consistency (if you lower more components
    #                      will be accepted, potentially with worst quality)
    cnm.params.set('quality', {'min_SNR': min_SNR,
                               'rval_thr': r_values_min,
                               'use_cnn': False})
    cnm.estimates.evaluate_components(images, cnm.params, dview=dview)

    print(' ***** ')
    print('Number of total components: ', len(cnm.estimates.C))
    print('Number of accepted components: ', len(cnm.estimates.idx_components))

    # %% PLOT COMPONENTS
    cnm.dims = dims
    display_images = True  # Set to true to show movies and images
    if display_images:
        cnm.estimates.plot_contours(img=cn_filter, idx=cnm.estimates.idx_components)
        cnm.estimates.view_components(images, idx=cnm.estimates.idx_components)

    # %% MOVIES
    display_images = False  # Set to true to show movies and images
    if display_images:
        # fully reconstructed movie
        cnm.estimates.play_movie(images, q_max=99.5, magnification=2,
                                 include_bck=True, gain_res=10, bpx=bord_px)
        # movie without background
        cnm.estimates.play_movie(images, q_max=99.9, magnification=2,
                                 include_bck=False, gain_res=4, bpx=bord_px)

    # %% STOP SERVER
    cm.stop_server(dview=dview)
# %% This is to mask the differences between running this demo in Spyder
# versus from the CLI (the guard keeps the pipeline from executing when the
# module is merely imported).
if __name__ == "__main__":
    main()
|
agiovann/Constrained_NMF
|
demos/general/demo_pipeline_cnmfE.py
|
Python
|
gpl-2.0
| 11,895
|
[
"Gaussian",
"NEURON"
] |
d46aea15093d7fc9d6adab57c5b8f9c6eda9db2f513c25089e925a29f3b7733a
|
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# (C) 2007 Jeremy Laine
# licensed under MIT, see COPYING.MIT
#
# June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com>
# * Updated to no longer generate special -dbg package, instead use the
# single system -dbg
# * Update version with ".1" to indicate this change
import os
import sys
import time
# Target Python version; MakefileMaker embeds VERSION[:3] ("2.7") in the
# default ${libdir}/pythonX.Y/ file prefix.
VERSION = "2.7.2"

__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
# Generator version string written into the header of every generated manifest.
__version__ = "20110222.2"
class MakefileMaker:
    """Collects package definitions and writes a BitBake python manifest.

    Usage: construct with a writable file object, register packages with
    addPackage(), then call make() to emit the whole manifest
    (prolog, PROVIDES/PACKAGES lines, per-package variables, epilog).
    """

    def __init__( self, outfile ):
        """initialize"""
        # name -> (description, dependencies, fully-prefixed filenames)
        self.packages = {}
        # prefix prepended by addPackage() to filenames that are not
        # already anchored to a BitBake variable ("$...")
        self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
        self.output = outfile
        self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )

    #
    # helper functions
    #

    def out( self, data ):
        """print a line to the output file"""
        self.output.write( "%s\n" % data )

    def setPrefix( self, targetPrefix ):
        """set a file prefix for addPackage files"""
        self.targetPrefix = targetPrefix

    def doProlog( self ):
        # historical placeholder: emits a near-empty prolog
        self.out( """ """ )
        self.out( "" )

    def addPackage( self, name, description, dependencies, filenames ):
        """add a package to the Makefile"""
        # accept either a whitespace-separated string or a list of filenames
        # (IDIOM FIX: isinstance() instead of comparing type() objects)
        if isinstance( filenames, str ):
            filenames = filenames.split()
        fullFilenames = []
        for filename in filenames:
            if filename[0] != "$":
                # relative path: anchor it below the target prefix
                fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
            else:
                # already starts with a BitBake variable -> leave untouched
                fullFilenames.append( filename )
        self.packages[name] = description, dependencies, fullFilenames

    def doBody( self ):
        """generate body of Makefile"""
        #
        # generate provides line
        #
        provideLine = 'PROVIDES+="'
        for name in sorted(self.packages):
            provideLine += "%s " % name
        provideLine += '"'

        self.out( provideLine )
        self.out( "" )

        #
        # generate package line; ${PN}-distutils-staticdev is folded into the
        # ${PN}-distutils entry rather than listed on its own
        #
        packageLine = 'PACKAGES="${PN}-dbg '
        for name in sorted(self.packages):
            if name.startswith("${PN}-distutils"):
                if name == "${PN}-distutils":
                    packageLine += "%s-staticdev %s " % (name, name)
            elif name != '${PN}-dbg':
                packageLine += "%s " % name
        packageLine += '${PN}-modules"'

        self.out( packageLine )
        self.out( "" )

        #
        # generate per-package DESCRIPTION/RDEPENDS/FILES variables
        # (COMPAT FIX: dict.iteritems() is Python-2-only; sorted(d.items())
        # yields the same pairs in the same order and also runs on Python 3.
        # The unused 'dirset' scratch dict was dropped.)
        #
        for name, data in sorted(self.packages.items()):
            desc, deps, files = data
            self.out( 'DESCRIPTION_%s="%s"' % ( name, desc ) )
            self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
            line = 'FILES_%s="' % name
            for target in files:
                line += "%s " % target
            line += '"'
            self.out( line )
            self.out( "" )

        # catch-all meta package depending on everything except -dev/-staticdev
        self.out( 'DESCRIPTION_${PN}-modules="All Python modules"' )
        line = 'RDEPENDS_${PN}-modules="'
        for name, data in sorted(self.packages.items()):
            if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
                line += "%s " % name
        self.out( "%s \"" % line )
        self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )

    def doEpilog( self ):
        # historical placeholder: emits an empty epilog
        self.out( """""" )
        self.out( "" )

    def make( self ):
        """Emit the complete manifest: prolog, body, epilog."""
        self.doProlog()
        self.doBody()
        self.doEpilog()
# Entry point: write the manifest to the file named by argv[1], or to stdout.
if __name__ == "__main__":
    if len( sys.argv ) > 1:
        try:
            os.unlink(sys.argv[1])
        except Exception:
            # sys.exc_clear() is Python-2-only; it clears the last exception state.
            sys.exc_clear()
        # file() is the Python 2 builtin constructor; this script targets Python 2.
        outfile = file( sys.argv[1], "w" )
    else:
        outfile = sys.stdout
    m = MakefileMaker( outfile )

    # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
    # Parameters: revision, name, description, dependencies, filenames
    #

    m.addPackage( "${PN}-core", "Python Interpreter and core modules (needed!)", "${PN}-lang ${PN}-re",
        "__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
        "genericpath.* getopt.* linecache.* new.* " +
        "os.* posixpath.* struct.* " +
        "warnings.* site.* stat.* " +
        "UserDict.* UserList.* UserString.* " +
        "lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
        "lib-dynload/xreadlines.so types.* platform.* ${bindir}/python* " +
        "_weakrefset.* sysconfig.* config/Makefile " +
        "${includedir}/python${PYTHON_MAJMIN}/pyconfig*.h " +
        "${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ")

    m.addPackage( "${PN}-dev", "Python Development Package", "${PN}-core",
        "${includedir} " +
        "${libdir}/lib*${SOLIBSDEV} " +
        "${libdir}/*.la " +
        "${libdir}/*.a " +
        "${libdir}/*.o " +
        "${libdir}/pkgconfig " +
        "${base_libdir}/*.a " +
        "${base_libdir}/*.o " +
        "${datadir}/aclocal " +
        "${datadir}/pkgconfig " )

    m.addPackage( "${PN}-2to3", "Python Automated Python 2 to 3 code translation", "${PN}-core",
        "${bindir}/2to3 lib2to3" ) # package

    m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter",
        "${bindir}/idle idlelib" ) # package

    m.addPackage( "${PN}-pydoc", "Python Interactive Help Support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re",
        "${bindir}/pydoc pydoc.* pydoc_data" )

    m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime",
        "${bindir}/smtpd.* smtpd.*" )

    m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
        "wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so audiodev.* sunaudio.* sunau.* toaiff.*" )

    m.addPackage( "${PN}-bsddb", "Python Berkeley Database Bindings", "${PN}-core",
        "bsddb lib-dynload/_bsddb.so" ) # package

    m.addPackage( "${PN}-codecs", "Python Codecs, Encodings & i18n Support", "${PN}-core ${PN}-lang",
        "codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/_codecs* lib-dynload/_multibytecodec.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )

    m.addPackage( "${PN}-compile", "Python Bytecode Compilation Support", "${PN}-core",
        "py_compile.* compileall.*" )

    m.addPackage( "${PN}-compiler", "Python Compiler Support", "${PN}-core",
        "compiler" ) # package

    m.addPackage( "${PN}-compression", "Python High Level Compression Support", "${PN}-core ${PN}-zlib",
        "gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )

    m.addPackage( "${PN}-crypt", "Python Basic Cryptographic and Hashing Support", "${PN}-core",
        "hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )

    m.addPackage( "${PN}-textutils", "Python Option Parsing, Text Wrapping and Comma-Separated-Value Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold",
        "lib-dynload/_csv.so csv.* optparse.* textwrap.*" )

    m.addPackage( "${PN}-curses", "Python Curses Support", "${PN}-core",
        "curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module

    m.addPackage( "${PN}-ctypes", "Python C Types Support", "${PN}-core",
        "ctypes lib-dynload/_ctypes.so lib-dynload/_ctypes_test.so" ) # directory + low level module

    m.addPackage( "${PN}-datetime", "Python Calendar and Time support", "${PN}-core ${PN}-codecs",
        "_strptime.* calendar.* lib-dynload/datetime.so" )

    m.addPackage( "${PN}-db", "Python File-Based Database Support", "${PN}-core",
        "anydbm.* dumbdbm.* whichdb.* " )

    m.addPackage( "${PN}-debugger", "Python Debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint",
        "bdb.* pdb.*" )

    m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects.", "${PN}-lang ${PN}-re",
        "difflib.*" )

    m.addPackage( "${PN}-distutils-staticdev", "Python Distribution Utilities (Static Libraries)", "${PN}-distutils",
        "config/lib*.a" ) # package

    m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core",
        "config distutils" ) # package

    m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings.", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib",
        "doctest.*" )

    # FIXME consider adding to some higher level package
    m.addPackage( "${PN}-elementtree", "Python elementree", "${PN}-core",
        "lib-dynload/_elementtree.so" )

    m.addPackage( "${PN}-email", "Python Email Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
        "imaplib.* email" ) # package

    m.addPackage( "${PN}-fcntl", "Python's fcntl Interface", "${PN}-core",
        "lib-dynload/fcntl.so" )

    m.addPackage( "${PN}-hotshot", "Python Hotshot Profiler", "${PN}-core",
        "hotshot lib-dynload/_hotshot.so" )

    m.addPackage( "${PN}-html", "Python HTML Processing", "${PN}-core",
        "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " )

    m.addPackage( "${PN}-gdbm", "Python GNU Database Support", "${PN}-core",
        "lib-dynload/gdbm.so" )

    m.addPackage( "${PN}-image", "Python Graphical Image Handling", "${PN}-core",
        "colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )

    m.addPackage( "${PN}-io", "Python Low-Level I/O", "${PN}-core ${PN}-math ${PN}-textutils",
        "lib-dynload/_socket.so lib-dynload/_io.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
        "pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" )

    m.addPackage( "${PN}-json", "Python JSON Support", "${PN}-core ${PN}-math ${PN}-re",
        "json lib-dynload/_json.so" ) # package

    m.addPackage( "${PN}-lang", "Python Low-Level Language Support", "${PN}-core",
        "lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
        "lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
        "atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
        "tokenize.* traceback.* weakref.*" )

    m.addPackage( "${PN}-logging", "Python Logging Support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
        "logging" ) # package

    m.addPackage( "${PN}-mailbox", "Python Mailbox Format Support", "${PN}-core ${PN}-mime",
        "mailbox.*" )

    m.addPackage( "${PN}-math", "Python Math Support", "${PN}-core ${PN}-crypt",
        "lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )

    m.addPackage( "${PN}-mime", "Python MIME Handling APIs", "${PN}-core ${PN}-io",
        "mimetools.* uu.* quopri.* rfc822.* MimeWriter.*" )

    m.addPackage( "${PN}-mmap", "Python Memory-Mapped-File Support", "${PN}-core ${PN}-io",
        "lib-dynload/mmap.so " )

    m.addPackage( "${PN}-multiprocessing", "Python Multiprocessing Support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-threading",
        "lib-dynload/_multiprocessing.so multiprocessing" ) # package

    m.addPackage( "${PN}-netclient", "Python Internet Protocol Clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
        "*Cookie*.* " +
        "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )

    m.addPackage( "${PN}-netserver", "Python Internet Protocol Servers", "${PN}-core ${PN}-netclient",
        "cgi.* *HTTPServer.* SocketServer.*" )

    m.addPackage( "${PN}-numbers", "Python Number APIs", "${PN}-core ${PN}-lang ${PN}-re",
        "decimal.* numbers.*" )

    m.addPackage( "${PN}-pickle", "Python Persistence Support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
        "pickle.* shelve.* lib-dynload/cPickle.so pickletools.*" )

    m.addPackage( "${PN}-pkgutil", "Python Package Extension Utility Support", "${PN}-core",
        "pkgutil.*")

    m.addPackage( "${PN}-pprint", "Python Pretty-Print Support", "${PN}-core ${PN}-io",
        "pprint.*" )

    m.addPackage( "${PN}-profile", "Python Basic Profiling Support", "${PN}-core ${PN}-textutils",
        "profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )

    m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core",
        "re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin

    m.addPackage( "${PN}-readline", "Python Readline Support", "${PN}-core",
        "lib-dynload/readline.so rlcompleter.*" )

    m.addPackage( "${PN}-resource", "Python Resource Control Interface", "${PN}-core",
        "lib-dynload/resource.so" )

    m.addPackage( "${PN}-shell", "Python Shell-Like Functionality", "${PN}-core ${PN}-re",
        "cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )

    m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient",
        "robotparser.*")

    m.addPackage( "${PN}-subprocess", "Python Subprocess Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle",
        "subprocess.*" )

    m.addPackage( "${PN}-sqlite3", "Python Sqlite3 Database Support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading ${PN}-zlib",
        "lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )

    m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 Database Support Tests", "${PN}-core ${PN}-sqlite3",
        "sqlite3/test" )

    m.addPackage( "${PN}-stringold", "Python String APIs [deprecated]", "${PN}-core ${PN}-re",
        "lib-dynload/strop.so string.* stringold.*" )

    m.addPackage( "${PN}-syslog", "Python Syslog Interface", "${PN}-core",
        "lib-dynload/syslog.so" )

    m.addPackage( "${PN}-terminal", "Python Terminal Controlling Support", "${PN}-core ${PN}-io",
        "pty.* tty.*" )

    m.addPackage( "${PN}-tests", "Python Tests", "${PN}-core",
        "test" ) # package

    m.addPackage( "${PN}-threading", "Python Threading & Synchronization Support", "${PN}-core ${PN}-lang",
        "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )

    m.addPackage( "${PN}-tkinter", "Python Tcl/Tk Bindings", "${PN}-core",
        "lib-dynload/_tkinter.so lib-tk" ) # package

    m.addPackage( "${PN}-unittest", "Python Unit Testing Framework", "${PN}-core ${PN}-stringold ${PN}-lang",
        "unittest/" )

    m.addPackage( "${PN}-unixadmin", "Python Unix Administration Support", "${PN}-core",
        "lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )

    m.addPackage( "${PN}-xml", "Python basic XML support.", "${PN}-core ${PN}-elementtree ${PN}-re",
        "lib-dynload/pyexpat.so xml xmllib.*" ) # package

    m.addPackage( "${PN}-xmlrpc", "Python XMLRPC Support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
        "xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.*" )

    m.addPackage( "${PN}-zlib", "Python zlib Support.", "${PN}-core",
        "lib-dynload/zlib.so" )

    # NOTE(review): duplicate registration of "${PN}-mailbox" -- identical to the
    # entry further up; the dict assignment in addPackage makes this a harmless
    # overwrite, but one of the two could be removed.
    m.addPackage( "${PN}-mailbox", "Python Mailbox Format Support", "${PN}-core ${PN}-mime",
        "mailbox.*" )

    m.make()
|
PhiInnovations/mdp28-linux-bsp
|
scripts/contrib/python/generate-manifest-2.7.py
|
Python
|
mit
| 16,090
|
[
"VisIt"
] |
c756c984238e9e78b9b1846ce6657aed3d993f3443434e8441375fa9f4ef0563
|
#
# Copyright 2008, 2009 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
# Interface class for Keithley 2000 Multimeter
import visa
import pythics.libinstrument
#
# OLD KEITHLEY2000 DRIVER
#
class Keithley2000(visa.GpibInstrument):
    """Driver for a Keithley 2000 multimeter over GPIB (old pythics API).

    Configuration is pushed to the instrument as SCPI commands in
    Configure(); Read() fetches either a single reading or, in scan mode,
    a whole buffered trace.
    """

    def __init__(self, *args, **kwargs):
        visa.GpibInstrument.__init__(self, *args, **kwargs)

    def Configure(self, **settings):
        """Program the meter from keyword settings.

        Recognised keys: 'function' ('voltage:dc' or 'temperature'),
        'range', 'n_powerline_cycles', 'filter', 'filter_time',
        'thermocouple_type', 'scan', 'scan_step_time'.
        """
        self.scan = False
        #self.write('*RST')
        if settings['function'].lower() == 'voltage:dc':
            self.write(":sense:function 'voltage:dc'")
            # measurement range
            if 'range' in settings:
                v = settings['range']
                if v == 'auto':
                    self.write(':sense:voltage:DC:range:auto on')
                else:
                    self.write(':sense:voltage:DC:range:upper ' + str(v))
            # measurement integration line cycles
            if 'n_powerline_cycles' in settings:
                v = settings['n_powerline_cycles']
                # should be a float between 0.01 and 10
                self.write(':sense:voltage:DC:nplcycles ' + str(v))
                self.npl_cycles = v
            else:
                self.write(':sense:voltage:DC:nplcycles 1')
                self.npl_cycles = 1.0
            # measurement filter
            if 'filter' in settings:
                v = settings['filter']
                if v == 'repeat':
                    self.write(':sense:voltage:DC:average:state 1')
                    self.write(':sense:voltage:DC:average:tcontrol repeat')
                elif v == 'moving':
                    self.write(':sense:voltage:DC:average:state 1')
                    self.write(':sense:voltage:DC:average:tcontrol moving')
                else:
                    # no filter
                    self.write(':sense:voltage:DC:average:state 0')
            # filter time in seconds
            if 'filter_time' in settings:
                v_t = settings['filter_time']
                # integration time is based on power line cycles (60 Hz)
                v = 60.0*v_t/self.npl_cycles
                self.write(':sense:voltage:DC:average:count ' + str(v))
        elif settings['function'].lower() == 'temperature':
            self.write(":sense:function 'temperature'")
            self.write(":unit:temperature C")
            # measurement integration line cycles
            if 'n_powerline_cycles' in settings:
                v = settings['n_powerline_cycles']
                # should be a float between 0.01 and 10
                self.write(':sense:temperature:nplcycles ' + str(v))
                self.npl_cycles = v
            else:
                self.write(':sense:temperature:nplcycles 1')
                self.npl_cycles = 1.0
            # measurement filter
            if 'filter' in settings:
                v = settings['filter']
                if v == 'repeat':
                    self.write(':sense:temperature:average:state 1')
                    self.write(':sense:temperature:average:tcontrol repeat')
                elif v == 'moving':
                    self.write(':sense:temperature:average:state 1')
                    self.write(':sense:temperature:average:tcontrol moving')
                else:
                    # no filter
                    self.write(':sense:temperature:average:state 0')
            # filter time in seconds
            if 'filter_time' in settings:
                v_t = settings['filter_time']
                # integration time is based on power line cycles (60 Hz)
                v = 60.0*v_t/self.npl_cycles
                self.write(':sense:temperature:average:count ' + str(v))
            # thermocouple type: should be 'J', 'K', or 'T'
            if 'thermocouple_type' in settings:
                v = settings['thermocouple_type']
                self.write(':sense:temperature:tcouple:type ' + str(v))
            # SHOULD BE ABLE TO CONFIGURE THIS
            self.write(':sense:temperature:tcouple:rjunction:rselect real')
        # scanning: should be a tuple of consecutive channels e.g. (1, 2, 3)
        # NOTE(review): placed at function level so either measurement
        # function can scan -- confirm against the original layout.
        if 'scan' in settings:
            self.scan = True
            v = settings['scan']
            # build a SCPI channel list, e.g. '(@1, 2, 3)'
            s = '(@' + str(v).strip('()[]') + ')'
            self.write(':initiate:continuous off')
            self.write(':trigger:count ' + str(len(v)))
            self.write(':trigger:source timer')
            self.write(':route:scan ' + s)
            self.write(':trace:points ' + str(len(v)))
            self.write(':trace:feed sense')
            self.write(':trace:feed:control next')
            # time spent at each channel (s)
            if 'scan_step_time' in settings:
                v = settings['scan_step_time']
                self.write(':trigger:timer ' + str(v))
            # start the scan
            self.write(':route:scan:lselect internal')
            self.write(':initiate')

    def Read(self):
        """Return the latest reading.

        In scan mode: return a list of floats (one per channel) and re-arm
        the trace buffer for the next sweep.  Otherwise: return one float.
        """
        if self.scan == True:
            v = self.ask(':trace:data?')
            self.write(':trace:feed:control next')
            self.write(':initiate')
            v = v.split(',')
            v = list(map(float, v))
            return v
        else:
            v = self.ask('fetch?')
            return float(v)
#
# Use a Keithley 2000 as a DC voltmeter
#
class Voltmeter(pythics.libinstrument.GPIBInstrument):
    """Keithley 2000 configured as a DC voltmeter.

    Exposes instrument state as properties: range, powerline_cycles,
    filter, filter_time, error, voltage.  Fix over the original: the
    filter_time setter divides by self._powerline_cycles, which was only
    assigned by the powerline_cycles setter, so setting filter_time first
    raised AttributeError.  A default of 1.0 is now set in __init__.
    """

    def __init__(self, *args, **kwargs):
        pythics.libinstrument.GPIBInstrument.__init__(self, *args, **kwargs)
        # DO WE NEED TO INITIALIZE SOME DEFAULT VALUES?
        self.write(":sense:function 'voltage:dc'")
        # assume 1 power-line cycle until the caller sets powerline_cycles
        # -- TODO confirm this matches the instrument power-on default.
        self._powerline_cycles = 1.0

    # range property: measurement range ('auto' or an upper bound in volts)
    def __get_range(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_range(self, value):
        if value == 'auto':
            self.write(':sense:voltage:DC:range:auto on')
        else:
            self.write(':sense:voltage:DC:range:upper ' + str(value))
    range = property(__get_range, __set_range)

    # powerline_cycles property: measurement integration line cycles
    def __get_powerline_cycles(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_powerline_cycles(self, value):
        # should be a float between 0.01 and 10
        self.write(':sense:voltage:DC:nplcycles ' + str(value))
        self._powerline_cycles = value
    powerline_cycles = property(__get_powerline_cycles, __set_powerline_cycles)

    # filter property: 'repeat', 'moving', or anything else to disable
    def __get_filter(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_filter(self, value):
        if value == 'repeat':
            self.write(':sense:voltage:DC:average:state 1')
            self.write(':sense:voltage:DC:average:tcontrol repeat')
        elif value == 'moving':
            self.write(':sense:voltage:DC:average:state 1')
            self.write(':sense:voltage:DC:average:tcontrol moving')
        else:
            # no filter
            self.write(':sense:voltage:DC:average:state 0')
    filter = property(__get_filter, __set_filter)

    # filter_time property: filter time in seconds
    def __get_filter_time(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_filter_time(self, value):
        # integration time is based on power line cycles (60 Hz)
        v = 60.0*value/self._powerline_cycles
        self.write(':sense:voltage:DC:average:count ' + str(v))
    filter_time = property(__get_filter_time, __set_filter_time)

    # error property: read the next entry from the instrument error queue
    def __get_error(self):
        return self.ask('system:error?')
    error = property(__get_error)

    # voltage property: latest reading in volts
    def __get_voltage(self):
        v = self.ask('fetch?')
        return float(v)
    voltage = property(__get_voltage)
#
# Use a Keithley 2000 as a DC Ammeter
#
class Ammeter(pythics.libinstrument.GPIBInstrument):
    """Keithley 2000 configured as a DC ammeter.

    Exposes instrument state as properties: range, powerline_cycles,
    filter, filter_time, error, current.  Fix over the original: the
    filter_time setter divides by self._powerline_cycles, which was only
    assigned by the powerline_cycles setter, so setting filter_time first
    raised AttributeError.  A default of 1.0 is now set in __init__.
    """

    def __init__(self, *args, **kwargs):
        pythics.libinstrument.GPIBInstrument.__init__(self, *args, **kwargs)
        # DO WE NEED TO INITIALIZE SOME DEFAULT VALUES?
        self.write(":sense:function 'current:dc'")
        # assume 1 power-line cycle until the caller sets powerline_cycles
        # -- TODO confirm this matches the instrument power-on default.
        self._powerline_cycles = 1.0

    # range property: measurement range ('auto' or an upper bound in amps)
    def __get_range(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_range(self, value):
        if value == 'auto':
            self.write(':sense:current:DC:range:auto on')
        else:
            self.write(':sense:current:DC:range:upper ' + str(value))
    range = property(__get_range, __set_range)

    # powerline_cycles property: measurement integration line cycles
    def __get_powerline_cycles(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_powerline_cycles(self, value):
        # should be a float between 0.01 and 10
        self.write(':sense:current:DC:nplcycles ' + str(value))
        self._powerline_cycles = value
    powerline_cycles = property(__get_powerline_cycles, __set_powerline_cycles)

    # filter property: 'repeat', 'moving', or anything else to disable
    def __get_filter(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_filter(self, value):
        if value == 'repeat':
            self.write(':sense:current:DC:average:state 1')
            self.write(':sense:current:DC:average:tcontrol repeat')
        elif value == 'moving':
            self.write(':sense:current:DC:average:state 1')
            self.write(':sense:current:DC:average:tcontrol moving')
        else:
            # no filter
            self.write(':sense:current:DC:average:state 0')
    filter = property(__get_filter, __set_filter)

    # filter_time property: filter time in seconds
    def __get_filter_time(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_filter_time(self, value):
        # integration time is based on power line cycles (60 Hz)
        v = 60.0*value/self._powerline_cycles
        self.write(':sense:current:DC:average:count ' + str(v))
    filter_time = property(__get_filter_time, __set_filter_time)

    # error property: read the next entry from the instrument error queue
    def __get_error(self):
        return self.ask('system:error?')
    error = property(__get_error)

    # current property: latest reading in amps
    def __get_current(self):
        i = self.ask('fetch?')
        return float(i)
    current = property(__get_current)
#
# Use a Keithley 2000 as a 2 probe ohmmeter
#
class Ohmmeter(pythics.libinstrument.GPIBInstrument):
    """Keithley 2000 configured as a two-probe ohmmeter.

    Exposes instrument state as properties: range, powerline_cycles,
    filter, filter_time, error, resistance.  Fix over the original: the
    filter_time setter divides by self._powerline_cycles, which was only
    assigned by the powerline_cycles setter, so setting filter_time first
    raised AttributeError.  A default of 1.0 is now set in __init__.
    """

    def __init__(self, *args, **kwargs):
        pythics.libinstrument.GPIBInstrument.__init__(self, *args, **kwargs)
        # DO WE NEED TO INITIALIZE SOME DEFAULT VALUES?
        self.write(":sense:function 'resistance'")
        # assume 1 power-line cycle until the caller sets powerline_cycles
        # -- TODO confirm this matches the instrument power-on default.
        self._powerline_cycles = 1.0

    # range property: measurement range ('auto' or an upper bound in ohms)
    def __get_range(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_range(self, value):
        if value == 'auto':
            self.write(':sense:resistance:range:auto on')
        else:
            self.write(':sense:resistance:range:upper ' + str(value))
    range = property(__get_range, __set_range)

    # powerline_cycles property: measurement integration line cycles
    def __get_powerline_cycles(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_powerline_cycles(self, value):
        # should be a float between 0.01 and 10
        self.write(':sense:resistance:nplcycles ' + str(value))
        self._powerline_cycles = value
    powerline_cycles = property(__get_powerline_cycles, __set_powerline_cycles)

    # filter property: 'repeat', 'moving', or anything else to disable
    def __get_filter(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_filter(self, value):
        if value == 'repeat':
            self.write(':sense:resistance:average:state 1')
            self.write(':sense:resistance:average:tcontrol repeat')
        elif value == 'moving':
            self.write(':sense:resistance:average:state 1')
            self.write(':sense:resistance:average:tcontrol moving')
        else:
            # no filter
            self.write(':sense:resistance:average:state 0')
    filter = property(__get_filter, __set_filter)

    # filter_time property: filter time in seconds
    def __get_filter_time(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_filter_time(self, value):
        # integration time is based on power line cycles (60 Hz)
        v = 60.0*value/self._powerline_cycles
        self.write(':sense:resistance:average:count ' + str(v))
    filter_time = property(__get_filter_time, __set_filter_time)

    # error property: read the next entry from the instrument error queue
    def __get_error(self):
        return self.ask('system:error?')
    error = property(__get_error)

    # resistance property: latest reading in ohms
    def __get_resistance(self):
        r = self.ask('fetch?')
        return float(r)
    resistance = property(__get_resistance)
#
# Use a Keithley 2000 for scanning through thermocouples
#
class TemperatureScanner(pythics.libinstrument.GPIBInstrument):
    """Keithley 2000 set up to scan thermocouple channels.

    Fixes over the original:
    * ``__init__`` referenced an undefined name ``settings`` and always
      raised NameError.  Configuration is now read from an optional
      ``settings`` dict keyword argument (popped before the base-class
      call), with each key guarded as in ``Keithley2000.Configure``.
    * The SCPI channel list dropped its closing parenthesis
      (``.strip('(')``); it is now built as in ``Keithley2000.Configure``.
    * ``self._powerline_cycles`` is initialized so the ``filter_time``
      setter cannot raise AttributeError.
    """

    def __init__(self, *args, **kwargs):
        # Pull our configuration out of kwargs before the base class sees it.
        settings = kwargs.pop('settings', {})
        pythics.libinstrument.GPIBInstrument.__init__(self, *args, **kwargs)
        self.write(":sense:function 'temperature'")
        self.write(":unit:temperature C")
        # measurement integration line cycles
        if 'n_powerline_cycles' in settings:
            v = settings['n_powerline_cycles']
            # should be a float between 0.01 and 10
            self.write(':sense:temperature:nplcycles ' + str(v))
            self.npl_cycles = v
        else:
            self.write(':sense:temperature:nplcycles 1')
            self.npl_cycles = 1.0
        self._powerline_cycles = self.npl_cycles
        # measurement filter
        v = settings.get('filter')
        if v == 'repeat':
            self.write(':sense:temperature:average:state 1')
            self.write(':sense:temperature:average:tcontrol repeat')
        elif v == 'moving':
            self.write(':sense:temperature:average:state 1')
            self.write(':sense:temperature:average:tcontrol moving')
        else:
            # no filter
            self.write(':sense:temperature:average:state 0')
        # filter time in seconds
        if 'filter_time' in settings:
            v_t = settings['filter_time']
            # integration time is based on power line cycles (60 Hz)
            v = 60.0*v_t/self.npl_cycles
            self.write(':sense:temperature:average:count ' + str(v))
        # thermocouple type: should be 'J', 'K', or 'T'
        if 'thermocouple_type' in settings:
            v = settings['thermocouple_type']
            self.write(':sense:temperature:tcouple:type ' + str(v))
        # SHOULD BE ABLE TO CONFIGURE THIS
        self.write(':sense:temperature:tcouple:rjunction:rselect real')
        # scanning: should be a tuple of consecutive channels e.g. (1, 2, 3)
        self.scan = False
        if 'scan' in settings:
            self.scan = True
            v = settings['scan']
            # build a SCPI channel list, e.g. '(@1, 2, 3)'
            s = '(@' + str(v).strip('()[]') + ')'
            self.write(':initiate:continuous off')
            self.write(':trigger:count ' + str(len(v)))
            self.write(':trigger:source timer')
            self.write(':route:scan ' + s)
            self.write(':trace:points ' + str(len(v)))
            self.write(':trace:feed sense')
            self.write(':trace:feed:control next')
            # time spent at each channel (s)
            if 'scan_step_time' in settings:
                self.write(':trigger:timer ' + str(settings['scan_step_time']))
            # start the scan
            self.write(':route:scan:lselect internal')
            self.write(':initiate')

    # range property
    def __get_range(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_range(self, value):
        # NOTE(review): these voltage:DC range commands look carried over
        # from the voltmeter class; the temperature function configured in
        # __init__ has no matching range commands in this driver -- confirm
        # against the instrument manual before relying on this property.
        if value == 'auto':
            self.write(':sense:voltage:DC:range:auto on')
        else:
            self.write(':sense:voltage:DC:range:upper ' + str(value))
    range = property(__get_range, __set_range)

    # powerline_cycles property: measurement integration line cycles
    def __get_powerline_cycles(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_powerline_cycles(self, value):
        # should be a float between 0.01 and 10; temperature subsystem,
        # matching the commands issued in __init__ (the original wrote to
        # the voltage:DC subsystem here, which this class never measures).
        self.write(':sense:temperature:nplcycles ' + str(value))
        self._powerline_cycles = value
    powerline_cycles = property(__get_powerline_cycles, __set_powerline_cycles)

    # filter property: 'repeat', 'moving', or anything else to disable
    def __get_filter(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_filter(self, value):
        if value == 'repeat':
            self.write(':sense:temperature:average:state 1')
            self.write(':sense:temperature:average:tcontrol repeat')
        elif value == 'moving':
            self.write(':sense:temperature:average:state 1')
            self.write(':sense:temperature:average:tcontrol moving')
        else:
            # no filter
            self.write(':sense:temperature:average:state 0')
    filter = property(__get_filter, __set_filter)

    # filter_time property: filter time in seconds
    def __get_filter_time(self):
        # NEEDS WORK: should query the instrument instead of returning None
        return None

    def __set_filter_time(self, value):
        # integration time is based on power line cycles (60 Hz)
        v = 60.0*value/self._powerline_cycles
        self.write(':sense:temperature:average:count ' + str(v))
    filter_time = property(__get_filter_time, __set_filter_time)

    # error property: read the next entry from the instrument error queue
    def __get_error(self):
        return self.ask('system:error?')
    error = property(__get_error)

    # voltage property: latest reading via 'fetch?' (name kept for
    # interface compatibility even though this class measures temperature)
    def __get_voltage(self):
        v = self.ask('fetch?')
        return float(v)
    voltage = property(__get_voltage)
|
dursobr/Pythics
|
pythics/instruments/keithley_2000.py
|
Python
|
gpl-3.0
| 18,103
|
[
"Brian"
] |
96fb61cd15ab4f52326bed69396ad6a8b79052d49d1b0e1f989177d3481e859c
|
"""
Created on Mar 17, 2014
@author: tjoneslo
"""
import logging
import math
from wikistats import WikiStats
from collections import OrderedDict, defaultdict
from AllyGen import AllyGen
from Star import UWPCodes
class Populations(object):
    """Running tally of one sophont's population across many worlds."""

    def __init__(self):
        self.code = ""          # sophont code this entry tracks
        self.count = 0          # number of worlds contributing
        self.population = 0     # total population over those worlds
        self.homeworlds = []    # worlds flagged as this sophont's homeworld

    def add_population(self, population, homeworld):
        """Fold one world's population in; record its homeworld if given."""
        self.population += population
        self.count += 1
        if homeworld:
            self.homeworlds.append(homeworld)

    def __lt__(self, other):
        """Sort Populations entries by total population."""
        return self.population < other.population
class ObjectStatistics(object):
    """Accumulated statistics for one area (galaxy, sector, subsector,
    allegiance, or UWP-code bucket).  Filled in by StatCalculation."""

    # Maps a base code letter to its display name; 'Split' codes expand to
    # two bases in StatCalculation.add_stats.
    base_mapping = {'C':'Corsair base', 'D':'Naval depot', 'E': 'Embassy', 'K': 'Naval base', 'M': 'Military base',
                    'N': 'Naval base', 'O': 'Naval outpost',
                    'R': 'Clan base', 'S': 'Scout base', 'T':'Tlaukhu base', 'V': 'Scout base', 'W': 'Way station',
                    '*': 'Unknown', 'I': 'Unknown',
                    'G': 'Vargr Naval base', 'J': 'Naval base',
                    'L': 'Hiver naval base', 'P': 'Droyne Naval base', 'Q': 'Droyne military garrison',
                    'X': 'Zhodani relay station', 'Y': 'Zhodani depot',
                    'A': 'Split', 'B': 'Split', 'F': 'Split','H': 'Split', 'U': 'Split', 'Z': 'Split' }

    def __init__(self):
        self.population = 0
        # per-sophont population tallies, keyed by 4-char sophont code
        self.populations = defaultdict(Populations)
        self.economy = 0
        self.trade = 0
        self.tradeExt = 0
        self.tradeVol = 0
        self.percapita = 0
        self.number = 0
        self.milBudget = 0
        self.maxTL = 0
        self.maxPort = 'X'
        self.maxPop = 0
        self.sum_ru = 0
        self.shipyards = 0
        self.col_be = 0
        self.im_be = 0
        self.passengers = 0
        self.spa_people = 0
        self.port_size = defaultdict(int)
        self.code_counts = defaultdict(int)
        self.bases = defaultdict(int)
        self.eti_worlds = 0
        self.eti_cargo = 0
        self.eti_pass = 0
        self.homeworlds = []
        self.high_pop_worlds = []
        self.high_tech_worlds = []
        self.TLmean = 0
        self.TLstddev = 0
        self.subsectorCp = []
        self.sectorCp = []
        self.otherCp = []
        self.gg_count = 0
        self.worlds = 0
        self.stars = 0
        self.star_count = defaultdict(int)
        self.primary_count = defaultdict(int)

    # For the JSONPickel work
    def __getstate__(self):
        """Serialization hook: drop the world-object lists so pickled/JSON
        output does not drag whole Star objects along."""
        state = self.__dict__.copy()
        del state['high_pop_worlds']
        del state['high_tech_worlds']
        del state['subsectorCp']
        del state['sectorCp']
        del state['otherCp']
        del state['homeworlds']
        return state

    def homeworld_count(self):
        return len(self.homeworlds)

    def high_pop_worlds_count(self):
        return len(self.high_pop_worlds)

    def high_pop_worlds_list(self):
        # only the six most populous worlds are listed
        return [world.wiki_name() for world in self.high_pop_worlds[0:6]]

    def high_tech_worlds_count(self):
        return len(self.high_tech_worlds)

    def high_tech_worlds_list(self):
        # only the first six highest-tech worlds are listed
        return [world.wiki_name() for world in self.high_tech_worlds[0:6]]

    def populations_count(self):
        return len(self.populations)
class UWPCollection(object):
    """Maps each UWP code category to a {value: ObjectStatistics} dict."""

    def __init__(self):
        # one empty bucket dict per known UWP code category, in order
        self.uwp = OrderedDict((uwpCode, {}) for uwpCode in UWPCodes.uwpCodes)

    def stats(self, code, value):
        """Return the statistics bucket for code/value, creating it on demand."""
        return self.uwp[code].setdefault(value, ObjectStatistics())

    def __getitem__(self, index):
        return self.uwp[index]

    def __setitem__(self, index, value):
        self.uwp[index] = value
class StatCalculation(object):
    """
    Statistics calculations and output.

    Walks every world in the galaxy, accumulating ObjectStatistics at
    galaxy, sector, subsector, allegiance, and UWP-code level, then
    derives per-capita figures and hands off to WikiStats for output.
    """

    def __init__(self, galaxy):
        self.logger = logging.getLogger('PyRoute.StatCalculation')
        self.galaxy = galaxy
        self.all_uwp = UWPCollection()   # stats bucketed by UWP code, all worlds
        self.imp_uwp = UWPCollection()   # same, imperial-aligned worlds only

    def calculate_statistics(self, ally_match):
        """Accumulate statistics for every world, then derive per-capita
        values for each area.  ally_match controls allegiance grouping."""
        self.logger.info('Calculating statistics for {:d} worlds'.format(len(self.galaxy.stars)))
        for sector in self.galaxy.sectors.values():
            if sector is None:
                continue
            for star in sector.worlds:
                # starport size derived from total trade through the port
                star.starportSize = max(self.trade_to_btn(star.tradeIn + star.tradeOver) - 5, 0)
                star.uwpCodes['Starport Size'] = star.starportSize
                # Budget in MCr
                star.starportBudget = \
                    ((star.tradeIn // 10000) * 150 + (star.tradeOver // 10000) * 140 +
                     (star.passIn) * 500 + (star.passOver) * 460) // 1000000
                # Population in people employed.
                star.starportPop = int(star.starportBudget / 0.2)
                # roll this world into sector, galaxy, and subsector totals
                self.add_stats(sector.stats, star)
                self.add_stats(self.galaxy.stats, star)
                self.add_stats(sector.subsectors[star.subsector()].stats, star)
                self.max_tl(sector.stats, star)
                self.max_tl(sector.subsectors[star.subsector()].stats, star)
                self.add_alg_stats(self.galaxy, star, star.alg_code)
                self.add_alg_stats(sector, star, star.alg_code)
                self.add_alg_stats(sector.subsectors[star.subsector()], star, star.alg_code)
                # count the base allegiance too when it differs (e.g. client states)
                if star.alg_base_code != star.alg_code:
                    self.add_alg_stats(self.galaxy, star, star.alg_base_code)
                    self.add_alg_stats(sector, star, star.alg_base_code)
                    self.add_alg_stats(sector.subsectors[star.subsector()], star, star.alg_base_code)
                if AllyGen.imperial_align(star.alg_code):
                    for uwpCode, uwpValue in star.uwpCodes.items():
                        self.add_stats(self.imp_uwp.stats(uwpCode, uwpValue), star)
                for uwpCode, uwpValue in star.uwpCodes.items():
                    self.add_stats(self.all_uwp.stats(uwpCode, uwpValue), star)
            self.per_capita(sector.worlds, sector.stats)  # Per capita sector stats
            sector.alg_sorted = AllyGen.sort_allegiances(sector.alg, ally_match)
            for alg in sector.alg_sorted:
                self.per_capita(alg.worlds, alg.stats)
            for subsector in sector.subsectors.values():
                self.per_capita(subsector.worlds, subsector.stats)
                subsector.alg_sorted = AllyGen.sort_allegiances(subsector.alg, ally_match)
                for alg in subsector.alg_sorted:
                    self.per_capita(alg.worlds, alg.stats)
        self.per_capita(None, self.galaxy.stats)
        self.galaxy.alg_sorted = AllyGen.sort_allegiances(self.galaxy.alg, ally_match)
        for alg in self.galaxy.alg_sorted:
            self.per_capita(alg.worlds, alg.stats)
        for uwpName in self.all_uwp.uwp.values():
            for uwpStats in uwpName.values():
                self.per_capita(None, uwpStats)
        for uwpName in self.imp_uwp.uwp.values():
            for uwpStats in uwpName.values():
                self.per_capita(None, uwpStats)

    def add_alg_stats(self, area, star, alg):
        """Roll one star into the given area's allegiance bucket."""
        algStats = area.alg[alg].stats
        self.add_stats(algStats, star)
        self.max_tl(algStats, star)

    def add_pop_to_sophont(self, stats, star):
        """Split one world's population across its sophont codes.

        Sophont entries are 4-char code + 1-char percent digit, where
        'W' is 100%, '0' is 5%, 'A' marks the default sophont, and 'X'
        means dieback/extinct.  Whatever percentage remains is credited
        to the default sophont (human unless overridden).
        """
        total_pct = 100
        default_soph = 'Huma'
        home = None
        for sophont in star.tradeCode.sophonts:
            soph_code = sophont[0:4]
            soph_pct = sophont[4:]
            if soph_pct == 'A':
                default_soph = soph_code
                continue
            soph_pct = 100.0 if soph_pct == 'W' else 0.0 if soph_pct in ['X', 'A'] else \
                5.0 if soph_pct == '0' else 10.0 * int(soph_pct)
            if any([soph for soph in star.tradeCode.homeworld if soph.startswith(soph_code)]):
                home = star
            # Soph_pct == 'X' is dieback or extinct.
            # NOTE(review): soph_pct was converted to a float above, so this
            # string comparison can never be true here -- confirm intent.
            if soph_pct == 'X':
                stats.populations[soph_code].population = -1
            # skip the empty worlds
            elif not star.tradeCode.barren:
                stats.populations[soph_code].add_population(int(star.population * (soph_pct / 100.0)), home)
            total_pct -= soph_pct
        if total_pct < -5:
            self.logger.warning("{} has sophont percent over 100%: {}".format(star, total_pct))
        elif total_pct < 0:
            self.logger.info("{} has a sophont percent just over 100%: {}".format(star, total_pct))
        elif not star.tradeCode.barren:
            stats.populations[default_soph].add_population(int(star.population * (total_pct / 100.0)), None)

    def add_stats(self, stats, star):
        """Accumulate one star's figures into the given ObjectStatistics."""
        stats.population += star.population
        if star.tradeCode.homeworld:
            stats.homeworlds.append(star)
        self.add_pop_to_sophont(stats, star)
        stats.economy += star.gwp
        stats.number += 1
        stats.sum_ru += star.ru
        stats.shipyards += star.ship_capacity
        stats.tradeVol += (star.tradeOver + star.tradeIn)
        stats.col_be += star.col_be
        stats.im_be += star.im_be
        stats.passengers += star.passIn
        stats.spa_people += star.starportPop
        # counted both by numeric size and by port letter
        stats.port_size[star.starportSize] += 1
        stats.port_size[star.port] += 1
        for code in star.tradeCode.codes:
            stats.code_counts[code] += 1
        if star.ggCount:
            stats.gg_count += 1
        stats.worlds += star.worlds
        if star.star_list:
            stats.stars += len(star.star_list)
            stats.star_count[len(star.star_list)] += 1
            stats.primary_count[star.star_list[0][0]] += 1
        for code in star.baseCode:
            if code != '-':
                # 'split' codes represent two co-located bases
                if code == 'A':
                    stats.bases[ObjectStatistics.base_mapping['N']] += 1
                    stats.bases[ObjectStatistics.base_mapping['S']] += 1
                elif code == 'B':
                    stats.bases[ObjectStatistics.base_mapping['N']] += 1
                    stats.bases[ObjectStatistics.base_mapping['W']] += 1
                elif code == 'F':
                    stats.bases[ObjectStatistics.base_mapping['K']] += 1
                    stats.bases[ObjectStatistics.base_mapping['M']] += 1
                elif code == 'H':
                    stats.bases[ObjectStatistics.base_mapping['C']] += 1
                    stats.bases[ObjectStatistics.base_mapping['K']] += 1
                elif code == 'U':
                    stats.bases[ObjectStatistics.base_mapping['T']] += 1
                    stats.bases[ObjectStatistics.base_mapping['R']] += 1
                elif code == 'Z':
                    stats.bases[ObjectStatistics.base_mapping['K']] += 1
                    stats.bases[ObjectStatistics.base_mapping['M']] += 1
                else:
                    stats.bases[ObjectStatistics.base_mapping[code]] += 1
        if star.eti_cargo_volume > 0 or star.eti_pass_volume > 0:
            stats.eti_worlds += 1
        stats.eti_cargo += star.eti_cargo_volume
        stats.eti_pass += star.eti_pass_volume

    def max_tl(self, stats, star):
        """Track the best TL, starport letter, and population code seen."""
        stats.maxTL = max(stats.maxTL, star.tl)
        stats.maxPort = 'ABCDEX?'[min('ABCDEX?'.index(star.uwpCodes['Starport']), 'ABCDEX?'.index(stats.maxPort))]
        stats.maxPop = max(stats.maxPop, star.popCode)

    def per_capita(self, worlds, stats):
        """Derive per-capita GWP, shipyard totals, and 'best world' lists.

        worlds may be None for aggregate-only buckets (e.g. UWP stats).
        """
        if stats.population > 100000:
            stats.percapita = stats.economy // (stats.population // 1000)
        elif stats.population > 0:
            stats.percapita = stats.economy * 1000 // stats.population
        else:
            stats.percapita = 0
        # express shipyard capacity in megatons; below that, report zero
        if stats.shipyards > 1000000:
            stats.shipyards //= 1000000
        else:
            stats.shipyards = 0
        if worlds:
            stats.high_pop_worlds = [world for world in worlds if world.popCode == stats.maxPop]
            stats.high_pop_worlds.sort(key=lambda star: star.popM, reverse=True)
            stats.high_tech_worlds = [world for world in worlds if world.tl == stats.maxTL]
            stats.subsectorCp = [world for world in worlds if world.tradeCode.subsector_capital]
            stats.sectorCp = [world for world in worlds if world.tradeCode.sector_capital]
            stats.otherCp = [world for world in worlds if world.tradeCode.other_capital]
            TLList = [world.tl for world in worlds]
            # mean/stddev only meaningful with a handful of worlds or more
            if len(TLList) > 3:
                stats.TLmean = sum(TLList) / len(TLList)
                TLVar = [math.pow(tl - stats.TLmean, 2) for tl in TLList]
                stats.TLstddev = math.sqrt(sum(TLVar) / len(TLVar))

    def find_colonizer(self, world, owner_hex):
        """Tag the world at owner_hex (among world's neighbours) as the
        colonizer of world by appending a C:<sector>-<hex> trade code."""
        for target in self.galaxy.ranges.neighbors_iter(world):
            if target.position == owner_hex:
                target.tradeCode.append("C:{}-{}".format(world.sector[0:4], world.position))
                pass

    def write_statistics(self, ally_count, ally_match, json_data):
        """Log summary counts and delegate full output to WikiStats."""
        self.logger.info('Charted star count: ' + str(self.galaxy.stats.number))
        self.logger.info('Charted population {:,d}'.format(self.galaxy.stats.population))
        if self.logger.isEnabledFor(logging.DEBUG):
            for sector in self.galaxy.sectors.values():
                self.logger.debug('Sector {} star count: {:,d}'.format(sector.name, sector.stats.number))
            for code, aleg in self.galaxy.alg.items():
                if aleg.base:
                    s = 'Allegiance {0} ({1}: base {3}) star count: {2:,d}'.format(aleg.name, code, aleg.stats.number,
                                                                                   aleg.base)
                else:
                    s = 'Allegiance {0} ({1}: base {3} -> {4}) star count: {2:,d}'.format(aleg.name, code, aleg.stats.number,
                                                                                          aleg.base, AllyGen.same_align(aleg.code))
                self.logger.debug(s)
            self.logger.debug("min count: {}, match: {}".format(ally_count, ally_match))
        wiki = WikiStats(self.galaxy, self.all_uwp, ally_count, ally_match, json_data)
        wiki.write_statistics()

    @staticmethod
    def trade_to_btn(trade):
        """Convert a trade value in credits to its Bilateral Trade Number
        (order of magnitude); zero trade maps to BTN 0."""
        if trade == 0:
            return 0
        return int(math.log(trade, 10))
|
makhidkarun/traveller_pyroute
|
PyRoute/StatCalculation.py
|
Python
|
mit
| 14,360
|
[
"Galaxy"
] |
79f20f325b7305c8a6f6862e14d2332d0b820edc9b8af48597a7273279500325
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import os
import pytest
from django.core.urlresolvers import reverse
from shuup.core.models import Order, OrderStatus
from shuup.testing.browser_utils import (
click_element, wait_until_appeared, wait_until_condition
)
from shuup.testing.factories import create_empty_order, get_default_shop
from shuup.testing.utils import initialize_admin_browser_test
pytestmark = pytest.mark.skipif(os.environ.get("SHUUP_BROWSER_TESTS", "0") != "1", reason="No browser tests run.")
@pytest.mark.browser
@pytest.mark.django_db  # was 'djangodb' -- a typo; only 'django_db' grants DB access
def test_orders_list_view(browser, admin_user, live_server):
    """Create ten orders (last one canceled), open the admin order list,
    and exercise the order-status filter."""
    shop = get_default_shop()
    for _ in range(10):
        order = create_empty_order(shop=shop)
        order.save()
    # Set last one canceled
    Order.objects.last().set_canceled()
    initialize_admin_browser_test(browser, live_server)
    _visit_orders_list_view(browser, live_server)
    _test_status_filter(browser)  # Will set three orders from end canceled
def _visit_orders_list_view(browser, live_server):
    """Open the admin order list page and wait for the rows to render."""
    list_url = reverse("shuup_admin:order.list")
    browser.visit("{}{}".format(live_server, list_url))
    assert browser.is_text_present("Orders")
    wait_until_appeared(browser, ".picotable-item-info")
def _test_status_filter(browser):
    """Exercise the order-status choice filter: cancel three more orders,
    then verify row counts for the canceled, initial, and 'all' filters.

    Side effect: the three last valid orders are left canceled.
    """
    # Check initial row count where the cancelled order should be excluded
    _check_row_count(browser, Order.objects.count() - 1)
    # Take three last valid orders and set those cancelled
    orders = Order.objects.valid()[:3]
    for order in orders:
        order.set_canceled()
    # Filter with cancelled
    cancelled_status = OrderStatus.objects.get_default_canceled()
    _change_status_filter(browser, "%s" % cancelled_status.pk)
    # Check cancelled row count (3 newly canceled + 1 from test setup)
    _check_row_count(browser, (3 + 1))
    # Filter with initial
    initial_status = OrderStatus.objects.get_default_initial()
    _change_status_filter(browser, "%s" % initial_status.pk)
    # Take new count (everything except the 3 + 1 canceled orders)
    _check_row_count(browser, (Order.objects.count() - 3 - 1))
    # Change status filter to all ("_all" is quoted in the option value)
    _change_status_filter(browser, '"_all"')
    # Now all orders should be visible
    _check_row_count(browser, Order.objects.count())
def _check_row_count(browser, expected_row_count):
    """Wait until the picotable body shows exactly expected_row_count rows.

    The unused picotable/tbody element lookups from the original were dead
    code (their results were never read) and have been removed.
    """
    wait_until_condition(browser, lambda x: len(x.find_by_css("#picotable tbody tr")) == expected_row_count)
    # technically this is handled above, but do the assertion anyways ;)
    assert len(browser.find_by_css("#picotable tbody tr")) == expected_row_count
def _change_status_filter(browser, to_value):
    """Open the picotable status choice filter and pick the given option.

    to_value is the option's value attribute (a status pk or '"_all"').
    The unused picotable element lookup from the original was dead code
    and has been removed.
    """
    click_element(browser, "#picotable div.choice-filter")
    click_element(browser, "#picotable div.choice-filter option[value='%s']" % to_value)
|
shawnadelic/shuup
|
shuup_tests/browser/admin/test_order_list.py
|
Python
|
agpl-3.0
| 3,065
|
[
"VisIt"
] |
efb41eff49bcde75a3b21146e977730482d0b4ffe5eeda5d8307d647ca9052c8
|
# Copyright (C) 2011-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
import unittest as ut
from ek_eof_one_species_base import ek_eof_one_species
from ek_eof_one_species_base import params_base
# Parameters for an EOF flow driven along x; the charged walls are normal
# to z, the single non-periodic direction.
params_x = {
    'box_x': 3.0,
    'box_y': 3.0,
    'box_z': params_base['width'] + 2 * params_base['padding'],
    'ext_force_density': [params_base['force'], 0.0, 0.0],
    'wall_normal_1': [0, 0, 1],
    'wall_normal_2': [0, 0, -1],
    'periodic_dirs': (0, 1),
    'non_periodic_dir': 2,
    'n_roll_index': 0,
    'calculated_pressure_xy': 0.0,
    'calculated_pressure_yz': 0.0,
}
class eof_x(ek_eof_one_species):
    """EOF test specialisation: flow along x, walls normal to z."""

    def test(self):
        # run the shared EOF check with the x-axis parameter set
        self.run_test(params_x)


if __name__ == "__main__":
    ut.main()
|
hmenke/espresso
|
testsuite/python/ek_eof_one_species_x.py
|
Python
|
gpl-3.0
| 1,244
|
[
"ESPResSo"
] |
414a03612f7e8372da82cbc00af7c59d2883d7e2c13c984202131d3ffce05ba0
|
"""Display a rotomap."""
import enum
import functools
import sys
import cv2
import numpy
import mel.lib.common
import mel.lib.fullscreenui
import mel.lib.image
import mel.rotomap.detectmoles
import mel.rotomap.mask
import mel.rotomap.moles
import mel.rotomap.relate
import mel.rotomap.tricolour
DEFAULT_MASKER_RADIUS = 200
_WHITE = (255, 255, 255)
_BLACK = (0, 0, 0)
def draw_mole(image, x, y, colours):
    """Draw a canonical mole marker at (x, y): a white ring, a black ring,
    then three concentric discs in the supplied tricolour."""
    centre = (x, y)
    # outline rings
    cv2.circle(image, centre, 20, _WHITE, -1)
    cv2.circle(image, centre, 18, _BLACK, -1)
    # tricolour fill, outermost first
    for radius, colour in zip((16, 12, 8), colours):
        cv2.circle(image, centre, radius, colour, -1)
def draw_non_canonical_mole(image, x, y, colours):
    """Draw an unconfirmed mole marker: a square white/black outline with
    the usual round tricolour marker drawn on top."""
    for half_size, colour in ((20, _WHITE), (18, _BLACK)):
        top_left = (x - half_size, y - half_size)
        bottom_right = (x + half_size, y + half_size)
        cv2.rectangle(image, top_left, bottom_right, colour, -1)
    draw_mole(image, x, y, colours)
def draw_crosshair(image, x, y):
    """Draw a four-armed crosshair around (x, y): a thick white stroke
    under a thinner black stroke, leaving the centre clear."""
    inner, outer = 16, 24
    arms = ((1, 0), (0, -1), (-1, 0), (0, 1))  # right, up, left, down
    for thickness, colour in ((3, _WHITE), (2, _BLACK)):
        for dx, dy in arms:
            start = (x + inner * dx, y + inner * dy)
            end = (x + outer * dx, y + outer * dy)
            cv2.line(image, start, end, colour, thickness)
class Display:
    """Renders rotomap images to a screen, either fitted to the window or
    zoomed about a point, and maps window coords back to image coords."""

    def __init__(self, screen):
        self._image_display = screen
        # window size in pixels, as a (width, height) array
        self._rect = numpy.array((screen.width, screen.height))
        self._transform = None      # transform used for the last render
        self._zoom_pos = None       # image-space centre of the zoom view
        self._is_zoomed = False
        self._zoom_level = 1

    def show_current(self, image, overlay):
        """Render image via the current fit/zoom transform, apply the
        optional overlay callable (image, transform) -> image, and show."""
        if self._is_zoomed:
            self._transform = ZoomedImageTransform(
                image, self._zoom_pos, self._rect, scale=self._zoom_level
            )
        else:
            self._transform = FittedImageTransform(image, self._rect)

        image = self._transform.render()

        if overlay is not None:
            image = overlay(image, self._transform)

        self._image_display.show_opencv_image(image)

    def set_fitted(self):
        # switch back to fit-to-window rendering
        self._is_zoomed = False

    def set_zoom_level(self, zoom_level=1):
        self._zoom_level = zoom_level

    def set_zoomed(self, x, y, zoom_level=None):
        """Zoom about image-space point (x, y), optionally changing level."""
        self._zoom_pos = numpy.array((x, y))
        self._is_zoomed = True
        if zoom_level is not None:
            self._zoom_level = zoom_level

    def is_zoomed(self):
        return self._is_zoomed

    def get_zoom_pos(self):
        """Return the zoom centre; raises if not currently zoomed."""
        if not self.is_zoomed():
            raise Exception("Not zoomed")
        return self._zoom_pos

    def windowxy_to_imagexy(self, window_x, window_y):
        """Map window coordinates to image coordinates via the transform
        used for the most recent render."""
        return self._transform.transformedxy_to_imagexy(window_x, window_y)

    def set_title(self, _):
        # intentionally a no-op; kept for interface compatibility
        # cv2.setWindowTitle(self._name, title)
        pass
def make_composite_overlay(*overlays):
    """Return an overlay, which will composite the supplied overlays in turn.

    :*overlays: The overlay callables to composite.
    :returns: A function which will composite *overlays and return the image.
    """

    def composited(image, transform):
        return functools.reduce(
            lambda img, overlay: overlay(img, transform), overlays, image
        )

    return composited
class StatusOverlay:
    """Overlay that paints self.text into the top-left corner, if set."""

    def __init__(self):
        self.text = ""

    def __call__(self, image, transform):
        if not self.text:
            return image
        rendered = mel.lib.image.render_text_as_image(self.text)
        mel.lib.common.copy_image_into_image(rendered, image, 0, 0)
        return image
class MoleMarkerOverlay:
    """Overlay that draws per-mole colour markers, optionally faded.

    A highlighted mole additionally gets a crosshair. `self.moles` must be
    assigned (a list of mole dicts with "uuid", "x", "y") before calling.
    """

    def __init__(self, uuid_to_tricolour):
        self._is_showing_markers = True
        self._is_faded_markers = True
        self._highlight_uuid = None
        self._uuid_to_tricolour = uuid_to_tricolour
        # Fall back to the default uuid-to-colour scheme if none supplied.
        if self._uuid_to_tricolour is None:
            self._uuid_to_tricolour = (
                mel.rotomap.tricolour.uuid_to_tricolour_first_digits
            )
        self.moles = None
    def toggle_markers(self):
        self._is_showing_markers = not self._is_showing_markers
    def set_highlight_uuid(self, highlight_uuid):
        # None clears the highlight.
        self._highlight_uuid = highlight_uuid
    def toggle_faded_markers(self):
        self._is_faded_markers = not self._is_faded_markers
    def __call__(self, image, transform):
        if not self._is_showing_markers:
            return image
        # Find the mole dict matching the highlight uuid, if any.
        highlight_mole = None
        if self._highlight_uuid is not None:
            for m in self.moles:
                if m["uuid"] == self._highlight_uuid:
                    highlight_mole = m
                    break
        # When fading, draw markers on a copy and blend it back in later.
        marker_image = image
        if self._is_faded_markers:
            marker_image = image.copy()
        for mole in self.moles:
            x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
            if mole is highlight_mole:
                draw_crosshair(marker_image, x, y)
            colours = self._uuid_to_tricolour(mole["uuid"])
            # Confirmed moles get the full marker; unconfirmed ones get the
            # non-canonical variant.
            if mole[mel.rotomap.moles.KEY_IS_CONFIRMED]:
                draw_mole(marker_image, x, y, colours)
            else:
                draw_non_canonical_mole(marker_image, x, y, colours)
        if self._is_faded_markers:
            # 75% original + 25% markers gives a subtle, faded marker look.
            image = cv2.addWeighted(image, 0.75, marker_image, 0.25, 0.0)
        return image
class MarkedMoleOverlay:
    """An overlay to make marked moles obvious, for checking mark positions."""

    def __init__(self):
        # `moles` must be assigned before the overlay is called.
        self.moles = None
        self._highlight_uuid = None
        # When True, reveal mole areas and colour-code kind/looks_like;
        # when False, just draw plain blue markers over every mole.
        self.is_accentuate_marked_mode = False
    def set_highlight_uuid(self, highlight_uuid):
        self._highlight_uuid = highlight_uuid
    def __call__(self, image, transform):
        if self.is_accentuate_marked_mode:
            return self._draw_accentuated(image, transform)
        else:
            return self._draw_markers(image, transform)
    def _draw_accentuated(self, image, transform):
        # Reveal the moles that have been marked, whilst still showing
        # markers. This is good for verifying that markers are actually
        # positioned on moles.
        mask_radius = 50
        # Darken the whole image, then re-brighten discs around each mole.
        image = image.copy() // 2
        mask = numpy.zeros((*image.shape[:2], 1), numpy.uint8)
        for mole in self.moles:
            x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
            cv2.circle(mask, (x, y), mask_radius, 255, -1)
        masked_faded = cv2.bitwise_and(image, image, mask=mask)
        image = cv2.add(masked_faded, image)
        highlight_mole = None
        if self._highlight_uuid is not None:
            for m in self.moles:
                if m["uuid"] == self._highlight_uuid:
                    highlight_mole = m
                    break
        for mole in self.moles:
            x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
            kind = mole.get("kind", None)
            looks_like = mole.get("looks_like", None)
            # Circle colour encodes confirmation, kind, and looks_like:
            # dark blue = unconfirmed, blue = confirmed but unclassified.
            colour = (128, 0, 0)
            if mole[mel.rotomap.moles.KEY_IS_CONFIRMED]:
                colour = (255, 0, 0)
            if kind == "mole":
                if looks_like == "mole":
                    colour = (255, 255, 255)
                elif looks_like == "non-mole":
                    colour = (255, 255, 0)
                elif looks_like == "unsure":
                    colour = (255, 0, 128)
                else:
                    raise Exception(f"Mole looks_like is invalid: {mole}")
            elif kind == "non-mole":
                if looks_like == "mole":
                    colour = (0, 255, 255)
                elif looks_like == "non-mole":
                    colour = (0, 0, 255)
                elif looks_like == "unsure":
                    colour = (128, 0, 255)
                else:
                    raise Exception(f"Mole looks_like is invalid: {mole}")
            cv2.circle(image, (x, y), mask_radius, colour, 2)
            if mole is highlight_mole:
                draw_crosshair(image, x, y)
        return image
    def _draw_markers(self, image, transform):
        # Hide the moles that have been marked, showing markers
        # distinctly from moles. This is good for marking moles that
        # haven't been marked, without worrying about the ones that
        # have been marked.
        for mole in self.moles:
            x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
            draw_mole(image, x, y, [[255, 0, 0], [255, 128, 128], [255, 0, 0]])
        return image
class BoundingAreaOverlay:
    """An overlay to show the bounding area, if any."""

    def __init__(self):
        # An ellipse description consumed by mel.lib.ellipsespace.Transform,
        # or None to draw nothing beyond the dimming.
        self.bounding_box = None
    def __call__(self, image, transform):
        # Dim the whole image in place so the outline stands out.
        image //= 2
        if self.bounding_box is not None:
            color = (0, 0, 255)
            size = 2
            space = mel.lib.ellipsespace.Transform(self.bounding_box)
            def toimage(point):
                # Ellipse-space point -> image point -> display point.
                point = space.from_space((point))
                point = transform.imagexy_to_transformedxy(*point)
                return point
            # Outline of the unit square in ellipse space (closed loop).
            border = [
                toimage((-1, -1)),
                toimage((1, -1)),
                toimage((1, 1)),
                toimage((-1, 1)),
                toimage((-1, -1)),
            ]
            border = numpy.array(border)
            # A small glyph at the centre indicating axis orientation.
            centre = [
                toimage((0, 0.1)),
                toimage((0, -0.1)),
                toimage((0.05, 0)),
                toimage((0.1, 0)),
                toimage((-0.1, 0)),
                toimage((0, 0)),
                toimage((0, 0.1)),
            ]
            centre = numpy.array(centre)
            cv2.drawContours(image, [border, centre], -1, color, size)
        return image
class ZoomedImageTransform:
    """View transform that shows a scaled image centred on a given point.

    Provides two-way mapping between source-image coordinates and
    displayed (window) coordinates.
    """

    def __init__(self, image, pos, rect, scale):
        # `pos` is in source-image coordinates; store it pre-scaled.
        self._pos = tuple(int(v * scale) for v in pos)
        self._rect = rect
        self._offset = mel.lib.image.calc_centering_offset(self._pos, rect)
        self._scale = scale
        self._image = mel.lib.image.scale_image(image, self._scale)
    def render(self):
        """Return the scaled image cropped/centred at the zoom position."""
        return mel.lib.image.centered_at(self._image, self._pos, self._rect)
    def imagexy_to_transformedxy(self, x, y):
        """Map source-image (x, y) to displayed coordinates."""
        return ((numpy.array((x, y)) * self._scale) + self._offset).astype(int)
    def transformedxy_to_imagexy(self, x, y):
        """Map displayed (x, y) back to source-image coordinates."""
        return ((numpy.array((x, y)) - self._offset) / self._scale).astype(int)
class FittedImageTransform:
    """View transform that letterboxes an image into a target rectangle.

    Provides two-way mapping between source-image coordinates and
    displayed (window) coordinates.
    """

    def __init__(self, image, fit_rect):
        self._fit_rect = fit_rect
        image_rect = mel.lib.image.get_image_rect(image)
        # NOTE(review): letterbox appears to be (x, y, width, height) --
        # offset uses [:2] and scale uses [2]; confirm in mel.lib.image.
        letterbox = mel.lib.image.calc_letterbox(*image_rect, *self._fit_rect)
        self._offset = numpy.array(letterbox[:2])
        # Source pixels per displayed pixel.
        self._scale = image.shape[1] / letterbox[2]
        self._image = image
    def render(self):
        """Return the image letterboxed into the fit rectangle."""
        return mel.lib.image.letterbox(self._image, *self._fit_rect)
    def imagexy_to_transformedxy(self, x, y):
        """Map source-image (x, y) to displayed coordinates."""
        return (numpy.array((x, y)) / self._scale + self._offset).astype(int)
    def transformedxy_to_imagexy(self, x, y):
        """Map displayed (x, y) back to source-image coordinates."""
        return ((numpy.array((x, y)) - self._offset) * self._scale).astype(int)
class EditorMode(enum.Enum):
    """The interaction modes the Editor can be in (see Editor.show_current)."""

    edit_mole = 1
    edit_mask = 2
    bounding_area = 3
    mole_mark = 4
    debug_automole = 0
class Editor:
    """Top-level controller for the rotomap editing session.

    Owns the Display, one MoleData per rotomap directory, the current
    EditorMode, and the overlay objects used to decorate the shown image.
    Most mutating operations save mole data immediately and refresh the
    display.
    """

    def __init__(self, directory_list, screen):
        self._uuid_to_tricolour = mel.rotomap.tricolour.UuidTriColourPicker()
        self.display = Display(screen)
        self.moledata_list = [MoleData(x.image_paths) for x in directory_list]
        self._mode = EditorMode.edit_mole
        self.moledata_index = 0
        self.moledata = self.moledata_list[self.moledata_index]
        self._follow = None
        self._mole_overlay = MoleMarkerOverlay(self._uuid_to_tricolour)
        self.marked_mole_overlay = MarkedMoleOverlay()
        self.bounding_area_overlay = BoundingAreaOverlay()
        self._status_overlay = StatusOverlay()
        self.show_current()
        self.masker_radius = DEFAULT_MASKER_RADIUS
    def set_smaller_masker(self):
        """Halve the mask-editing brush radius."""
        self.masker_radius //= 2
    def set_larger_masker(self):
        """Double the mask-editing brush radius."""
        self.masker_radius *= 2
    def set_default_masker(self):
        self.masker_radius = DEFAULT_MASKER_RADIUS
    def set_automoledebug_mode(self):
        self._mode = EditorMode.debug_automole
        self.show_current()
    def set_editmole_mode(self):
        self._mode = EditorMode.edit_mole
        self.show_current()
    def set_editmask_mode(self):
        self._mode = EditorMode.edit_mask
        self.show_current()
    def set_boundingarea_mode(self):
        self._mode = EditorMode.bounding_area
        self.show_current()
    def set_molemark_mode(self):
        self._mode = EditorMode.mole_mark
        self.show_current()
    def set_status(self, text):
        """Set the text shown by the status overlay (shown on next render)."""
        self._status_overlay.text = text
    def visit(self, visit_target_str):
        """Jump to a specific mole given a 'path/to/jpg:uuid' target string.

        Searches each MoleData in turn; on success, follows and zooms to
        the mole. On failure, prints to stderr and refreshes the display.
        """
        # Expect a string formatted like this:
        #
        # path/to/jpg:uuid
        #
        # Anything after the expected bits is ignored.
        #
        path, visit_uuid, *_ = visit_target_str.split(":")
        print(path, visit_uuid)
        for _ in range(len(self.moledata_list)):
            if self.moledata.try_jump_to_path(str(path)):
                for m in self.moledata.moles:
                    if m["uuid"] == visit_uuid:
                        self.moledata.get_image()
                        self._follow = visit_uuid
                        self._mole_overlay.set_highlight_uuid(self._follow)
                        self.marked_mole_overlay.set_highlight_uuid(
                            self._follow
                        )
                        self.show_zoomed_display(m["x"], m["y"])
                        return
                self.show_current()
                return
            # Not in this rotomap; advance to the next one and retry.
            self.moledata_index += 1
            self.moledata_index %= len(self.moledata_list)
            self.moledata = self.moledata_list[self.moledata_index]
        print("Could not find:", path, ":", visit_uuid, file=sys.stderr)
        self.show_current()
    def follow(self, uuid_to_follow):
        """Highlight the given mole uuid and zoom to it if present here."""
        self._follow = uuid_to_follow
        self._mole_overlay.set_highlight_uuid(self._follow)
        self.marked_mole_overlay.set_highlight_uuid(self._follow)
        follow_mole = None
        for m in self.moledata.moles:
            if m["uuid"] == self._follow:
                follow_mole = m
                break
        if follow_mole is not None:
            self.show_zoomed_display(follow_mole["x"], follow_mole["y"])
    def skip_to_mole(self, uuid_to_skip_to):
        """Advance through images until one contains the given mole uuid.

        Stops after a full loop back to the starting image if not found.
        """
        original_index = self.moledata.index()
        done = False
        while not done:
            for m in self.moledata.moles:
                if m["uuid"] == uuid_to_skip_to:
                    return
            self.moledata.increment()
            self.moledata.get_image()
            if self.moledata.index() == original_index:
                return
    def toggle_markers(self):
        self._mole_overlay.toggle_markers()
        self.show_current()
    def toggle_faded_markers(self):
        self._mole_overlay.toggle_faded_markers()
        self.show_current()
    def set_mask(self, mouse_x, mouse_y, enable):
        """Paint (enable) or erase a filled circle on the mask and save it."""
        image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
        value = 255 if enable else 0
        radius = self.masker_radius
        cv2.circle(self.moledata.mask, (image_x, image_y), radius, value, -1)
        self.moledata.save_mask()
        self.show_current()
    def show_current(self):
        """Render the current image according to the current EditorMode."""
        self.display.set_title(self.moledata.current_image_path())
        image = self.moledata.get_image()
        if self._mode is EditorMode.edit_mole:
            self._mole_overlay.moles = self.moledata.moles
            self.display.show_current(
                image,
                make_composite_overlay(
                    self._mole_overlay, self._status_overlay
                ),
            )
        elif self._mode is EditorMode.debug_automole:
            image = image[:]
            image = mel.rotomap.detectmoles.draw_debug(
                image, self.moledata.mask
            )
            self.display.show_current(image, None)
        elif self._mode is EditorMode.edit_mask:
            # Show a greyscale image with the mask in the red channel.
            mask = self.moledata.mask
            gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR)
            gray_image[:, :, 2] = mask
            self.display.show_current(gray_image, None)
        elif self._mode is EditorMode.bounding_area:
            box = self.moledata.metadata.get("ellipse", None)
            self.bounding_area_overlay.bounding_box = box
            self.display.show_current(image, self.bounding_area_overlay)
        elif self._mode is EditorMode.mole_mark:
            self.marked_mole_overlay.moles = self.moledata.moles
            self.display.show_current(image, self.marked_mole_overlay)
        else:
            raise Exception("Unknown mode", self._mode)
    def show_fitted(self):
        self.display.set_fitted()
        self.show_current()
    def set_zoom_level(self, zoom_level):
        self.display.set_zoom_level(zoom_level)
        self.show_current()
    def show_zoomed(self, mouse_x, mouse_y, zoom_level=None):
        """Zoom centred on a window position (converted to image coords)."""
        image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
        self.display.set_zoomed(image_x, image_y, zoom_level)
        self.show_current()
    def show_zoomed_display(self, image_x, image_y, zoom_level=None):
        """Zoom centred on an image position (already in image coords)."""
        self.display.set_zoomed(image_x, image_y, zoom_level)
        self.show_current()
    def show_prev_map(self):
        def transition():
            self.moledata_index -= 1
            self.moledata_index %= len(self.moledata_list)
            self.moledata = self.moledata_list[self.moledata_index]
        self._adjusted_transition(transition)
        self.show_current()
    def show_next_map(self):
        def transition():
            self.moledata_index += 1
            self.moledata_index %= len(self.moledata_list)
            self.moledata = self.moledata_list[self.moledata_index]
        self._adjusted_transition(transition)
        self.show_current()
    def show_prev(self):
        self._adjusted_transition(self.moledata.decrement)
        self.show_current()
    def show_next(self):
        self._adjusted_transition(self.moledata.increment)
        self.show_current()
    def _adjusted_transition(self, transition_func):
        """Run `transition_func`, keeping the zoom position anatomically
        stable across images by mapping it through ellipse space when both
        images have an 'ellipse' in their metadata."""
        if self.display.is_zoomed() and "ellipse" in self.moledata.metadata:
            pos = self.display.get_zoom_pos()
            ellipse = self.moledata.metadata["ellipse"]
            pos = mel.lib.ellipsespace.Transform(ellipse).to_space(pos)
            transition_func()
            self.moledata.ensure_loaded()
            if "ellipse" in self.moledata.metadata:
                ellipse = self.moledata.metadata["ellipse"]
                pos = mel.lib.ellipsespace.Transform(ellipse).from_space(pos)
                self.display.set_zoomed(pos[0], pos[1])
        else:
            transition_func()
    def show_next_n(self, number_to_advance):
        """Advance `number_to_advance` images forward, then refresh."""
        for i in range(number_to_advance):
            self.moledata.increment()
        self.moledata.get_image()
        self.show_current()
    def add_mole(self, mouse_x, mouse_y):
        """Add a new mole at a window position; saves immediately."""
        image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
        mel.rotomap.moles.add_mole(self.moledata.moles, image_x, image_y)
        self.moledata.save_moles()
        self.show_current()
    def add_mole_display(self, image_x, image_y, mole_uuid=None):
        """Add a new mole at an image position, optionally with a uuid."""
        mel.rotomap.moles.add_mole(
            self.moledata.moles, image_x, image_y, mole_uuid
        )
        self.moledata.save_moles()
        self.show_current()
    def confirm_mole(self, mouse_x, mouse_y):
        """Mark the nearest mole's uuid as canonical (confirmed)."""
        image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
        mole_uuid = mel.rotomap.moles.get_nearest_mole_uuid(
            self.moledata.moles, image_x, image_y
        )
        mel.rotomap.moles.set_nearest_mole_uuid(
            self.moledata.moles, image_x, image_y, mole_uuid, is_canonical=True
        )
        self.moledata.save_moles()
        self.show_current()
    def set_mole_uuid(self, mouse_x, mouse_y, mole_uuid, is_canonical=True):
        """Assign `mole_uuid` to the mole nearest the window position."""
        image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
        mel.rotomap.moles.set_nearest_mole_uuid(
            self.moledata.moles, image_x, image_y, mole_uuid, is_canonical
        )
        self.moledata.save_moles()
        self.show_current()
    def get_mole_uuid(self, mouse_x, mouse_y):
        """Return the uuid of the mole nearest the window position."""
        image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
        return mel.rotomap.moles.get_nearest_mole_uuid(
            self.moledata.moles, image_x, image_y
        )
    def get_nearest_mole(self, mouse_x, mouse_y):
        """Return the mole dict nearest the window position, or None."""
        image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
        nearest_index = mel.rotomap.moles.nearest_mole_index(
            self.moledata.moles, image_x, image_y
        )
        mole = None
        if nearest_index is not None:
            mole = self.moledata.moles[nearest_index]
        return mole
    def move_nearest_mole(self, mouse_x, mouse_y):
        """Move the nearest mole to the window position; saves immediately."""
        image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
        mel.rotomap.moles.move_nearest_mole(
            self.moledata.moles, image_x, image_y
        )
        self.moledata.save_moles()
        self.show_current()
    def remove_mole(self, mouse_x, mouse_y):
        """Delete the mole nearest the window position; saves immediately."""
        image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
        mel.rotomap.moles.remove_nearest_mole(
            self.moledata.moles, image_x, image_y
        )
        self.moledata.save_moles()
        self.show_current()
    def crud_mole(self, mole_uuid, mouse_x, mouse_y):
        """Create the mole with `mole_uuid` at the position, or move it
        there if it already exists in this image."""
        image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
        i = mel.rotomap.moles.uuid_mole_index(self.moledata.moles, mole_uuid)
        if i is not None:
            self.moledata.moles[i]["x"] = image_x
            self.moledata.moles[i]["y"] = image_y
        else:
            mel.rotomap.moles.add_mole(
                self.moledata.moles, image_x, image_y, mole_uuid
            )
        self.moledata.save_moles()
        self.show_current()
    def remap_uuid(self, from_uuid, to_uuid):
        """Globally rename a mole uuid across every image in this rotomap."""
        print(f"Remap globally {from_uuid} to {to_uuid}.")
        self.moledata.remap_uuid(from_uuid, to_uuid)
        self.show_current()
class MoleData:
    """Lazily-loaded image, mole list, metadata and mask for a path list.

    Tracks a current index into `path_list`; `ensure_loaded()` brings the
    in-memory state in sync with the current index.
    """

    def __init__(self, path_list):
        # Make an instance-specific cache of images. Note that this means that
        # mel will need to be re-run in order to pick up changes to mole
        # images. This seems to be fine for use-cases to date, only the mole
        # data seems to change from underneath really.
        @functools.lru_cache()
        def load_image(image_path):
            return mel.lib.image.load_image(image_path)
        self._load_image = load_image
        self.moles = []
        self.metadata = {}
        self.image = None
        self.mask = None
        self._mask_path = None
        self._path_list = path_list
        self._list_index = 0
        self._num_images = len(self._path_list)
        # None forces the first ensure_loaded() to actually load.
        self._loaded_index = None
        self.ensure_loaded()
    def get_image(self):
        """Return the image at the current index, loading it if necessary."""
        self.ensure_loaded()
        return self.image
    def reload(self):
        """Force a re-load of moles/metadata/mask for the current index."""
        self._loaded_index = None
        self.ensure_loaded()
    def ensure_loaded(self):
        """Load image, moles, metadata and mask for the current index.

        No-op if the current index is already loaded. A missing mask is
        replaced with an all-zero mask of the image's size.
        """
        if self._loaded_index == self._list_index:
            return
        image_path = self._path_list[self._list_index]
        self.image = self._load_image(image_path)
        self.moles = mel.rotomap.moles.load_image_moles(image_path)
        self.metadata = mel.rotomap.moles.load_image_metadata(image_path)
        height, width = self.image.shape[:2]
        self._mask_path = mel.rotomap.mask.path(image_path)
        self.mask = mel.rotomap.mask.load_or_none(image_path)
        if self.mask is None:
            self.mask = numpy.zeros((height, width), numpy.uint8)
        self._loaded_index = self._list_index
    def remap_uuid(self, from_uuid, to_uuid):
        """Rewrite `from_uuid` to `to_uuid` in every image's mole file,
        marking remapped moles as confirmed, then reload the current moles."""
        for image_path in self._path_list:
            moles = mel.rotomap.moles.load_image_moles(image_path)
            for m in moles:
                if m["uuid"] == from_uuid:
                    m["uuid"] = to_uuid
                    m[mel.rotomap.moles.KEY_IS_CONFIRMED] = True
            mel.rotomap.moles.save_image_moles(moles, image_path)
        image_path = self._path_list[self._list_index]
        self.moles = mel.rotomap.moles.load_image_moles(image_path)
    def decrement(self):
        """Step the current index backwards, wrapping at the start."""
        new_index = self._list_index + self._num_images - 1
        self._list_index = new_index % self._num_images
    def increment(self):
        """Step the current index forwards, wrapping at the end."""
        self._list_index = (self._list_index + 1) % self._num_images
    def index(self):
        return self._list_index
    def save_mask(self):
        mel.lib.common.write_image(self._mask_path, self.mask)
    def save_moles(self):
        """Normalise and persist the current image's mole list."""
        image_path = self._path_list[self._list_index]
        mel.rotomap.moles.normalise_moles(self.moles)
        mel.rotomap.moles.save_image_moles(self.moles, image_path)
    def current_image_path(self):
        return self._path_list[self._list_index]
    def try_jump_to_path(self, path):
        """Jump the current index to `path` if present; return success."""
        for i, image_path in enumerate(self._path_list):
            if str(path) == str(image_path):
                if self._list_index != i:
                    self._list_index = i
                    self.ensure_loaded()
                return True
        return False
# -----------------------------------------------------------------------------
# Copyright (C) 2016-2018 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
aevri/mel
|
mel/rotomap/display.py
|
Python
|
apache-2.0
| 26,545
|
[
"VisIt"
] |
84642d32cafce63a9ac5c986dde656fdd949be848bb067b3b51b5b4b828b3dcd
|
def vector_product3(a, b):
    """Computes and returns the vector product (cross product) of a and b."""
    (a1, a2, a3), (b1, b2, b3) = a, b
    return [a2 * b3 - a3 * b2,
            a3 * b1 - a1 * b3,
            a1 * b2 - a2 * b1]
def seq_mult_scalar(a, s):
    """Return a list of the products of s with the values in list a."""
    # A list comprehension replaces the manual append loop; handles the
    # empty list naturally.
    return [value * s for value in a]
def powers(n, k):
    """Compute and return a list of n to the powers 0 through k, inclusive."""
    # A list comprehension replaces the manual append loop.
    return [n ** i for i in range(k + 1)]
def traffic_light(load):
    """Returns green, amber or red depending on the value of load.

    green for load < 0.7, amber for 0.7 <= load < 0.9, red otherwise.
    """
    if load >= 0.9:
        return "red"
    if load >= 0.7:
        return "amber"
    return "green"
|
Xorgon/Computing-Labs
|
lab5.py
|
Python
|
mit
| 787
|
[
"Amber"
] |
38e5a6a7f41a6240440d88d209a130d28796157051b211f5f3f9143749ac215f
|
# -*- coding: utf-8 -*-
#
# inhibitory_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
This is the inhibitory network used as test case 2 (see figure 9 and 10) in
Hahne, J., Helias, M., Kunkel, S., Igarashi, J.,
Bolten, M., Frommer, A. and Diesmann, M.,
A unified framework for spiking and gap-junction interactions
in distributed neuronal network simulations,
Front. Neuroinform. 9:22. (2015),
doi: 10.3389/fninf.2015.00022
The network contains 500 hh_psc_alpha_gap neurons with random initial
membrane potentials between −40 and −80 mV. Each neuron receives 50
inhibitory synaptic inputs that are randomly selected from all other
neurons, each with synaptic weight JI = −50.0 pA and synaptic delay
d = 1.0 ms. Each neuron receives an excitatory external Poissonian
input of 500.0 Hz with synaptic weight JE = 300.0 pA and the same
delay d. In addition (60*500)/2 gap junctions are added randomly to the
network resulting in an average of 60 gap-junction connections per neuron.
"""
import pylab
import nest
import random
import numpy

# Network parameters, as in test case 2 of Hahne et al. (2015).
n_neuron = 500        # number of hh_psc_alpha_gap neurons
gap_per_neuron = 60   # average gap-junction connections per neuron
inh_per_neuron = 50   # inhibitory synaptic inputs per neuron
delay = 1.0           # synaptic delay (ms)
j_exc = 300.          # excitatory synaptic weight (pA)
j_inh = -50.          # inhibitory synaptic weight (pA)
threads = 8
stepsize = 0.05
simtime = 501.

# Gap-junction coupling strength -- set gap weight here.
gap_weight = 0.32

random.seed(1)

nest.ResetKernel()
nest.SetKernelStatus({'resolution': 0.05,
                      'total_num_virtual_procs': threads,
                      'print_time': True,
                      # Settings for waveform relaxation
                      # 'use_wfr': False uses communication in every step
                      # instead of an iterative solution
                      'use_wfr': True,
                      'wfr_comm_interval': 1.0,
                      'wfr_tol': 0.0001,
                      'wfr_max_iterations': 15,
                      'wfr_interpolation_order': 3})

neurons = nest.Create('hh_psc_alpha_gap', n_neuron)
sd = nest.Create("spike_detector",
                 params={'to_file': False, 'to_memory': True})
pg = nest.Create("poisson_generator", params={'rate': 500.0})

# Each neuron receives inh_per_neuron inhibitory inputs chosen at random
# from all other neurons.
conn_dict = {'rule': 'fixed_indegree',
             'indegree': inh_per_neuron,
             'autapses': False,
             'multapses': True}
syn_dict = {'model': 'static_synapse',
            'weight': j_inh,
            'delay': delay}
nest.Connect(neurons, neurons, conn_dict, syn_dict)

# Excitatory external Poissonian drive to every neuron.
nest.Connect(pg, neurons, 'all_to_all',
             syn_spec={'model': 'static_synapse',
                       'weight': j_exc,
                       'delay': delay})
nest.Connect(neurons, sd)

# Random initial membrane potentials between -40 and -80 mV.
for i in range(n_neuron):
    nest.SetStatus([neurons[i]], {'V_m': (-40. - 40. * random.random())})

"""
We must not use the 'fixed_indegree' or 'fixed_outdegree' functionality of nest.Connect
to create the connections, as gap_junction connections are two-way connections and we
need to make sure that the same neurons are connected in both ways.
"""
# Create gap_junction connections.
# BUGFIX: integer division is required -- under Python 3 '/' yields a float
# and range(n_connection) would raise a TypeError.
n_connection = n_neuron * gap_per_neuron // 2
connections = numpy.transpose(
    [random.sample(neurons, 2) for _ in range(n_connection)])

# sources -> targets
nest.Connect(connections[0], connections[1],
             'one_to_one',
             {'model': 'gap_junction', 'weight': gap_weight})
# targets -> sources
nest.Connect(connections[1], connections[0],
             'one_to_one',
             {'model': 'gap_junction', 'weight': gap_weight})

nest.Simulate(simtime)

times = nest.GetStatus(sd, 'events')[0]['times']
spikes = nest.GetStatus(sd, 'events')[0]['senders']
n_spikes = nest.GetStatus(sd, 'n_events')[0]

hz_rate = (1000.0 * n_spikes / simtime) / n_neuron

pylab.figure(1)
pylab.plot(times, spikes, 'o')
pylab.title('Average spike rate (Hz): %.2f' % hz_rate)
pylab.xlabel('time (ms)')
pylab.ylabel('neuron no')
pylab.show()
|
uahic/nest-simulator
|
examples/nest/gap_junction/inhibitory_network.py
|
Python
|
gpl-2.0
| 4,388
|
[
"NEURON"
] |
9800c7201296116b8b8e8b70312ca556700d5706130e8391cca1223e5a94570f
|
# Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
from scipy._lib.six import string_types
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
'unit_impulse']
def sawtooth(t, width=1):
    """
    Return a periodic sawtooth or triangle waveform.

    The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
    interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
    ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].

    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        Time.
    width : array_like, optional
        Width of the rising ramp as a proportion of the total cycle.
        Default is 1, producing a rising ramp, while 0 produces a falling
        ramp.  `width` = 0.5 produces a triangle wave.
        If an array, causes wave shape to change over time, and must be the
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the sawtooth waveform.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500)
    >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))

    """
    t, w = asarray(t), asarray(width)
    # Broadcast t and w against each other so both have the common shape.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # BUGFIX: substring membership ('d' in 'fFdD'), not list membership --
    # ['fFdD'] only matched the literal four-character string, so float32
    # and complex inputs were always silently upcast to float64.
    if t.dtype.char in 'fFdD':
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # width must be between 0 and 1 inclusive; out-of-range gives nan.
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # take t modulo 2*pi
    tmod = mod(t, 2 * pi)

    # on the interval 0 to width*2*pi function is
    #  tmod / (pi*w) - 1
    mask2 = (1 - mask1) & (tmod < w * 2 * pi)
    tsub = extract(mask2, tmod)
    wsub = extract(mask2, w)
    place(y, mask2, tsub / (pi * wsub) - 1)

    # on the interval width*2*pi to 2*pi function is
    #  (pi*(w+1)-tmod) / (pi*(1-w))
    mask3 = (1 - mask1) & (1 - mask2)
    tsub = extract(mask3, tmod)
    wsub = extract(mask3, w)
    place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
    return y
def square(t, duty=0.5):
    """
    Return a periodic square-wave waveform.

    The square wave has a period ``2*pi``, has value +1 from 0 to
    ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
    the interval [0,1].

    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        The input time array.
    duty : array_like, optional
        Duty cycle.  Default is 0.5 (50% duty cycle).
        If an array, causes wave shape to change over time, and must be the
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the square waveform.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500, endpoint=False)
    >>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
    >>> plt.ylim(-2, 2)

    A pulse-width modulated sine wave:

    >>> plt.figure()
    >>> sig = np.sin(2 * np.pi * t)
    >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
    >>> plt.subplot(2, 1, 1)
    >>> plt.plot(t, sig)
    >>> plt.subplot(2, 1, 2)
    >>> plt.plot(t, pwm)
    >>> plt.ylim(-1.5, 1.5)

    """
    t, w = asarray(t), asarray(duty)
    # Broadcast t and w against each other so both have the common shape.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # BUGFIX: substring membership ('d' in 'fFdD'), not list membership --
    # ['fFdD'] only matched the literal four-character string, so float32
    # and complex inputs were always silently upcast to float64.
    if t.dtype.char in 'fFdD':
        ytype = t.dtype.char
    else:
        ytype = 'd'

    y = zeros(t.shape, ytype)

    # width must be between 0 and 1 inclusive; out-of-range gives nan.
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # on the interval 0 to duty*2*pi function is 1
    tmod = mod(t, 2 * pi)
    mask2 = (1 - mask1) & (tmod < w * 2 * pi)
    place(y, mask2, 1)

    # on the interval duty*2*pi to 2*pi function is
    #  (pi*(w+1)-tmod) / (pi*(1-w))
    mask3 = (1 - mask1) & (1 - mask2)
    place(y, mask3, -1)
    return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
               retenv=False):
    """
    Return a Gaussian modulated sinusoid:

        ``exp(-a t^2) exp(1j*2*pi*fc*t).``

    If `retquad` is True, then return the real and imaginary parts
    (in-phase and quadrature).
    If `retenv` is True, then return the envelope (unmodulated signal).
    Otherwise, return the real part of the modulated sinusoid.

    Parameters
    ----------
    t : ndarray or the string 'cutoff'
        Input array.
    fc : int, optional
        Center frequency (e.g. Hz).  Default is 1000.
    bw : float, optional
        Fractional bandwidth in frequency domain of pulse (e.g. Hz).
        Default is 0.5.
    bwr : float, optional
        Reference level at which fractional bandwidth is calculated (dB).
        Default is -6.
    tpr : float, optional
        If `t` is 'cutoff', then the function returns the cutoff
        time for when the pulse amplitude falls below `tpr` (in dB).
        Default is -60.
    retquad : bool, optional
        If True, return the quadrature (imaginary) as well as the real part
        of the signal.  Default is False.
    retenv : bool, optional
        If True, return the envelope of the signal.  Default is False.

    Returns
    -------
    yI : ndarray
        Real part of signal.  Always returned.
    yQ : ndarray
        Imaginary part of signal.  Only returned if `retquad` is True.
    yenv : ndarray
        Envelope of signal.  Only returned if `retenv` is True.

    See Also
    --------
    scipy.signal.morlet

    Examples
    --------
    Plot real component, imaginary component, and envelope for a 5 Hz pulse,
    sampled at 100 Hz for 2 seconds:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
    >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
    >>> plt.plot(t, i, t, q, t, e, '--')

    """
    # Validate parameters before any computation.
    if fc < 0:
        raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
    if bw <= 0:
        raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
    if bwr >= 0:
        raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
                         "be < 0 dB" % bwr)

    # exp(-a t^2) <->  sqrt(pi/a) exp(-pi^2/a * f^2)  = g(f)

    ref = pow(10.0, bwr / 20.0)
    # fdel = fc*bw/2:  g(fdel) = ref --- solve this for a
    #
    # pi^2/a * fc^2 * bw^2 /4=-log(ref)
    a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))

    # Special case: t == 'cutoff' returns a scalar cutoff time rather than
    # a waveform.
    if isinstance(t, string_types):
        if t == 'cutoff':  # compute cut_off point
            #  Solve exp(-a tc**2) = tref  for tc
            #   tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
            if tpr >= 0:
                raise ValueError("Reference level for time cutoff must "
                                 "be < 0 dB")
            tref = pow(10.0, tpr / 20.0)
            return sqrt(-log(tref) / a)
        else:
            raise ValueError("If `t` is a string, it must be 'cutoff'")

    # Envelope, in-phase and quadrature components.
    yenv = exp(-a * t * t)
    yI = yenv * cos(2 * pi * fc * t)
    yQ = yenv * sin(2 * pi * fc * t)
    # Return combinations according to the ret* flags (yI always included).
    if not retquad and not retenv:
        return yI
    if not retquad and retenv:
        return yI, yenv
    if retquad and not retenv:
        return yI, yQ
    if retquad and retenv:
        return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
Examples
--------
The following will be used in the examples:
>>> from scipy.signal import chirp, spectrogram
>>> import matplotlib.pyplot as plt
For the first example, we'll plot the waveform for a linear chirp
from 6 Hz to 1 Hz over 10 seconds:
>>> t = np.linspace(0, 10, 5001)
>>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')
>>> plt.plot(t, w)
>>> plt.title("Linear Chirp, f(0)=6, f(10)=1")
>>> plt.xlabel('t (sec)')
>>> plt.show()
For the remaining examples, we'll use higher frequency ranges,
and demonstrate the result using `scipy.signal.spectrogram`.
We'll use a 10 second interval sampled at 8000 Hz.
>>> fs = 8000
>>> T = 10
>>> t = np.linspace(0, T, T*fs, endpoint=False)
Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds
(vertex of the parabolic curve of the frequency is at t=0):
>>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic')
>>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512,
... nfft=2048)
>>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r')
>>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250')
>>> plt.xlabel('t (sec)')
>>> plt.ylabel('Frequency (Hz)')
>>> plt.grid()
>>> plt.show()
Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds
(vertex of the parabolic curve of the frequency is at t=10):
>>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic',
... vertex_zero=False)
>>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512,
... nfft=2048)
>>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r')
>>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250\\n' +
... '(vertex_zero=False)')
>>> plt.xlabel('t (sec)')
>>> plt.ylabel('Frequency (Hz)')
>>> plt.grid()
>>> plt.show()
Logarithmic chirp from 1500 Hz to 250 Hz over 10 seconds:
>>> w = chirp(t, f0=1500, f1=250, t1=10, method='logarithmic')
>>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512,
... nfft=2048)
>>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r')
>>> plt.title('Logarithmic Chirp, f(0)=1500, f(10)=250')
>>> plt.xlabel('t (sec)')
>>> plt.ylabel('Frequency (Hz)')
>>> plt.grid()
>>> plt.show()
Hyperbolic chirp from 1500 Hz to 250 Hz over 10 seconds:
>>> w = chirp(t, f0=1500, f1=250, t1=10, method='hyperbolic')
>>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512,
... nfft=2048)
>>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r')
>>> plt.title('Hyperbolic Chirp, f(0)=1500, f(10)=250')
>>> plt.xlabel('t (sec)')
>>> plt.ylabel('Frequency (Hz)')
>>> plt.grid()
>>> plt.show()
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
    """
    Calculate the phase used by `chirp` to generate its output.

    For each sweep `method`, the returned phase is the integral from 0
    to `t` of ``2*pi*f(t)``, where ``f(t)`` is the instantaneous
    frequency documented in `chirp`.

    See `chirp` for a description of the arguments.
    """
    t = asarray(t)
    # Coerce the scalar arguments to float so the arithmetic below is
    # done in floating point even for integer inputs.
    f0 = float(f0)
    t1 = float(t1)
    f1 = float(f1)
    if method in ['linear', 'lin', 'li']:
        # f(t) = f0 + beta*t  ->  phase = 2*pi*(f0*t + beta*t**2/2)
        beta = (f1 - f0) / t1
        phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)

    elif method in ['quadratic', 'quad', 'q']:
        # Parabolic frequency profile through (0, f0) and (t1, f1); the
        # vertex of the parabola is at t=0 when vertex_zero, else at t=t1.
        beta = (f1 - f0) / (t1 ** 2)
        if vertex_zero:
            phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
        else:
            phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)

    elif method in ['logarithmic', 'log', 'lo']:
        if f0 * f1 <= 0.0:
            raise ValueError("For a logarithmic chirp, f0 and f1 must be "
                             "nonzero and have the same sign.")
        if f0 == f1:
            # Degenerate case: constant frequency.
            phase = 2 * pi * f0 * t
        else:
            # f(t) = f0 * (f1/f0)**(t/t1) -- geometric/exponential sweep.
            beta = t1 / log(f1 / f0)
            phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)

    elif method in ['hyperbolic', 'hyp']:
        if f0 == 0 or f1 == 0:
            raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
                             "nonzero.")
        if f0 == f1:
            # Degenerate case: constant frequency.
            phase = 2 * pi * f0 * t
        else:
            # Singular point: the instantaneous frequency blows up
            # when t == sing.
            sing = -f1 * t1 / (f0 - f1)
            phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))

    else:
        raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
                         " or 'hyperbolic', but a value of %r was given."
                         % method)

    return phase
def sweep_poly(t, poly, phi=0):
    """
    Frequency-swept cosine generator, with a time-dependent frequency.

    The instantaneous frequency at time `t` is given by the polynomial
    `poly`.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    poly : 1-D array_like or instance of numpy.poly1d
        The desired frequency expressed as a polynomial.  If `poly` is
        a list or ndarray of length n, the instantaneous frequency is
        ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``.
        If `poly` is an instance of numpy.poly1d, the instantaneous
        frequency is ``f(t) = poly(t)``.
    phi : float, optional
        Phase offset, in degrees, Default: 0.

    Returns
    -------
    sweep_poly : ndarray
        A numpy array containing the signal evaluated at `t` with the
        requested time-varying frequency.  More precisely, the function
        returns ``cos(phase + (pi/180)*phi)``, where `phase` is the
        integral (from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is
        defined above.

    See Also
    --------
    chirp

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    # The phase integral lives in _sweep_poly_phase so it can be
    # exercised independently in the tests.
    total_phase = _sweep_poly_phase(t, poly)
    # Convert the phase offset from degrees to radians before summing.
    return cos(total_phase + phi * pi / 180)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
def unit_impulse(shape, idx=None, dtype=float):
    """
    Unit impulse signal (discrete delta function) or unit basis vector.

    Parameters
    ----------
    shape : int or tuple of int
        Number of samples in the output (1-D), or a tuple that
        represents the shape of the output (N-D).
    idx : None or int or tuple of int or 'mid', optional
        Index at which the value is 1.  If None, defaults to the 0th
        element.  If ``idx='mid'``, the impulse will be centered at
        ``shape // 2`` in all dimensions.  If an int, the impulse will
        be at `idx` in all dimensions.
    dtype : data-type, optional
        The desired data-type for the array, e.g., `numpy.int8`.
        Default is `numpy.float64`.

    Returns
    -------
    y : ndarray
        Output array containing an impulse signal.

    Notes
    -----
    The 1D case is also known as the Kronecker delta.

    .. versionadded:: 0.19.0
    """
    out = zeros(shape, dtype)
    dims = np.atleast_1d(shape)
    if idx is None:
        # Impulse at the origin.
        idx = (0,) * len(dims)
    elif idx == 'mid':
        # Center of every axis.
        idx = tuple(dims // 2)
    elif not hasattr(idx, "__iter__"):
        # Scalar index: broadcast to every axis.
        idx = (idx,) * len(dims)
    out[idx] = 1
    return out
|
gfyoung/scipy
|
scipy/signal/waveforms.py
|
Python
|
bsd-3-clause
| 21,071
|
[
"Gaussian"
] |
54ce0cc9a8db11d53911bca5897a3a81f1e129ae790faef488ce65781367c9bb
|
######################################################################
##
## Copyright 2010-2011 Ondrej Certik <ondrej@certik.cz>
## Copyright 2010-2011 Mateusz Paprocki <mattpap@gmail.com>
## Copyright 2011 Christian Iversen <ci@sikkerhed.org>
##
## Permission is hereby granted, free of charge, to any person
## obtaining a copy of this software and associated documentation
## files (the "Software"), to deal in the Software without
## restriction, including without limitation the rights to use,
## copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following
## conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
## OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
## HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
## WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
## OTHER DEALINGS IN THE SOFTWARE.
##
######################################################################
import ast
import inspect
class JSError(Exception):
    """Raised by the compiler when it meets Python syntax it cannot translate."""
    pass
class BaseCompiler(object):
    """Shared infrastructure for the pyjaco Python-to-JavaScript compilers.

    Subclasses implement ``visit_<NodeName>`` methods; :meth:`visit`
    dispatches on the AST node's class name and raises :class:`JSError`
    for unsupported syntax.
    """

    # Python names that must be rewritten because they would collide
    # with JavaScript keywords/identifiers in the generated code.
    name_map = {
        'super' : 'Super',
        'delete': '__delete',
        'default': '__default',
    }

    # Names of all the Python builtins, used to recognise references to
    # builtin functions when generating code (Python 2 module).
    import __builtin__
    builtin = set([x for x in dir(__builtin__) if not x.startswith("__")])

    def __init__(self, opts):
        """Initialise per-compilation state; `opts` holds backend options."""
        self.index_var = 0
        # This is the name of the classes that we are currently in:
        self._class_name = []
        # This lists all variables in the local scope:
        self._scope = []
        self._classes = {}
        self._exceptions = []
        self.opts = opts

    def alloc_var(self):
        """Return a fresh temporary JavaScript variable name ($v1, $v2, ...)."""
        self.index_var += 1
        return "$v%d" % self.index_var

    def visit(self, node):
        """Dispatch `node` to the matching ``visit_<ClassName>`` method.

        Raises JSError when no visitor exists for the node type.
        """
        try:
            visitor = getattr(self, 'visit_' + self.name(node))
        except AttributeError:
            raise JSError("syntax not supported (%s: %s)" % (node.__class__.__name__, node))

        return visitor(node)

    @staticmethod
    def indent(stmts):
        # Prefix every generated statement with the indentation string.
        return [ " " + stmt for stmt in stmts ]

    ## Shared code

    @staticmethod
    def name(node):
        """Return the AST node's class name (used for visitor dispatch)."""
        return node.__class__.__name__

    ## Shared visit functions

    def visit_AssignSimple(self, target, value):
        """Emit code assigning `value` to a single `target`; backend-specific."""
        raise NotImplementedError()

    def visit_Assign(self, node):
        # Multiple targets (a = b = expr): evaluate expr once into a
        # temporary, then assign the temporary to each target in turn.
        if len(node.targets) > 1:
            tmp = self.alloc_var()
            q = ["var %s = %s" % (tmp, self.visit(node.value))]
            for t in node.targets:
                q.extend(self.visit_AssignSimple(t, tmp))
            return q
        else:
            return self.visit_AssignSimple(node.targets[0], self.visit(node.value))

    def _visit_Exec(self, node):
        # `exec` statements are deliberately ignored.
        pass

    def visit_Print(self, node):
        # Only the plain Python 2 form `print a, b, ...` is supported:
        # no redirection (`print >>f`) and no trailing-comma form.
        assert node.dest is None
        assert node.nl
        values = [self.visit(v) for v in node.values]
        values = ", ".join(values)
        return ["__builtins__.PY$print(%s);" % values]

    def visit_Module(self, node):
        """Compile a whole module by concatenating its statements' code."""
        module = []
        for stmt in node.body:
            module.extend(self.visit(stmt))
        return module

    def visit_Assert(self, node):
        test = self.visit(node.test)
        if node.msg is not None:
            return ["assert(%s, %s);" % (test, self.visit(node.msg))]
        else:
            return ["assert(%s);" % test]

    def visit_Return(self, node):
        if node.value is not None:
            return ["return %s;" % self.visit(node.value)]
        else:
            return ["return;"]

    def visit_Expr(self, node):
        return [self.visit(node.value) + ";"]

    def visit_Pass(self, node):
        return ["/* pass */"]

    def visit_Break(self, node):
        return ["break;"]

    def visit_Continue(self, node):
        return ["continue;"]

    def visit_arguments(self, node):
        """Render a function's positional arguments as 'a, b, c'."""
        return ", ".join([self.visit(arg) for arg in node.args])
|
buchuki/pyjaco
|
pyjaco/compiler/__init__.py
|
Python
|
mit
| 4,278
|
[
"VisIt"
] |
99e3c21e7b3d23b75da05949a55bbd5b154753bbdb8adb7729d437f70703b821
|
import numpy as np
from gpaw import GPAW
from gpaw.dipole_correction import DipoleCorrection
from gpaw.poisson import PoissonSolver
from ase.structure import molecule
from gpaw.mpi import rank
from gpaw.utilities import h2gpts
"""
Test the dipole correction code by comparing this system:
H
z1 O z2
H
(where z1 and z2 denote points where the potential is probed)
Expected potential:
-----
/
/
----
to this system:
H H
z1 O z2 O
H H
Expected potential:
-------
/ \
/ \
----- ------
The height of the two potentials are tested to be the same.
Enable if-statement in the bottom for nice plots
"""
# Water monomer in a slab cell that is periodic in x/y only; the oblique
# cell exercises the dipole correction on a non-orthogonal lattice.
system1 = molecule('H2O')
system1.set_pbc((True, True, False))
system1.cell = 4.0 * np.array([[1.0, -1.5, 0.0], [1.0, 1.0, 0.0],
                               [0., 0., 1.]])
system1.center(vacuum=10.0, axis=2)

# Mirror the molecule through the xy-plane and stack it with the
# original so the combined system has no net dipole along z.
system2 = system1.copy()
system2.positions *= [1.0, 1.0, -1.0]
system2 += system1
system2.center(vacuum=6.0, axis=2)

convergence = dict(density=1e-5)

# First calculation: single slab with the dipole-corrected Poisson
# solver (the trailing 2 is presumably the correction axis -- verify).
calc1 = GPAW(mode='lcao',
             convergence=convergence,
             gpts=h2gpts(0.25, system1.cell, idiv=8),
             poissonsolver=DipoleCorrection(PoissonSolver(relax='GS',
                                                          eps=1e-11), 2))
system1.set_calculator(calc1)
system1.get_potential_energy()
v1 = calc1.get_effective_potential(pad=False)

# Second calculation: mirrored double slab with a plain Poisson solver.
calc2 = GPAW(mode='lcao',
             convergence=convergence,
             gpts=h2gpts(0.25, system2.cell, idiv=8),
             poissonsolver=PoissonSolver(relax='GS', eps=1e-11))
system2.set_calculator(calc2)
system2.get_potential_energy()
v2 = calc2.get_effective_potential(pad=False)
def get_avg(v):
    """Return (vz, vyz): v averaged over x, and over both x and y."""
    nx, ny, nz = v.shape
    plane = v.sum(axis=0) / nx
    line = plane.sum(axis=0) / ny
    return line, plane
# Only the MPI root analyses and prints the result.
if rank == 0:
    vz1, vyz1 = get_avg(v1)
    vz2, vyz2 = get_avg(v2)

    # Potential step across the dipole-corrected single slab vs. across
    # one half of the mirrored double slab; the two should agree.
    # Compare values that are not right at the end of the array
    # (at the end of the array things can "oscillate" a bit)
    dvz1 = vz1[-5] - vz1[4]
    dvz2 = vz2[4] - vz2[len(vz2) // 2]

    print dvz1, dvz2

    err1 = abs(dvz1 - dvz2)

    # Comparison to what the values were when this test was last modified:
    ref_value = 2.07342988218
    err2 = abs(dvz1 - ref_value)

    # Flip to `if 1:` for diagnostic plots of the averaged potentials.
    if 0:
        import pylab as pl
        pl.imshow(vyz1)
        pl.figure()
        pl.imshow(vyz2)
        pl.figure()
        pl.plot(vz1)
        pl.plot(vz2)
        pl.show()

    print 'Ref value of previous calculation', ref_value
    print 'Value in this calculation', dvz1
    # fine grid needed to achieve convergence!
    print 'Error', err1, err2
    assert err1 < 5e-3, err1
    assert err2 < 2e-4, err2
|
robwarm/gpaw-symm
|
gpaw/test/dipole.py
|
Python
|
gpl-3.0
| 2,744
|
[
"ASE",
"GPAW"
] |
cc1f72dc23f5891e606cf66cf629f49b98da44b636e18f17fb87017957c9c7be
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2004-2006 The Regents of The University of Michigan
# Copyright (c) 2010-2013 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Nathan Binkert
# Andreas Hansson
import sys
from types import FunctionType, MethodType, ModuleType
import m5
from m5.util import *
# Have to import params up top since Param is referenced on initial
# load (when SimObject class references Param to create a class
# variable, the 'name' param)...
from m5.params import *
# There are a few things we need that aren't in params.__all__ since
# normal users don't need them
from m5.params import ParamDesc, VectorParamDesc, \
isNullPointer, SimObjectVector, Port
from m5.proxy import *
from m5.proxy import isproxy
#####################################################################
#
# M5 Python Configuration Utility
#
# The basic idea is to write simple Python programs that build Python
# objects corresponding to M5 SimObjects for the desired simulation
# configuration. For now, the Python emits a .ini file that can be
# parsed by M5. In the future, some tighter integration between M5
# and the Python interpreter may allow bypassing the .ini file.
#
# Each SimObject class in M5 is represented by a Python class with the
# same name. The Python inheritance tree mirrors the M5 C++ tree
# (e.g., SimpleCPU derives from BaseCPU in both cases, and all
# SimObjects inherit from a single SimObject base class). To specify
# an instance of an M5 SimObject in a configuration, the user simply
# instantiates the corresponding Python object. The parameters for
# that SimObject are given by assigning to attributes of the Python
# object, either using keyword assignment in the constructor or in
# separate assignment statements. For example:
#
# cache = BaseCache(size='64KB')
# cache.hit_latency = 3
# cache.assoc = 8
#
# The magic lies in the mapping of the Python attributes for SimObject
# classes to the actual SimObject parameter specifications. This
# allows parameter validity checking in the Python code. Continuing
# the example above, the statements "cache.blurfl=3" or
# "cache.assoc='hello'" would both result in runtime errors in Python,
# since the BaseCache object has no 'blurfl' parameter and the 'assoc'
# parameter requires an integer, respectively. This magic is done
# primarily by overriding the special __setattr__ method that controls
# assignment to object attributes.
#
# Once a set of Python objects have been instantiated in a hierarchy,
# calling 'instantiate(obj)' (where obj is the root of the hierarchy)
# will generate a .ini file.
#
#####################################################################
# list of all SimObject classes, keyed by class name; populated by
# MetaSimObject.__new__ for every class that declares a 'type'
allClasses = {}

# dict to look up SimObjects based on path
instanceDict = {}

# Did any of the SimObjects lack a header file?
noCxxHeader = False
def public_value(key, value):
    """Return True if (key, value) should go straight into the class
    __dict__ rather than being treated as a param/port assignment.

    Despite the name, this matches underscore-prefixed ("private")
    names as well as plain functions, methods, modules, classmethods
    and classes -- i.e. everything that is NOT a parameter value or a
    port binding.
    """
    if key.startswith('_'):
        return True
    plain_kinds = (FunctionType, MethodType, ModuleType, classmethod, type)
    return isinstance(value, plain_kinds)
# The metaclass for SimObject. This class controls how new classes
# that derive from SimObject are instantiated, and provides inherited
# class behavior (just like a class controls how instances of that
# class are instantiated, and provides inherited instance behavior).
class MetaSimObject(type):
    """Metaclass for SimObject.

    Intercepts class creation so that param/port declarations and value
    assignments appearing in a SimObject class body are routed through
    the param system instead of becoming plain class attributes, and
    provides the SWIG/C++ code-generation hooks used by the build
    system (Python 2 module).
    """
    # Attributes that can be set only at initialization time
    init_keywords = { 'abstract' : bool,
                      'cxx_class' : str,
                      'cxx_type' : str,
                      'cxx_header' : str,
                      'type' : str,
                      'cxx_bases' : list }
    # Attributes that can be set any time
    keywords = { 'check' : FunctionType }

    # __new__ is called before __init__, and is where the statements
    # in the body of the class definition get loaded into the class's
    # __dict__.  We intercept this to filter out parameter & port assignments
    # and only allow "private" attributes to be passed to the base
    # __new__ (starting with underscore).
    def __new__(mcls, name, bases, dict):
        assert name not in allClasses, "SimObject %s already present" % name

        # Copy "private" attributes, functions, and classes to the
        # official dict.  Everything else goes in _init_dict to be
        # filtered in __init__.
        cls_dict = {}
        value_dict = {}
        for key,val in dict.items():
            if public_value(key, val):
                cls_dict[key] = val
            else:
                # must be a param/port setting
                value_dict[key] = val
        if 'abstract' not in value_dict:
            value_dict['abstract'] = False
        if 'cxx_bases' not in value_dict:
            value_dict['cxx_bases'] = []
        cls_dict['_value_dict'] = value_dict
        cls = super(MetaSimObject, mcls).__new__(mcls, name, bases, cls_dict)
        if 'type' in value_dict:
            allClasses[name] = cls
        return cls

    # subclass initialization
    def __init__(cls, name, bases, dict):
        # calls type.__init__()... I think that's a no-op, but leave
        # it here just in case it's not.
        super(MetaSimObject, cls).__init__(name, bases, dict)

        # initialize required attributes

        # class-only attributes
        cls._params = multidict()  # param descriptions
        cls._ports = multidict()   # port descriptions

        # class or instance attributes
        cls._values = multidict()     # param values
        cls._hr_values = multidict()  # human readable param values
        cls._children = multidict()   # SimObject children
        cls._port_refs = multidict()  # port ref objects
        cls._instantiated = False  # really instantiated, cloned, or subclassed

        # We don't support multiple inheritance of sim objects.  If you want
        # to, you must fix multidict to deal with it properly. Non sim-objects
        # are ok, though
        bTotal = 0
        for c in bases:
            if isinstance(c, MetaSimObject):
                bTotal += 1
        if bTotal > 1:
            raise TypeError, "SimObjects do not support multiple inheritance"

        base = bases[0]

        # Set up general inheritance via multidicts.  A subclass will
        # inherit all its settings from the base class.  The only time
        # the following is not true is when we define the SimObject
        # class itself (in which case the multidicts have no parent).
        if isinstance(base, MetaSimObject):
            cls._base = base
            cls._params.parent = base._params
            cls._ports.parent = base._ports
            cls._values.parent = base._values
            cls._hr_values.parent = base._hr_values
            cls._children.parent = base._children
            cls._port_refs.parent = base._port_refs
            # mark base as having been subclassed
            base._instantiated = True
        else:
            cls._base = None

        # default keyword values
        if 'type' in cls._value_dict:
            if 'cxx_class' not in cls._value_dict:
                cls._value_dict['cxx_class'] = cls._value_dict['type']

            cls._value_dict['cxx_type'] = '%s *' % cls._value_dict['cxx_class']

            if 'cxx_header' not in cls._value_dict:
                global noCxxHeader
                noCxxHeader = True
                warn("No header file specified for SimObject: %s", name)

        # Export methods are automatically inherited via C++, so we
        # don't want the method declarations to get inherited on the
        # python side (and thus end up getting repeated in the wrapped
        # versions of derived classes).  The code below basically
        # suppresses inheritance by substituting in the base (null)
        # versions of these methods unless a different version is
        # explicitly supplied.
        for method_name in ('export_methods', 'export_method_cxx_predecls',
                            'export_method_swig_predecls'):
            if method_name not in cls.__dict__:
                base_method = getattr(MetaSimObject, method_name)
                m = MethodType(base_method, cls, MetaSimObject)
                setattr(cls, method_name, m)

        # Now process the _value_dict items.  They could be defining
        # new (or overriding existing) parameters or ports, setting
        # class keywords (e.g., 'abstract'), or setting parameter
        # values or port bindings.  The first 3 can only be set when
        # the class is defined, so we handle them here.  The others
        # can be set later too, so just emulate that by calling
        # setattr().
        for key,val in cls._value_dict.items():
            # param descriptions
            if isinstance(val, ParamDesc):
                cls._new_param(key, val)

            # port objects
            elif isinstance(val, Port):
                cls._new_port(key, val)

            # init-time-only keywords
            elif cls.init_keywords.has_key(key):
                cls._set_keyword(key, val, cls.init_keywords[key])

            # default: use normal path (ends up in __setattr__)
            else:
                setattr(cls, key, val)

    def _set_keyword(cls, keyword, val, kwtype):
        # Validate a class keyword (see init_keywords/keywords above);
        # plain functions are wrapped as classmethods before being set.
        if not isinstance(val, kwtype):
            raise TypeError, 'keyword %s has bad type %s (expecting %s)' % \
                  (keyword, type(val), kwtype)
        if isinstance(val, FunctionType):
            val = classmethod(val)
        type.__setattr__(cls, keyword, val)

    def _new_param(cls, name, pdesc):
        # each param desc should be uniquely assigned to one variable
        assert(not hasattr(pdesc, 'name'))
        pdesc.name = name
        cls._params[name] = pdesc
        if hasattr(pdesc, 'default'):
            cls._set_param(name, pdesc.default, pdesc)

    def _set_param(cls, name, value, param):
        # Convert `value` through the param's converter and record both
        # the converted and the human-readable forms.
        assert(param.name == name)
        try:
            hr_value = value
            value = param.convert(value)
        except Exception, e:
            msg = "%s\nError setting param %s.%s to %s\n" % \
                  (e, cls.__name__, name, value)
            e.args = (msg, )
            raise
        cls._values[name] = value
        # if param value is a SimObject, make it a child too, so that
        # it gets cloned properly when the class is instantiated
        if isSimObjectOrVector(value) and not value.has_parent():
            cls._add_cls_child(name, value)
        # update human-readable values of the param if it has a literal
        # value and is not an object or proxy.
        if not (isSimObjectOrVector(value) or\
           isinstance(value, m5.proxy.BaseProxy)):
            cls._hr_values[name] = hr_value

    def _add_cls_child(cls, name, child):
        # It's a little funky to have a class as a parent, but these
        # objects should never be instantiated (only cloned, which
        # clears the parent pointer), and this makes it clear that the
        # object is not an orphan and can provide better error
        # messages.
        child.set_parent(cls, name)
        cls._children[name] = child

    def _new_port(cls, name, port):
        # each port should be uniquely assigned to one variable
        assert(not hasattr(port, 'name'))
        port.name = name
        cls._ports[name] = port

    # same as _get_port_ref, effectively, but for classes
    def _cls_get_port_ref(cls, attr):
        # Return reference that can be assigned to another port
        # via __setattr__.  There is only ever one reference
        # object per port, but we create them lazily here.
        ref = cls._port_refs.get(attr)
        if not ref:
            ref = cls._ports[attr].makeRef(cls)
            cls._port_refs[attr] = ref
        return ref

    # Set attribute (called on foo.attr = value when foo is an
    # instance of class cls).
    def __setattr__(cls, attr, value):
        # normal processing for private attributes
        if public_value(attr, value):
            type.__setattr__(cls, attr, value)
            return

        if cls.keywords.has_key(attr):
            cls._set_keyword(attr, value, cls.keywords[attr])
            return

        if cls._ports.has_key(attr):
            cls._cls_get_port_ref(attr).connect(value)
            return

        if isSimObjectOrSequence(value) and cls._instantiated:
            raise RuntimeError, \
                  "cannot set SimObject parameter '%s' after\n" \
                  " class %s has been instantiated or subclassed" \
                  % (attr, cls.__name__)

        # check for param
        param = cls._params.get(attr)
        if param:
            cls._set_param(attr, value, param)
            return

        if isSimObjectOrSequence(value):
            # If RHS is a SimObject, it's an implicit child assignment.
            cls._add_cls_child(attr, coerceSimObjectOrVector(value))
            return

        # no valid assignment... raise exception
        raise AttributeError, \
              "Class %s has no parameter \'%s\'" % (cls.__name__, attr)

    def __getattr__(cls, attr):
        # Synthesised attributes derived from cxx_class, then param
        # values, then SimObject children.
        if attr == 'cxx_class_path':
            return cls.cxx_class.split('::')

        if attr == 'cxx_class_name':
            return cls.cxx_class_path[-1]

        if attr == 'cxx_namespaces':
            return cls.cxx_class_path[:-1]

        if cls._values.has_key(attr):
            return cls._values[attr]

        if cls._children.has_key(attr):
            return cls._children[attr]

        raise AttributeError, \
              "object '%s' has no attribute '%s'" % (cls.__name__, attr)

    def __str__(cls):
        return cls.__name__

    # See ParamValue.cxx_predecls for description.
    def cxx_predecls(cls, code):
        code('#include "params/$cls.hh"')

    # See ParamValue.swig_predecls for description.
    def swig_predecls(cls, code):
        code('%import "python/m5/internal/param_$cls.i"')

    # Hook for exporting additional C++ methods to Python via SWIG.
    # Default is none, override using @classmethod in class definition.
    def export_methods(cls, code):
        pass

    # Generate the code needed as a prerequisite for the C++ methods
    # exported via export_methods() to be compiled in the _wrap.cc
    # file.  Typically generates one or more #include statements.  If
    # any methods are exported, typically at least the C++ header
    # declaring the relevant SimObject class must be included.
    def export_method_cxx_predecls(cls, code):
        pass

    # Generate the code needed as a prerequisite for the C++ methods
    # exported via export_methods() to be processed by SWIG.
    # Typically generates one or more %include or %import statements.
    # If any methods are exported, typically at least the C++ header
    # declaring the relevant SimObject class must be included.
    def export_method_swig_predecls(cls, code):
        pass

    # Generate the declaration for this object for wrapping with SWIG.
    # Generates code that goes into a SWIG .i file.  Called from
    # src/SConscript.
    def swig_decl(cls, code):
        class_path = cls.cxx_class.split('::')
        classname = class_path[-1]
        namespaces = class_path[:-1]

        # The 'local' attribute restricts us to the params declared in
        # the object itself, not including inherited params (which
        # will also be inherited from the base class's param struct
        # here).
        params = cls._params.local.values()
        ports = cls._ports.local

        code('%module(package="m5.internal") param_$cls')
        code()
        code('%{')
        code('#include "sim/sim_object.hh"')
        code('#include "params/$cls.hh"')
        for param in params:
            param.cxx_predecls(code)
        code('#include "${{cls.cxx_header}}"')
        cls.export_method_cxx_predecls(code)
        code('''\
/**
* This is a workaround for bug in swig. Prior to gcc 4.6.1 the STL
* headers like vector, string, etc. used to automatically pull in
* the cstddef header but starting with gcc 4.6.1 they no longer do.
* This leads to swig generated a file that does not compile so we
* explicitly include cstddef. Additionally, including version 2.0.4,
* swig uses ptrdiff_t without the std:: namespace prefix which is
* required with gcc 4.6.1. We explicitly provide access to it.
*/
#include <cstddef>
using std::ptrdiff_t;
''')
        code('%}')
        code()

        for param in params:
            param.swig_predecls(code)
        cls.export_method_swig_predecls(code)
        code()

        if cls._base:
            code('%import "python/m5/internal/param_${{cls._base}}.i"')
        code()

        for ns in namespaces:
            code('namespace $ns {')

        if namespaces:
            code('// avoid name conflicts')
            sep_string = '_COLONS_'
            flat_name = sep_string.join(class_path)
            code('%rename($flat_name) $classname;')

        code()
        code('// stop swig from creating/wrapping default ctor/dtor')
        code('%nodefault $classname;')
        code('class $classname')
        if cls._base:
            bases = [ cls._base.cxx_class ] + cls.cxx_bases
        else:
            bases = cls.cxx_bases
        base_first = True
        for base in bases:
            if base_first:
                code(' : public ${{base}}')
                base_first = False
            else:
                code(' , public ${{base}}')
        code('{')
        code(' public:')
        cls.export_methods(code)
        code('};')

        for ns in reversed(namespaces):
            code('} // namespace $ns')

        code()
        code('%include "params/$cls.hh"')

    # Generate the C++ declaration (.hh file) for this SimObject's
    # param struct.  Called from src/SConscript.
    def cxx_param_decl(cls, code):
        # The 'local' attribute restricts us to the params declared in
        # the object itself, not including inherited params (which
        # will also be inherited from the base class's param struct
        # here).
        params = cls._params.local.values()
        ports = cls._ports.local
        try:
            ptypes = [p.ptype for p in params]
        except:
            print cls, p, p.ptype_str
            print params
            raise

        class_path = cls._value_dict['cxx_class'].split('::')

        code('''\
#ifndef __PARAMS__${cls}__
#define __PARAMS__${cls}__
''')

        # A forward class declaration is sufficient since we are just
        # declaring a pointer.
        for ns in class_path[:-1]:
            code('namespace $ns {')
        code('class $0;', class_path[-1])
        for ns in reversed(class_path[:-1]):
            code('} // namespace $ns')
        code()

        # The base SimObject has a couple of params that get
        # automatically set from Python without being declared through
        # the normal Param mechanism; we slip them in here (needed
        # predecls now, actual declarations below)
        if cls == SimObject:
            code('''
#ifndef PY_VERSION
struct PyObject;
#endif
#include <string>
''')

        for param in params:
            param.cxx_predecls(code)
        for port in ports.itervalues():
            port.cxx_predecls(code)
        code()

        if cls._base:
            code('#include "params/${{cls._base.type}}.hh"')
            code()

        for ptype in ptypes:
            if issubclass(ptype, Enum):
                code('#include "enums/${{ptype.__name__}}.hh"')
                code()

        # now generate the actual param struct
        code("struct ${cls}Params")
        if cls._base:
            code(" : public ${{cls._base.type}}Params")
        code("{")
        if not hasattr(cls, 'abstract') or not cls.abstract:
            if 'type' in cls.__dict__:
                code(" ${{cls.cxx_type}} create();")
        code.indent()
        if cls == SimObject:
            code('''
SimObjectParams() {}
virtual ~SimObjectParams() {}
std::string name;
PyObject *pyobj;
''')
        for param in params:
            param.cxx_decl(code)
        for port in ports.itervalues():
            port.cxx_decl(code)
        code.dedent()
        code('};')
        code()
        code('#endif // __PARAMS__${cls}__')

        return code
# This *temporary* definition is required to support calls from the
# SimObject class definition to the MetaSimObject methods (in
# particular _set_param, which gets called for parameters with default
# values defined on the SimObject class itself).  It will get
# overridden by the permanent definition (which requires that
# SimObject be defined) lower in this file.
def isSimObjectOrVector(value):
    """Temporary stub: nothing qualifies until SimObject is defined below."""
    return False
# This class holds information about each simobject parameter
# that should be displayed on the command line for use in the
# configuration system.
class ParamInfo(object):
    """Read-only record describing one SimObject parameter for the CLI.

    Instances freeze themselves at the end of __init__: any later
    attribute assignment is silently ignored (see __setattr__).
    """

    def __init__(self, type, desc, type_str, example, default_val, access_str):
        # Populate the instance dict directly; 'access_str' is kept because
        # the python access path may differ from the command-line spelling.
        self.__dict__.update(
            type=type,
            desc=desc,
            type_str=type_str,
            example_str=example,
            default_val=default_val,
            access_str=access_str)
        # Writing this key last flips the instance into its frozen state.
        self.__dict__["created"] = True

    def __setattr__(self, name, value):
        # Attribute writes only take effect before construction completes.
        if "created" not in self.__dict__:
            self.__dict__[name] = value
# The SimObject class is the root of the special hierarchy. Most of
# the code in this class deals with the configuration hierarchy itself
# (parent/child node relationships).
class SimObject(object):
# Specify metaclass. Any class inheriting from SimObject will
# get this metaclass.
__metaclass__ = MetaSimObject
type = 'SimObject'
abstract = True
cxx_header = "sim/sim_object.hh"
cxx_bases = [ "Drainable", "Serializable" ]
eventq_index = Param.UInt32(Parent.eventq_index, "Event Queue Index")
@classmethod
def export_method_swig_predecls(cls, code):
code('''
%include <std_string.i>
%import "python/swig/drain.i"
%import "python/swig/serialize.i"
''')
@classmethod
def export_methods(cls, code):
code('''
void init();
void loadState(Checkpoint *cp);
void initState();
void regStats();
void resetStats();
void regProbePoints();
void regProbeListeners();
void startup();
''')
# Returns a dict of all the option strings that can be
# generated as command line options for this simobject instance
# by tracing all reachable params in the top level instance and
# any children it contains.
def enumerateParams(self, flags_dict = {},
cmd_line_str = "", access_str = ""):
if hasattr(self, "_paramEnumed"):
print "Cycle detected enumerating params"
else:
self._paramEnumed = True
# Scan the children first to pick up all the objects in this SimObj
for keys in self._children:
child = self._children[keys]
next_cmdline_str = cmd_line_str + keys
next_access_str = access_str + keys
if not isSimObjectVector(child):
next_cmdline_str = next_cmdline_str + "."
next_access_str = next_access_str + "."
flags_dict = child.enumerateParams(flags_dict,
next_cmdline_str,
next_access_str)
# Go through the simple params in the simobject in this level
# of the simobject hierarchy and save information about the
# parameter to be used for generating and processing command line
# options to the simulator to set these parameters.
for keys,values in self._params.items():
if values.isCmdLineSettable():
type_str = ''
ex_str = values.example_str()
ptype = None
if isinstance(values, VectorParamDesc):
type_str = 'Vector_%s' % values.ptype_str
ptype = values
else:
type_str = '%s' % values.ptype_str
ptype = values.ptype
if keys in self._hr_values\
and keys in self._values\
and not isinstance(self._values[keys], m5.proxy.BaseProxy):
cmd_str = cmd_line_str + keys
acc_str = access_str + keys
flags_dict[cmd_str] = ParamInfo(ptype,
self._params[keys].desc, type_str, ex_str,
values.pretty_print(self._hr_values[keys]),
acc_str)
elif not keys in self._hr_values\
and not keys in self._values:
# Empty param
cmd_str = cmd_line_str + keys
acc_str = access_str + keys
flags_dict[cmd_str] = ParamInfo(ptype,
self._params[keys].desc,
type_str, ex_str, '', acc_str)
return flags_dict
# Initialize new instance. For objects with SimObject-valued
# children, we need to recursively clone the classes represented
# by those param values as well in a consistent "deep copy"-style
# fashion. That is, we want to make sure that each instance is
# cloned only once, and that if there are multiple references to
# the same original object, we end up with the corresponding
# cloned references all pointing to the same cloned instance.
def __init__(self, **kwargs):
ancestor = kwargs.get('_ancestor')
memo_dict = kwargs.get('_memo')
if memo_dict is None:
# prepare to memoize any recursively instantiated objects
memo_dict = {}
elif ancestor:
# memoize me now to avoid problems with recursive calls
memo_dict[ancestor] = self
if not ancestor:
ancestor = self.__class__
ancestor._instantiated = True
# initialize required attributes
self._parent = None
self._name = None
self._ccObject = None # pointer to C++ object
self._ccParams = None
self._instantiated = False # really "cloned"
# Clone children specified at class level. No need for a
# multidict here since we will be cloning everything.
# Do children before parameter values so that children that
# are also param values get cloned properly.
self._children = {}
for key,val in ancestor._children.iteritems():
self.add_child(key, val(_memo=memo_dict))
# Inherit parameter values from class using multidict so
# individual value settings can be overridden but we still
# inherit late changes to non-overridden class values.
self._values = multidict(ancestor._values)
self._hr_values = multidict(ancestor._hr_values)
# clone SimObject-valued parameters
for key,val in ancestor._values.iteritems():
val = tryAsSimObjectOrVector(val)
if val is not None:
self._values[key] = val(_memo=memo_dict)
# clone port references. no need to use a multidict here
# since we will be creating new references for all ports.
self._port_refs = {}
for key,val in ancestor._port_refs.iteritems():
self._port_refs[key] = val.clone(self, memo_dict)
# apply attribute assignments from keyword args, if any
for key,val in kwargs.iteritems():
setattr(self, key, val)
# "Clone" the current instance by creating another instance of
# this instance's class, but that inherits its parameter values
# and port mappings from the current instance. If we're in a
# "deep copy" recursive clone, check the _memo dict to see if
# we've already cloned this instance.
def __call__(self, **kwargs):
memo_dict = kwargs.get('_memo')
if memo_dict is None:
# no memo_dict: must be top-level clone operation.
# this is only allowed at the root of a hierarchy
if self._parent:
raise RuntimeError, "attempt to clone object %s " \
"not at the root of a tree (parent = %s)" \
% (self, self._parent)
# create a new dict and use that.
memo_dict = {}
kwargs['_memo'] = memo_dict
elif memo_dict.has_key(self):
# clone already done & memoized
return memo_dict[self]
return self.__class__(_ancestor = self, **kwargs)
def _get_port_ref(self, attr):
    """Return the single PortRef for port *attr*, creating it lazily.

    The reference can then be assigned to another port via __setattr__.
    Exactly one reference object ever exists per port on this object.
    """
    cached = self._port_refs.get(attr)
    if cached == None:
        cached = self._ports[attr].makeRef(self)
        self._port_refs[attr] = cached
    return cached
def __getattr__(self, attr):
if self._ports.has_key(attr):
return self._get_port_ref(attr)
if self._values.has_key(attr):
return self._values[attr]
if self._children.has_key(attr):
return self._children[attr]
# If the attribute exists on the C++ object, transparently
# forward the reference there. This is typically used for
# SWIG-wrapped methods such as init(), regStats(),
# resetStats(), startup(), drain(), and
# resume().
if self._ccObject and hasattr(self._ccObject, attr):
return getattr(self._ccObject, attr)
err_string = "object '%s' has no attribute '%s'" \
% (self.__class__.__name__, attr)
if not self._ccObject:
err_string += "\n (C++ object is not yet constructed," \
" so wrapped C++ methods are unavailable.)"
raise AttributeError, err_string
# Set attribute (called on foo.attr = value when foo is an
# instance of class cls).
def __setattr__(self, attr, value):
# normal processing for private attributes
if attr.startswith('_'):
object.__setattr__(self, attr, value)
return
if self._ports.has_key(attr):
# set up port connection
self._get_port_ref(attr).connect(value)
return
param = self._params.get(attr)
if param:
try:
hr_value = value
value = param.convert(value)
except Exception, e:
msg = "%s\nError setting param %s.%s to %s\n" % \
(e, self.__class__.__name__, attr, value)
e.args = (msg, )
raise
self._values[attr] = value
# implicitly parent unparented objects assigned as params
if isSimObjectOrVector(value) and not value.has_parent():
self.add_child(attr, value)
# set the human-readable value dict if this is a param
# with a literal value and is not being set as an object
# or proxy.
if not (isSimObjectOrVector(value) or\
isinstance(value, m5.proxy.BaseProxy)):
self._hr_values[attr] = hr_value
return
# if RHS is a SimObject, it's an implicit child assignment
if isSimObjectOrSequence(value):
self.add_child(attr, value)
return
# no valid assignment... raise exception
raise AttributeError, "Class %s has no parameter %s" \
% (self.__class__.__name__, attr)
# this hack allows tacking a '[0]' onto parameters that may or may
# not be vectors, and always getting the first element (e.g. cpus)
def __getitem__(self, key):
    """Allow scalar SimObjects to be indexed like a one-element vector."""
    if key == 0:
        return self
    # Py2/Py3-compatible raise form (was the Py2-only 'raise E, msg').
    raise IndexError("Non-zero index '%s' to SimObject" % key)

# this hack allows us to iterate over a SimObject that may
# not be a vector, so we can call a loop over it and get just one
# element.
def __len__(self):
    """A scalar SimObject behaves as a length-1 sequence."""
    return 1
# Also implemented by SimObjectVector
def clear_parent(self, old_parent):
    """Detach this object; *old_parent* must be the current parent."""
    assert self._parent is old_parent
    self._parent = None

# Also implemented by SimObjectVector
def set_parent(self, parent, name):
    """Attach this object beneath *parent* under the given *name*."""
    self._parent = parent
    self._name = name

# Return parent object of this SimObject, not implemented by SimObjectVector
# because the elements in a SimObjectVector may not share the same parent
def get_parent(self):
    """Return the parent SimObject, or None for an orphan."""
    return self._parent

# Also implemented by SimObjectVector
def get_name(self):
    """Return the name given to this object by its parent."""
    return self._name

# Also implemented by SimObjectVector
def has_parent(self):
    """True once this object has been attached somewhere in the tree."""
    return self._parent is not None
# clear out child with given name. This code is not likely to be exercised.
# See comment in add_child.
def clear_child(self, name):
child = self._children[name]
child.clear_parent(self)
del self._children[name]
# Add a new child to this object.
def add_child(self, name, child):
child = coerceSimObjectOrVector(child)
if child.has_parent():
warn("add_child('%s'): child '%s' already has parent", name,
child.get_name())
if self._children.has_key(name):
# This code path had an undiscovered bug that would make it fail
# at runtime. It had been here for a long time and was only
# exposed by a buggy script. Changes here will probably not be
# exercised without specialized testing.
self.clear_child(name)
child.set_parent(self, name)
self._children[name] = child
# Take SimObject-valued parameters that haven't been explicitly
# assigned as children and make them children of the object that
# they were assigned to as a parameter value. This guarantees
# that when we instantiate all the parameter objects we're still
# inside the configuration hierarchy.
def adoptOrphanParams(self):
for key,val in self._values.iteritems():
if not isSimObjectVector(val) and isSimObjectSequence(val):
# need to convert raw SimObject sequences to
# SimObjectVector class so we can call has_parent()
val = SimObjectVector(val)
self._values[key] = val
if isSimObjectOrVector(val) and not val.has_parent():
warn("%s adopting orphan SimObject param '%s'", self, key)
self.add_child(key, val)
def path(self):
    """Return the dotted configuration path of this object."""
    parent = self._parent
    if not parent:
        return '<orphan %s>' % self.__class__
    prefix = parent.path()
    # Children of the root are addressed without a 'root.' prefix.
    if prefix == 'root':
        return self._name
    return "%s.%s" % (prefix, self._name)

def __str__(self):
    # The printable form of a SimObject is its configuration path.
    return self.path()

def config_value(self):
    # As a config value, a SimObject is represented by its path.
    return self.path()

def ini_str(self):
    # As an ini-file value, a SimObject is represented by its path.
    return self.path()
def find_any(self, ptype):
if isinstance(self, ptype):
return self, True
found_obj = None
for child in self._children.itervalues():
visited = False
if hasattr(child, '_visited'):
visited = getattr(child, '_visited')
if isinstance(child, ptype) and not visited:
if found_obj != None and child != found_obj:
raise AttributeError, \
'parent.any matched more than one: %s %s' % \
(found_obj.path, child.path)
found_obj = child
# search param space
for pname,pdesc in self._params.iteritems():
if issubclass(pdesc.ptype, ptype):
match_obj = self._values[pname]
if found_obj != None and found_obj != match_obj:
raise AttributeError, \
'parent.any matched more than one: %s and %s' % (found_obj.path, match_obj.path)
found_obj = match_obj
return found_obj, found_obj != None
def find_all(self, ptype):
all = {}
# search children
for child in self._children.itervalues():
# a child could be a list, so ensure we visit each item
if isinstance(child, list):
children = child
else:
children = [child]
for child in children:
if isinstance(child, ptype) and not isproxy(child) and \
not isNullPointer(child):
all[child] = True
if isSimObject(child):
# also add results from the child itself
child_all, done = child.find_all(ptype)
all.update(dict(zip(child_all, [done] * len(child_all))))
# search param space
for pname,pdesc in self._params.iteritems():
if issubclass(pdesc.ptype, ptype):
match_obj = self._values[pname]
if not isproxy(match_obj) and not isNullPointer(match_obj):
all[match_obj] = True
return all.keys(), True
def unproxy(self, base):
return self
def unproxyParams(self):
for param in self._params.iterkeys():
value = self._values.get(param)
if value != None and isproxy(value):
try:
value = value.unproxy(self)
except:
print "Error in unproxying param '%s' of %s" % \
(param, self.path())
raise
setattr(self, param, value)
# Unproxy ports in sorted order so that 'append' operations on
# vector ports are done in a deterministic fashion.
port_names = self._ports.keys()
port_names.sort()
for port_name in port_names:
port = self._port_refs.get(port_name)
if port != None:
port.unproxy(self)
def print_ini(self, ini_file):
print >>ini_file, '[' + self.path() + ']' # .ini section header
instanceDict[self.path()] = self
if hasattr(self, 'type'):
print >>ini_file, 'type=%s' % self.type
if len(self._children.keys()):
print >>ini_file, 'children=%s' % \
' '.join(self._children[n].get_name() \
for n in sorted(self._children.keys()))
for param in sorted(self._params.keys()):
value = self._values.get(param)
if value != None:
print >>ini_file, '%s=%s' % (param,
self._values[param].ini_str())
for port_name in sorted(self._ports.keys()):
port = self._port_refs.get(port_name, None)
if port != None:
print >>ini_file, '%s=%s' % (port_name, port.ini_str())
print >>ini_file # blank line between objects
# generate a tree of dictionaries expressing all the parameters in the
# instantiated system for use by scripts that want to do power, thermal
# visualization, and other similar tasks
def get_config_as_dict(self):
d = attrdict()
if hasattr(self, 'type'):
d.type = self.type
if hasattr(self, 'cxx_class'):
d.cxx_class = self.cxx_class
# Add the name and path of this object to be able to link to
# the stats
d.name = self.get_name()
d.path = self.path()
for param in sorted(self._params.keys()):
value = self._values.get(param)
if value != None:
d[param] = value.config_value()
for n in sorted(self._children.keys()):
child = self._children[n]
# Use the name of the attribute (and not get_name()) as
# the key in the JSON dictionary to capture the hierarchy
# in the Python code that assembled this system
d[n] = child.get_config_as_dict()
for port_name in sorted(self._ports.keys()):
port = self._port_refs.get(port_name, None)
if port != None:
# Represent each port with a dictionary containing the
# prominent attributes
d[port_name] = port.get_config_as_dict()
return d
def getCCParams(self):
if self._ccParams:
return self._ccParams
cc_params_struct = getattr(m5.internal.params, '%sParams' % self.type)
cc_params = cc_params_struct()
cc_params.pyobj = self
cc_params.name = str(self)
param_names = self._params.keys()
param_names.sort()
for param in param_names:
value = self._values.get(param)
if value is None:
fatal("%s.%s without default or user set value",
self.path(), param)
value = value.getValue()
if isinstance(self._params[param], VectorParamDesc):
assert isinstance(value, list)
vec = getattr(cc_params, param)
assert not len(vec)
for v in value:
vec.append(v)
else:
setattr(cc_params, param, value)
port_names = self._ports.keys()
port_names.sort()
for port_name in port_names:
port = self._port_refs.get(port_name, None)
if port != None:
port_count = len(port)
else:
port_count = 0
setattr(cc_params, 'port_' + port_name + '_connection_count',
port_count)
self._ccParams = cc_params
return self._ccParams
# Get C++ object corresponding to this object, calling C++ if
# necessary to construct it. Does *not* recursively create
# children.
def getCCObject(self):
if not self._ccObject:
# Make sure this object is in the configuration hierarchy
if not self._parent and not isRoot(self):
raise RuntimeError, "Attempt to instantiate orphan node"
# Cycles in the configuration hierarchy are not supported. This
# will catch the resulting recursion and stop.
self._ccObject = -1
if not self.abstract:
params = self.getCCParams()
self._ccObject = params.create()
elif self._ccObject == -1:
raise RuntimeError, "%s: Cycle found in configuration hierarchy." \
% self.path()
return self._ccObject
def descendants(self):
    """Yield this object followed by every SimObject beneath it (pre-order)."""
    yield self
    for kid in self._children.itervalues():
        for node in kid.descendants():
            yield node

# Call C++ to create C++ object corresponding to this object
def createCCObject(self):
    """Force creation of the C++ params struct and the C++ object."""
    self.getCCParams()
    self.getCCObject() # force creation

def getValue(self):
    """As a parameter value, a SimObject resolves to its C++ object."""
    return self.getCCObject()

# Create C++ port connections corresponding to the connections in
# _port_refs
def connectPorts(self):
    """Materialize every recorded port connection on the C++ side."""
    for ref in self._port_refs.itervalues():
        ref.ccConnect()
# Function to provide to C++ so it can look up instances based on paths
def resolveSimObject(name):
    """Translate a configuration path into its instantiated C++ object."""
    return instanceDict[name].getCCObject()

def isSimObject(value):
    """True iff *value* is a SimObject instance."""
    return isinstance(value, SimObject)

def isSimObjectClass(value):
    """True iff *value* is the SimObject class or a subclass of it."""
    return issubclass(value, SimObject)

def isSimObjectVector(value):
    """True iff *value* is a SimObjectVector instance."""
    return isinstance(value, SimObjectVector)
def isSimObjectSequence(value):
    """True for a non-empty list/tuple whose items are all SimObjects or null."""
    if not isinstance(value, (list, tuple)) or len(value) == 0:
        return False
    return all(isNullPointer(item) or isSimObject(item) for item in value)

def isSimObjectOrSequence(value):
    """True for a SimObject or for a sequence of SimObjects."""
    return isSimObject(value) or isSimObjectSequence(value)

def isRoot(obj):
    """True iff *obj* is the singleton Root instance."""
    # Imported here to avoid a circular import at module load time.
    from m5.objects import Root
    return obj and obj is Root.getInstance()

def isSimObjectOrVector(value):
    """True for a SimObject or a SimObjectVector (permanent definition)."""
    return isSimObject(value) or isSimObjectVector(value)
def tryAsSimObjectOrVector(value):
    """Return *value* as a SimObject/SimObjectVector, or None if not coercible.

    Raw lists/tuples of SimObjects are wrapped in a SimObjectVector.
    """
    if isSimObjectOrVector(value):
        return value
    if isSimObjectSequence(value):
        return SimObjectVector(value)
    return None

def coerceSimObjectOrVector(value):
    """Like tryAsSimObjectOrVector(), but raise TypeError on failure."""
    value = tryAsSimObjectOrVector(value)
    if value is None:
        # Py2/Py3-compatible raise form (was the Py2-only 'raise E, msg').
        raise TypeError("SimObject or SimObjectVector expected")
    return value
# Snapshot the registries right after module import so clear() can
# restore this built-in baseline later.
baseClasses = allClasses.copy()
baseInstances = instanceDict.copy()

def clear():
    """Reset the SimObject registries to their post-import baseline."""
    global allClasses, instanceDict, noCxxHeader

    allClasses = baseClasses.copy()
    instanceDict = baseInstances.copy()
    noCxxHeader = False
# __all__ defines the list of symbols that get exported when
# 'from config import *' is invoked. Try to keep this reasonably
# short to avoid polluting other namespaces.
__all__ = [ 'SimObject' ]
|
xiaoyuanW/gem5
|
src/python/m5/SimObject.py
|
Python
|
bsd-3-clause
| 47,683
|
[
"VisIt"
] |
654829b31bbff99502bc1763c4acf2f60296d14e33e6b547570a31fabfc1517f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Room(object):
    """A named game location with a description and named exits."""

    def __init__(self, name, description):
        # Exits are filled in later via add_paths().
        self.paths = {}
        self.name = name
        self.description = description

    def go(self, direction):
        """Return the Room reached via *direction*, or None if no such exit."""
        return self.paths.get(direction)

    def add_paths(self, paths):
        """Merge *paths* (a direction -> Room mapping) into this room's exits."""
        self.paths.update(paths)
central_corridor = Room("Central Corridor",
"""
The Gothons of Planet Percal #25 have invaded your ship and destroyed
your entire crew. You are the last surviving member and your last
mission is to get the neutron destruct bomb from the Weapons Armory,
put it in the bridge, and blow the ship up after getting into an
escape pod.
You're running down the central corridor to the Weapons Armory when
a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume
flowing around his hate filled body. He's blocking the door to the
Armory and about to pull a weapon to blast you.
""")
laser_weapon_armory = Room("Laser Weapon Armory",
"""
Lucky for you they made you learn Gothon insults in the academy.
You tell the one Gothon joke you know:
Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr.
The Gothon stops, tries not to laugh, then busts out laughing and can't move.
While he's laughing you run up and shoot him square in the head
putting him down, then jump through the Weapon Armory door.
You do a dive roll into the Weapon Armory, crouch and scan the room
for more Gothons that might be hiding. It's dead quiet, too quiet.
You stand up and run to the far side of the room and find the
neutron bomb in its container. There's a keypad lock on the box
and you need the code to get the bomb out. If you get the code
wrong 10 times then the lock closes forever and you can't
get the bomb. The code is 3 digits.
""")
the_bridge = Room("The Bridge",
"""
The container clicks open and the seal breaks, letting gas out.
You grab the neutron bomb and run as fast as you can to the
bridge where you must place it in the right spot.
You burst onto the Bridge with the netron destruct bomb
under your arm and surprise 5 Gothons who are trying to
take control of the ship. Each of them has an even uglier
clown costume than the last. They haven't pulled their
weapons out yet, as they see the active bomb under your
arm and don't want to set it off.
""")
escape_pod = Room("Escape Pod",
"""
You point your blaster at the bomb under your arm
and the Gothons put their hands up and start to sweat.
You inch backward to the door, open it, and then carefully
place the bomb on the floor, pointing your blaster at it.
You then jump back through the door, punch the close button
and blast the lock so the Gothons can't get out.
Now that the bomb is placed you run to the escape pod to
get off this tin can.
You rush through the ship desperately trying to make it to
the escape pod before the whole ship explodes. It seems like
hardly any Gothons are on the ship, so your run is clear of
interference. You get to the chamber with the escape pods, and
now need to pick one to take. Some of them could be damaged
but you don't have time to look. There's 5 pods, which one
do you take?
""")
the_end_winner = Room("The End",
"""
You jump into pod 2 and hit the eject button.
The pod easily slides out into space heading to
the planet below. As it flies to the planet, you look
back and see your ship implode then explode like a
bright star, taking out the Gothon ship at the same
time. You won!
""")
the_end_loser = Room("The End",
"""
You jump into a random pod and hit the eject button.
The pod escapes out into the void of space, then
implodes as the hull ruptures, crushing your body
into jam jelly.
""")
# Wire the rooms together.  Dictionary keys are the player's typed
# actions; '*' is the catch-all entry for any other input.
escape_pod.add_paths({
    '2': the_end_winner,
    '*': the_end_loser
})

# Shared failure room reached from several bad choices below.
generic_death = Room("death", "You died.")

the_bridge.add_paths({
    'throw the bomb': generic_death,
    'slowly place the bomb': escape_pod
})

# '0132' is the keypad code described in the armory text.
laser_weapon_armory.add_paths({
    '0132': the_bridge,
    '*': generic_death
})

central_corridor.add_paths({
    'shoot!': generic_death,
    'dodge!': generic_death,
    'tell a joke': laser_weapon_armory
})

# Entry point of the map, used by the game engine.
START = central_corridor
|
renweizhukov/LearningPythonTheHardWay
|
projects/gothonweb/gothonweb/map.py
|
Python
|
lgpl-3.0
| 4,166
|
[
"BLAST"
] |
b0e5758651b7abcb000a4d85871ea5bce7629ffcdf6c3dbdfec64bcea3dea41f
|
""" The SystemAdministratorClient is a class representing the client of the DIRAC
SystemAdministrator service. It has also methods to update the Configuration
Service with the DIRAC components options
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.Core.Base.Client import Client, createClient
SYSADMIN_PORT = 9162
@createClient("Framework/SystemAdministrator")
class SystemAdministratorClient(Client):
    """Client of the Framework/SystemAdministrator DIRAC service."""

    def __init__(self, host, port=None, **kwargs):
        """Point this client at *host*; fall back to the default sysadmin port."""
        super(SystemAdministratorClient, self).__init__(**kwargs)
        target_port = port if port else SYSADMIN_PORT
        self.setServer("dips://%s:%s/Framework/SystemAdministrator" % (host, target_port))
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/Client/SystemAdministratorClient.py
|
Python
|
gpl-3.0
| 835
|
[
"DIRAC"
] |
01b8916e42eaff16d3d1d51ee8f544a010b10aeda37bd5dc1429a018ea41b5bd
|
#!/usr/bin/python2
#
# Copyright 2006 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tool for applying a file of SQL statements to a database.
Usage: trickle_file_runner --dbspec host:user:pass:db --input FILENAME \\
[ --state_database DB ] [ --state_table TABLENAME ] \\
[ --utilization_percent D ] [ --cycle_time C ]
This utility will take a given file of SQL statements and apply them
to a database in such a way as to guarantee they are applied in order,
even across restarts. It also guarantees no statement will be
executed twice or skipped. It will throttle itself, depending upon
cycle time and utilization percent.
When executing the given file, groups of statements in the SQL file
will be merged into transactions whose size varies depending on the
trickle rate. If the file does not contain BEGIN/COMMIT blocks, then
each line is considered a standalone statement. If it does contain
such blocks, then those blocks will be executed in their entirety
(though they will likely be grouped in with other such blocks inside
of a transaction).
"""
__author__ = 'bbiskebo@google.com (Brian Biskeborn)'
# Original author: chip@google.com (Chip Turner)
import hashlib
import logging
import re
import gflags
from pylib import app
from pylib import db
from pylib import trickle_lib
FLAGS = gflags.FLAGS
gflags.DEFINE_string('state_database', 'admin',
'database name to store state table in (i.e., "scratch")')
gflags.DEFINE_string('state_table', 'TrickleFileState',
'table to store state in')
gflags.DEFINE_string('dbspec', '', 'database to execute statements in')
gflags.DEFINE_string('input', '', 'filename to process as input')
gflags.DEFINE_integer('cycle_time', 5,
'seconds each batch of statements should take')
gflags.DEFINE_integer('utilization_percent', 10,
'target time per cycle to be executing statements rows')
gflags.DEFINE_integer('artificial_batch_cap', None,
'never run more than this many lines per batch')
gflags.DEFINE_boolean('init_state_table', False,
'populate the state table with file checksums and exit')
gflags.DEFINE_boolean('allow_warnings', False,
'continue after sql statements that give warnings')
gflags.DEFINE_string('session_init', None,
'SQL commands to run to initialize the session, '
'e.g. SET LOG_BIN=0')
# regexes we use to help identify transaction blocks
valid_sql_re = re.compile(
r'(INSERT (IGNORE )?INTO|REPLACE INTO|UPDATE|DELETE)\s', re.IGNORECASE)
begin_re = re.compile(r'^BEGIN;?\s*$', re.IGNORECASE)
commit_re = re.compile(r'^COMMIT;?\s*$', re.IGNORECASE)
def ValidSqlLine(line):
"""Verify that a given line of text contains valid SQL for the filerunner.
For our purposes, valid means one of:
BEGIN, COMMIT, INSERT INTO, INSERT IGNORE INTO, REPLACE INTO,
DELETE FROM, UPDATE.
Args:
line: string to validate
Returns:
Boolean, True if the line is valid.
"""
return (valid_sql_re.match(line) or
begin_re.match(line) or
commit_re.match(line))
def _PrepareFile(filename):
"""Open a file, validate it and compute some identifying information.
Args:
filename: file to work on
Returns:
A file handle, the file size, and the file's SHA1 checksum.
The file handle is positioned at end of file.
"""
fh = open(filename, 'r')
checksummer = hashlib.sha1()
filesize = 0
for line in fh:
if not ValidSqlLine(line):
logging.fatal('File contains invalid sql: %s', line)
filesize += len(line)
checksummer.update(line)
return fh, filesize, checksummer.hexdigest()
def InitStateTable(dbh, state_db, state_table, fh, fsize, fchecksum):
  """Add file information to the state table.

  If a row already exists for the specified file name, that row is replaced.
  Columns are (Filename, Size, Checksum, Offset, <aux>); Offset starts at 0.
  """
  # NOTE(review): values are interpolated straight into the SQL string;
  # acceptable for this trusted admin tool, but not safe for untrusted
  # file names.
  dbh.ExecuteOrDie('REPLACE INTO %s.%s VALUES ("%s", %d, "%s", 0, NULL)' %
                   (state_db, state_table, fh.name, fsize, fchecksum))
class DbFileTrickler(trickle_lib.TrickledOperation):
  """A class to represent trickling a file of SQL statements into a database."""

  def __init__(self, dbh, utilization_percent, cycle_time,
               state_database, state_table, fh, size, checksum):
    """Constructor. Use _PrepareFile() to get the file handle/size/checksum.

    Args:
      dbh: pylib.db handle to the database
      utilization_percent: percent of database time to try to use
      cycle_time: duration of a cycle of insert/sleeps
      state_database: database name to store the state table in
      state_table: table name to store our state in
      fh: handle to the file to trickle statements from
      size: size of the open file in bytes
      checksum: checksum of the open file
    """
    trickle_lib.TrickledOperation.__init__(self, utilization_percent,
                                           cycle_time)
    self._db = dbh
    self._filename = fh.name
    self._fh = fh
    self._size = size
    self._checksum = checksum
    # Byte offset into the file; updated from the state table on resume.
    self._offset_bytes = 0
    self._state_database = state_database
    self._state_table = state_table
    # Fail fast if the state table disagrees with the file on disk.
    self._VerifyStateDatabase()
def _GetProgress(self):
"""Report our progress for use in status messages."""
return "%d%% done" % (self._offset_bytes * 100 / self._size)
def _VerifyStateDatabase(self):
"""Check that the state table exists and create an entry for our input file.
If an entry for the input file already exists, verify its size and
checksum.
"""
rows = self._db.ExecuteOrDie(
'SELECT Checksum, Size, Offset FROM %s.%s WHERE Filename = "%s"' %
(self._state_database, self._state_table, self._filename))
if not rows:
logging.info('Creating row in state table')
with self._db.Transaction():
self._db.ExecuteOrDie(
'INSERT INTO %s.%s VALUES ("%s", %d, "%s", 0, NULL)' %
(self._state_database, self._state_table, self._filename,
self._size, self._checksum))
else:
if self._size != long(rows[0]['Size']):
logging.fatal('database filesize does not match actual file: %s vs %s',
self._size, rows[0]['Size'])
if self._checksum != rows[0]['Checksum']:
logging.fatal('SHA-1 checksum mismatch on file vs database')
self._offset_bytes = rows[0]['Offset']
logging.info('Resuming at offset %d', self._offset_bytes)
def _SetupTrickle(self):
"""Determine the current file offset based on our state table.
Called once before the first call to _PerformTrickle().
"""
row = self._db.ExecuteOrDie(
'SELECT Offset FROM %s.%s WHERE Filename = "%s"' %
(self._state_database, self._state_table, self._filename))
assert len(row) == 1 and len(row[0]) == 1
self._offset_bytes = long(row[0]['Offset'])
def _FinalizeTrickle(self):
pass
def _Finished(self):
"""Have we finished processing the file?"""
return self._offset_bytes == self._size
def _PerformTrickle(self, batch_size):
"""Execute batch_size blocks and update the state table in a transaction.
Args:
batch_size: number of blocks to execute. A block is either a single line
in the input file, or a group of lines between BEGIN/COMMIT
statements.
Returns:
Number of blocks actually executed.
"""
starting_offset = self._offset_bytes
self._fh.seek(self._offset_bytes)
# pre-read the data to avoid interleaving disk reads with database writes.
#
# also, break the input file into batches. if we see a BEGIN then
# we must read through to the next COMMIT even if we would be
# bigger than batch size.
# batch is a list of lists. each entry represents a transaction
# (ie, list of statements) to be applied to the database.
batch = []
while len(batch) < batch_size:
line = self._fh.readline()
if not line:
break
if not ValidSqlLine(line):
logging.fatal('Encountered invalid sql: %s', line)
if begin_re.match(line):
transaction = []
while 1:
next_line = self._fh.readline()
if not next_line:
logging.fatal('Input file terminated inside of BEGIN block')
elif not ValidSqlLine(next_line):
logging.fatal('Encountered invalid sql: %s', next_line)
elif begin_re.match(next_line):
logging.fatal('Attempt to nest transactions')
elif commit_re.match(next_line):
break
else:
transaction.append(next_line)
batch.append(transaction)
elif commit_re.match(line):
logging.fatal('Attempt to commit outside of transaction')
else:
batch.append([line])
self._offset_bytes = self._fh.tell()
with self._db.Transaction():
for transaction in batch:
for statement in transaction:
try:
self._db.ExecuteOrDie(statement)
except db.QueryWarningsException as e:
logging.warn('SQL generated warning: %s, %s', str(e), statement)
if not FLAGS.allow_warnings:
raise
result = self._db.ExecuteOrDie(
'UPDATE %s.%s SET Offset = %d WHERE Filename = "%s" AND Offset = %d' %
(self._state_database, self._state_table,
self._offset_bytes, self._filename, starting_offset))
if result.GetRowsAffected() != 1:
logging.fatal('Attempt to update database state but something '
'already changed it')
return len(batch)
def main(unused_argv):
  """Entry point: validate flags, then either init the state table or trickle.

  Two mutually exclusive modes selected by --init_state_table:
    * init mode records the input file's size/checksum in the state table;
    * trickle mode replays the file's SQL statements into the database.

  Returns:
    1 on missing required flags (after printing usage); None otherwise.
  """
  if not (FLAGS.state_database and FLAGS.dbspec and FLAGS.input):
    app.usage(shorthelp=1)
    return 1
  logging.info('Checking file %s', FLAGS.input)
  # Validates every line and computes size/checksum up front, so a bad file
  # is rejected before any database work starts.
  fh, size, checksum = _PrepareFile(FLAGS.input)
  dbspec = db.Spec.Parse(FLAGS.dbspec)
  with dbspec.Connect() as dbh:
    if FLAGS.session_init:
      logging.info('Initializing session with: %s', FLAGS.session_init)
      dbh.ExecuteOrDie(FLAGS.session_init)
    if FLAGS.init_state_table:
      logging.info('Setting up the state table')
      InitStateTable(dbh, FLAGS.state_database, FLAGS.state_table,
                     fh, size, checksum)
    else:
      logging.info('Starting the trickle')
      trickler = DbFileTrickler(dbh, FLAGS.utilization_percent,
                                FLAGS.cycle_time, FLAGS.state_database,
                                FLAGS.state_table, fh, size, checksum)
      if FLAGS.artificial_batch_cap:
        # NOTE(review): presumably caps the adaptive batch size for testing
        # or extra caution -- confirm against trickle_lib.
        trickler.SetBatchSizeLimit(FLAGS.artificial_batch_cap)
      trickler.Trickle()
  logging.info('Done')
if __name__ == '__main__':
  # NOTE(review): -1 appears to disable log buffering so progress messages
  # show up immediately during a long trickle -- confirm against the
  # logbuflevel flag's definition.
  FLAGS.logbuflevel = -1
  app.run()
|
sdgdsffdsfff/google-mysql-tools
|
trickle_file_runner.py
|
Python
|
apache-2.0
| 11,334
|
[
"Brian"
] |
fb7f3916251498f58aca531405c85d00f2f760b1f02aa93fc200856508e48a65
|
# -*- coding: utf-8 -*-
# Code for Life
#
# Copyright (C) 2015, Ocado Innovation Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS – Section 7 GNU General Public Licence
#
# This licence does not grant any right, title or interest in any “Ocado” logos,
# trade names or the trademark “Ocado” or any other trademarks or domain names
# owned by Ocado Innovation Limited or the Ocado group of companies or any other
# distinctive brand features of “Ocado” as may be secured from time to time. You
# must not distribute any modification of this program using the trademark
# “Ocado” or claim any affiliation or association with Ocado or its employees.
#
# You are not authorised to use the name Ocado (or any of its trade names) or
# the names of any author or contributor in advertising or for publicity purposes
# pertaining to the distribution of this program, without the prior written
# authorisation of Ocado.
#
# Any propagation, distribution or conveyance of this program must include this
# copyright notice and these terms. You must not misrepresent the origins of this
# program; modified versions of the program must be marked as such and not
# identified as the original program.
from django.utils.translation import ugettext
def youtubeLink(width, height, url, border):
    """Build the embed markup for a YouTube video.

    Returns an <iframe> tag (followed by a <br>) pointing at *url* with
    related-video suggestions disabled via ?rel=0.
    """
    return (
        "<iframe width='%s' height='%s' src='%s?rel=0' frameborder='%s'"
        " allowfullscreen class='video'></iframe><br>"
        % (width, height, url, border)
    )
def noPermissionMessage():
    """Generic no-permission error shown to users."""
    return ugettext("You have no permission to see this.")


def notSharedLevel():
    """Shown when a user opens a private level that was not shared with them."""
    return ugettext("This level is private. You can only see the public levels and the ones "
                    + "created by other users only if they share them with you.")


""" Strings used in the scoreboard. """


def noPermissionTitle():
    # Title shown alongside noPermissionScoreboard().
    return ugettext("No permission ")


def noPermissionScoreboard():
    return ugettext("Scoreboard is only visible to school students and teachers. Log in if you "
                    + "think you should be able to see it. ")


def noDataToShow():
    return ugettext("There is no data to show. Please contact your administrator if this is "
                    + "unexpected. ")


""" Strings used on the level moderation page. """


def noPermissionLevelModerationTitle():
    return ugettext("No permission ")


def noPermissionLevelModerationPage():
    return ugettext("Level moderation is only visible to teachers. Log in if you "
                    + "think you should be able to see it. ")


def noPermissionLevelModerationClass():
    return ugettext("You do not teach this class. Please contact your administrator if this "
                    + "is unexpected.")


def noPermissionLevelModerationStudent():
    return ugettext("You do not teach this student. Please contact your administrator if this "
                    + "is unexpected.")


def noDataToShowLevelModeration():
    # NOTE(review): the msgid has a grammar slip ("therefore is no data");
    # fixing it would change the msgid and require updating translation
    # catalogues, so it is only flagged here.
    return ugettext("You have not created any classes and therefore is no data to show. " +
                    "Please contact your administrator if this is unexpected.")


""" String messages used on the settings page. """


def shareTitle():
    """Heading for the level-sharing section of the settings page."""
    return ugettext("Level Share")
def shareSuccessfulPerson(name, surname):
    """Confirmation shown after sharing a level with one person.

    Fix: the literal template (placeholders intact) is passed to ugettext so
    the translation catalogue can match it; interpolation happens on the
    translated string.  The original called .format() *before* ugettext,
    producing a dynamic msgid that could never be translated.  English
    output is unchanged.
    """
    return ugettext("You shared your level with {0} {1} successfully! ").format(name, surname)
def shareSuccessfulClass(className):
    """Confirmation shown after sharing a level with a whole class.

    Fix: translate the literal template first, then interpolate -- the
    original formatted before translating, making the msgid dynamic and
    untranslatable.  English output is unchanged.
    """
    return ugettext("You shared your level with class {0} successfully! ").format(className)
def shareUnsuccessfulPerson(first_name, last_name):
    """Error shown when the named person could not be found for sharing.

    Fix: the whole template is translated first and the %-interpolation is
    applied to the *translated* string.  The original interpolated part of
    the message before calling ugettext, so the msgid contained user data
    and could never match a catalogue entry.  English output is unchanged.
    """
    return ugettext("We were unable to find %(name)s %(surname)s. "
                    "Are you sure you got their name right?") % {
                        'name': first_name, 'surname': last_name}
def shareUnsuccessfulClass(className):
    """Error shown when the named class could not be found for sharing.

    Fix: interpolate after translation instead of before, so the msgid
    stays a constant the translation catalogue can match.  English output
    is unchanged.
    """
    return ugettext("We were unable to find class %(class)s. Are you sure you got it right?"
                    ) % {'class': className}
def noLevelsToShow():
    """Placeholder shown on the settings page when the user has no levels."""
    return ugettext("It seems that you have not created any levels. How about creating one "
                    + "now? ")


def levelsMessage():
    """Intro text above the list of the user's own levels."""
    return ugettext("All the levels you have created so far. Click on them to play them or "
                    + "share them with your friends. ")


def sharedLevelsMessage():
    """Intro text above the list of levels shared with the user."""
    return ugettext("All the levels created by others that were shared with you. Click on "
                    + "them to play them")


def noSharedLevels():
    return ugettext("No one shared a level with you yet. ")


""" Strings used in the class view. """


def chooseClass():
    return ugettext("Choose a class you want to see. ")


def noPermission():
    return ugettext("You don't have permissions to see this. ")
""" String messages used as level tips in the game view. """
def title_night_mode():
    """Title for night-mode levels (note: not passed through ugettext)."""
    title = 'Can you find your way in the dark?'
    return title
def build_description(title, message):
    """Prefix *message* with a bolded *title* separated by a blank line (two <br>s)."""
    return "<b>%s</b><br><br>%s" % (title, message)
def title_level_default():
return ugettext(" ")
def description_level_default():
message = ugettext("Can you find the shortest route? ")
return message
def hint_level_default():
message = ugettext("Think back to earlier levels. What did you learn? ")
return message
def title_level1():
return ugettext("Can you help the van get to the house? ")
def description_level1():
message = ugettext("Choose the right blocks to tell the van where to go. <br> Drag the "
+ "blocks under the <b>Start</b> block to attach them. <br> To remove a "
+ "block, drag it into the bin in the bottom right of the screen. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level1(), message)
def hint_level1():
message = ugettext("Drag the <b>Move forwards</b> block so that it is under the <b>Start</b> "
+ "block - close enough to be touching. <br><br>"
+ "Clicking on the arrows next to the <b>Go</b> button will also drag the "
+ "blocks into a sequence for you. <br><br>"
+ "Don't forget to press <b>Go</b> when you are done. ")
return message
def title_level2():
return ugettext("This time the house is further away. ")
def description_level2():
message = ugettext("A block can be placed next to or under another, like a jigsaw. A second "
+ "<b>Move forwards</b> block can be placed under the first <b>Move "
+ "forwards</b> block. <br> To remove a block, drag it back to the "
+ "left of the screen or drop it in the bin. <br> When you are happy with "
+ "your sequence, press <b>Go</b>! ")
return build_description(title_level2(), message)
def hint_level2():
message = ugettext("A second <b>Move forwards</b> block can be placed under the first <b>Move "
+ "forwards</b> block. <br><br>"
+ "The arrows next to the <b>Go</b> button will drag the blocks into a "
+ "sequence for you. ")
return message
def title_level3():
return ugettext("Can you make the van turn right? ")
def description_level3():
message = ugettext("This time, the van has to turn right to reach the house. Make sure you use "
+ "the <b>Turn right</b> block in your sequence. <br> Drag the blocks "
+ "and attach them under the <b>Start</b> block like before. To remove a "
+ "block, drag it back to the left of the screen or drop it in the bin. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level3(), message)
def hint_level3():
message = ugettext("A <b>Turn right</b> block can be placed under the first <b>Move "
+ "forwards</b> block. <br><br> The arrows next to the <b>Go</b> button "
+ "will drag the blocks into a sequence for you. ")
return message
def title_level4():
return ugettext("You are getting good at this! Let's try turning left. ")
def description_level4():
message = ugettext("This time the van has to go left. Make sure you use the <b>Turn left</b> "
+ "block in your sequence. <br> Drag and attach the blocks like before. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level4(), message)
def hint_level4():
message = ugettext("A <b>Turn left</b> block can be placed under a series of <b>Move "
+ "forwards</b> blocks. <br> The arrows next to the <b>Go</b> button will "
+ "drag the blocks into a sequence for you. ")
return message
def title_level5():
return ugettext("Good work! You are ready for something harder. ")
def description_level5():
message = ugettext("You already know how to make the van turn left or right. This time "
+ "the van has to make lots of turns to reach the house. <br> Drag and "
+ "attach the blocks to make your sequence."
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level5(), message)
def hint_level5():
message = ugettext("This road starts by curving to the <b>left</b>. Then it curves to the "
+ "<b>right</b>. <br><br> The arrows next to the <b>Go</b> button will drag "
+ "the blocks into a sequence for you. ")
return message
def title_level6():
return ugettext("Well done! Let's use all three blocks. ")
def description_level6():
message = ugettext("This time the van has to <b>Move forwards</b>, <b>Turn left</b> and "
+ "<b>Turn right</b>. <br><br> Drag and attach the blocks like before. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level6(), message)
def hint_level6():
message = ugettext("Follow the road around. How many <b>Move forwards</b> do you need? <br><br>"
+ "The arrows next to the <b>Go</b> button will drag the blocks into a "
+ "sequence for you. ")
return message
def title_level7():
return ugettext("This road is more complicated. ")
def description_level7():
message = ugettext("Practise your new skills on this road by helping the driver to arrive at "
+ "the house. <br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level7(), message)
def hint_level7():
message = ugettext("Follow the road around. Don't forget to <b>Turn left</b> first. <br><br>"
+ "The arrows next to the <b>Go</b> button will drag the blocks into a "
+ "sequence for you.")
return message
def title_level8():
return ugettext("The warehouse is not always in the same place. ")
def description_level8():
message = ugettext("This time the warehouse is somewhere else on the screen but you still need "
+ "to use the <b>Move forwards</b> block. <br> Can you use the <b>Move "
+ "forwards</b> block correctly even when it looks like the van goes in a "
+ "different direction? "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level8(), message)
def hint_level8():
message = ugettext("On screen, the van looks like it follows the road down. If you were in the "
+ "van, it would look like you should <b>Move forwards</b>, then <b>Turn "
+ "right</b>. ")
return message
def title_level9():
return ugettext("Can you go from right to left? ")
def description_level9():
message = ugettext("Practise your new skills on this road by helping the driver to arrive "
+ "at the house. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level9(), message)
def hint_level9():
message = ugettext("How many times do you have to <b>Move forwards</b> before you "
+ "<b>Turn left</b>? ")
return message
def title_level10():
return ugettext("Well done! How about another go? ")
def description_level10():
message = ugettext("You've done really well so far. Try to get the van to the house. "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level10(), message)
def hint_level10():
    """Hint text for level 10.

    Fix: removes the duplicated phrase in the original user-facing string
    ("if you were in the in the van").  NOTE: this changes the msgid, so
    translation catalogues must be updated to match.
    """
    message = ugettext("This map is not so hard. Notice that to you it looks like the road goes "
                       + "up, but if you were in the van, you would see the road goes "
                       + "right. <br><br> Do you know which turn the van will take next? <br><br> "
                       + "The arrows next to the <b>Go</b> button will drag the blocks into a "
                       + "sequence for you. ")
    return message
def title_level11():
return ugettext("Snail maze! ")
def description_level11():
message = ugettext("Uh oh, a tricky snail maze! Can you take the van through it? "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level11(), message)
def hint_level11():
message = ugettext("The maze looks a bit like a snail, doesn't it? That means that for most of "
+ "the time the van should only <b>Move forwards</b> and <b>Turn right</b>. "
+ "<br><br> The arrows next to the <b>Go</b> button will drag the blocks "
+ "into a sequence for you. ")
return message
def title_level12():
return ugettext("This road is more complicated. ")
def description_level12():
message = ugettext("Good work, by now you are able to solve quite complicated levels. Prove "
+ "your skills! "
+ "<br> When you are happy with your sequence, press <b>Go</b>! ")
return build_description(title_level12(), message)
def hint_level12():
message = ugettext("This road might look much longer and more complicated, but it's not that "
+ "hard. <br> Start by using <b>Move forwards</b> a few steps and <b>Move "
+ "left</b>. ")
return message
def title_level13():
return ugettext("Multiple routes")
def description_level13():
message = ugettext("Often there is more than one way to get to the house. The route that needs "
+ "the fewest directions is usually best. <br> Help the van find the "
+ "shortest route to the house. <br> You can press the <b>Go</b> or "
+ "<b>Play</b> buttons to start the van. ")
return build_description(title_level13(), message)
def hint_level13():
message = ugettext("Try taking the route that starts by turning left then turns right. Do you "
+ "know what follows next? ")
return message
def title_level14():
return ugettext("Can you spot the shortest route? ")
def description_level14():
message = ugettext("So many options to choose from! <br> Do you know which is the shortest "
+ "route to get the van to house? ")
return build_description(title_level14(), message)
def hint_level14():
message = ugettext("The middle route seems to be shortest. Do you know what sequence of "
+ "instructions will make the van follow it?")
return message
def title_level15():
return ugettext("What if there is more than one delivery? ")
def description_level15():
message = ugettext("Our vans often need to go to more than one house. To make the van deliver "
+ "to a house use the <b>Deliver</b> block. <br> Make sure your sequence "
+ "gets the van to travel the shortest route! ")
return build_description(title_level15(), message)
def hint_level15():
message = ugettext("Make the van turn left and go directly to the closest house first. This is "
+ "the shortest route. <br><br> The <b>Deliver</b> block is not needed when "
+ "the van is only going to one house, but you need it when the van is "
+ "going to two or more houses. <br><br> Use the <b>Deliver</b> block every "
+ "time the van gets to a house. ")
return message
def title_level16():
return ugettext("This time there are even more houses. ")
def description_level16():
message = ugettext("Well done! You have done really well to get so far - let's take it to the "
+ "next level and add another house. <br> Can you work out the shortest, "
+ "most efficient route to each house? ")
return build_description(title_level16(), message)
def hint_level16():
message = ugettext("Although the <b>Deliver</b> block is not needed when there is only one "
+ "house, you need it when there are more houses, like now. <br><br>"
+ "Once the van is at a house, make sure you use the <b>Deliver</b> block. "
+ "Do that for each house. ")
return message
def title_level17():
return ugettext("House overload! ")
def description_level17():
message = ugettext("Well done, you're getting a hang of it! Can you do the same for even more "
+ "houses?<br> Don't forget to use the <b>Deliver</b> block at each house. ")
return build_description(title_level17(), message)
def hint_level17():
message = ugettext("Test your sequence to make sure that the van takes the shortest route to "
+ "visit all the houses on the way. <br><br> Use the <b>Deliver</b> block "
+ "every time the van gets to a house. ")
return message
def title_level18():
return ugettext("This one is quite a tangle. ")
def description_level18():
message = ugettext("Practise your new skills on this road by getting the van to <b>Deliver</b> "
+ "to each of the houses. ")
return build_description(title_level18(), message)
def hint_level18():
message = ugettext("To make sure the van takes the shortest route, first turn left. <br><br> "
+ "Use the <b>Deliver</b> block every time the van gets to a house. ")
return message
def title_level19():
return ugettext("Repeating yourself is boring.")
def description_level19():
message = youtubeLink(600, 400, "//www.youtube.com/embed/vFGd0v3msRE", 0)
message += ugettext("Attach a block inside the <b>Repeat</b> block to make the van repeats "
+ "that instruction. <br> This means you can use one block instead of lots "
+ "of blocks to do the same thing over and over again. <br> How many times "
+ "do you want the instruction repeated? Type the number into the "
+ "<b>Repeat</b> block. <br> The repeated sets of blocks make a 'loop'. "
+ "<br><br> When you are ready, press <b>Play</b>! ")
return build_description(title_level19(), message)
def hint_level19():
message = ugettext("A <b>Move forwards</b> block can be placed inside a <b>Repeat</b> block "
+ "(to the right of the word 'Do'). <br><br> Don't forget to change the "
+ "number of times you need to repeat the instruction. ")
return message
def title_level20():
return ugettext("Use the <b>Repeat</b> block to make your sequence shorter and simpler. ")
def description_level20():
message = ugettext("You drove the van down this road on Level 5. This time, use the "
+ "<b>Repeat</b> block to get the van to the house. <br> This will make "
+ "your sequence shorter and simpler than last time.")
return build_description(title_level20(), message)
def hint_level20():
message = ugettext("This level can be broken down into three repeated sets of: <b>Turn "
+ "left</b>, then <b>Turn right</b>. <br><br> These repeated steps make a "
+ "'loop'. ")
return message
def title_level21():
return ugettext("Four leaf clover.")
def description_level21():
message = ugettext("This path looks a bit like a four leaf clover. Can you take the driver "
+ "through it? ")
return build_description(title_level21(), message)
def hint_level21():
    """Hint text for level 21.

    Fix: closes the previously unclosed bold tag around "Turn right"
    ("<b>Turn right<b>" -> "<b>Turn right</b>"), which would otherwise
    leave the rest of the rendered hint bold.  NOTE: msgid change --
    translation catalogues must be updated to match.
    """
    message = ugettext("This level can be broken down into repeated sets of: <b>Move forwards</b>, "
                       + "<b>Turn left</b>, <b>Turn right</b>, <b>Turn left</b>. ")
    return message
def title_level22():
return ugettext("Now things are getting quite long and complicated. ")
def description_level22():
message = ugettext("An algorithm (a set of instructions in a particular order) to get the van "
+ "to the house might not be very simple, but it can be made shorter by "
+ "using the <b>Repeat</b> blocks. <br> Are you up for this challenge? ")
return build_description(title_level22(), message)
def hint_level22():
message = ugettext("Look to see where you have used <b>Move forwards</b>, <b>Turn "
+ "left</b> and <b>Turn right</b> blocks. Are any blocks next to them the "
+ "same? Put them into one <b>Repeat</b> block. Don't forget to change the "
+ "number of times you need to repeat the instruction. ")
return message
def title_level23():
return ugettext("Sssssssssnake!")
def description_level23():
message = ugettext("This road seems to be winding just like a snake! Can you find a nice and "
+ "simple route to get the van to the house? ")
return build_description(title_level23(), message)
def hint_level23():
message = ugettext("How about using <b>Repeat</b> inside another <b>Repeat</b>? <br><br> This "
+ "level can be broken down into sets of: "
+ "<li> a set (nested loop) of <b>Move forwards</b>, </li> "
+ "<li> two <b>Turn left</b>s, </li> "
+ "<li> a set (nested loop) of <b>Move forwards</b>, </li> "
+ "<li> two <b>Turn right</b>s. </li>")
return message
def title_level24():
return ugettext("The road is very long and very bendy.")
def description_level24():
message = ugettext("Wow! Look at that! It won't get more complicated than this, we promise.")
return build_description(title_level24(), message)
def hint_level24():
message = ugettext("With all these twists and turns, you will have to think hard about what "
+ "sets of repeated instructions to use. <br><br>")
return message
def title_level25():
return ugettext("Waterfall level. ")
def description_level25():
message = ugettext("Since you did so well with the repeat loops, have a go at this level. ")
return build_description(title_level25(), message)
def hint_level25():
message = ugettext("Most of the program will consist of repeated sets of <b>Move forwards</b> "
+ "and a set of <b>Turn right</b> and <b>Turn left</b>. ")
return message
def title_level26():
return ugettext("Winter wonderland!")
def description_level26():
message = ugettext("Notice the snow! You can create new levels with different 'themes' of "
+ "backgrounds and decorations in the Level Editor. But first, try getting "
+ "the van to the house! ")
return build_description(title_level26(), message)
def hint_level26():
message = ugettext("Break the program into two <b>Repeat</b>s with a <b>Turn left</b> in "
+ "between them. ")
return message
def title_level27():
return ugettext("Farmyard")
def description_level27():
message = ugettext("What a muddy road! Can you help Dee find her way from the barn to the "
+ "house? ")
return build_description(title_level27(), message)
def hint_level27():
message = ugettext("Make sure you drag the correct turns into your <b>Repeat</b> block. ")
return message
def title_level28():
return ugettext("The big city")
def description_level28():
message = ugettext("Can you get the van from the warehouse to the house? Don't stop at any "
+ "shops on the way! ")
return build_description(title_level28(), message)
def hint_level28():
message = ugettext("Make sure you drag the correct turns into your <b>Repeat</b> block.")
return message
def title_level29():
return ugettext("No need for numbers. ")
def description_level29():
message = youtubeLink(600, 400, "//www.youtube.com/embed/EDwc80X_LQI", 0)
message += ugettext("Drag a block inside a <b>Repeat until</b> block to make the van repeat an "
+ "instruction. <br> Attach a 'condition' so the van knows when to stop "
+ "repeating the instruction. <br> Here, you want the van to repeat your "
+ "instruction until it is at the destination. <br> Doing this means "
+ "you don't have to work out how many times the van should repeat your "
+ "instruction. ")
return build_description(title_level29(), message)
def hint_level29():
    """Hint text for level 29.

    Fix: the original markup had three opening <b> tags but only two
    closing ones ("<b>Repeat <b>until</b> <b>at destination ...</b>"),
    leaving the page bold after the hint.  The sentence is now wrapped in
    one balanced pair, matching the style of hint_level30.  NOTE: msgid
    change -- translation catalogues must be updated to match.
    """
    message = ugettext("The blocks should read like a sentence: '<b>Repeat until at "
                       + "destination do: Move forwards</b>'. ")
    return message
def title_level30():
return ugettext("Can you do that again? ")
def description_level30():
    """Description for level 30.

    Fix: closes the previously unclosed bold tag around "Repeat until"
    ("<b>Repeat until<b>" -> "<b>Repeat until</b>").  NOTE: msgid change --
    translation catalogues must be updated to match.
    """
    message = ugettext("Well done, you did it! Now have a go at using the <b>Repeat until</b> block "
                       + "on a road with lots of turns. ")
    return build_description(title_level30(), message)
def hint_level30():
message = ugettext("The blocks should read like a sentence: '<b>Repeat until at "
+ "destination</b> <b>do</b>: <b>Turn left</b>, <b>Turn right</b>'. ")
return message
def title_level31():
return ugettext("Practice makes perfect. ")
def description_level31():
message = ugettext("Have another go to make sure you have got the hang of it. ")
return build_description(title_level31(), message)
def hint_level31():
message = ugettext("This program can be broken into repeated sets of <b>Turn left</b>, <b>Turn "
+ "right</b> and two <b>Move forwards</b>. ")
return message
def title_level32():
return ugettext("Uh oh, it's <b>Until</b> fever! ")
def description_level32():
message = ugettext("Good job! Can you help the driver reach the destination again? ")
return build_description(title_level32(), message)
def hint_level32():
message = ugettext("This program is quite similar to the one you just solved. Do you remember "
+ "the solution you came up with back then? ")
return message
def title_level33():
return ugettext("Now it's time to try the <b>If</b> block. ")
def description_level33():
message = youtubeLink(600, 400, "//www.youtube.com/embed/O0RXbJyYq8o", 0)
message += ugettext("Another way of telling the van what to do is to use the <b>If</b> block. "
+ "For example, <b>If</b> the <b>road exists forwards do</b> <b>Move "
+ "forwards</b>. <br> This is called an 'if statement'. <br> Try "
+ "using the <b>If</b> block and the <b>Repeat</b> block together. <br> "
+ "The <b>Repeat</b> block will stretch if you attach the <b>If</b> block "
+ "inside it. ")
return build_description(title_level33(), message)
def hint_level33():
message = ugettext("We say that the road 'exists' in a direction. For example, if the road "
+ "goes forwards we say that it 'exists forwards'. <br><br> "
+ "<b>If</b> a <b>road exists forwards</b> then <b>do Move forwards</b>."
+ "<br><br>Repeat this set to get the van to the house. ")
return message
def title_level34():
return ugettext("Multiple <b>If</b>s")
def description_level34():
message = ugettext("It can be handy to use <b>If</b> to give your van choices, so you don't "
+ "have to give the van new instructions at every step. <br> For "
+ "example: Tell the van <b>If</b> the <b>road exists forwards do Move "
+ "forwards,</b> but <b>If</b> the <b>road exists left do Turn left</b>. "
+ "<br> The van will choose correctly from the <b>Move forwards</b> and "
+ "<b>Turn left</b> instructions depending on the road. <br> Use an 'if "
+ "statement' in a 'loop' to drive the van down this bendy road. ")
return build_description(title_level34(), message)
def hint_level34():
message = ugettext("At each bend the van can either <b>Move forwards</b> or <b>Turn left</b>. "
+ "Create a loop so it can make the correct choice. <br><br> We say that "
+ "the road 'exists' in a direction. For example, if the road goes forwards "
+ "we say that it 'exists forwards'. ")
return message
def title_level35():
return ugettext("Let's put it all together!")
def description_level35():
message = ugettext("You have discovered the magic of 'if statements'. Can you make a program "
+ "that uses <b>Move forwards</b>, <b>Turn left</b> and <b>Turn right</b> "
+ "to get the van to the house. ")
return build_description(title_level35(), message)
def hint_level35():
message = ugettext("At each bend the van can either <b>Move forwards</b> or <b>Turn left</b>. "
+ "Create a loop so it can make the correct choice. <br><br> We say that "
+ "the road 'exists' in a direction. For example, if the road goes forwards "
+ "we say that it 'exists forwards'. ")
return message
def title_level36():
return ugettext("What else? If-else, that's what! ")
def description_level36():
message = youtubeLink(600, 400, "//www.youtube.com/embed/GUUJSRuAyU0", 0)
message += ugettext("You can change the <b>If</b> block to make more choices. Click on the "
+ "star in the <b>If</b> block and add <b>Else if</b>. <br> This will tell "
+ "the van what to do if the first <b>If</b> direction can't be done. "
+ "<br> For example, tell the van to <b>Turn left</b> <b>If</b> the "
+ "<b>road exists left</b>. Add <b>Else if</b> the <b>road exists right"
+ "</b>, <b>Turn right</b>. <br> This uses fewer blocks and makes sure "
+ "that only one step is taken in each loop. <br> This type of "
+ "algorithm is called a 'general algorithm' as it can be used with most "
+ "simple routes. ")
return build_description(title_level36(), message)
def hint_level36():
message = ugettext("The program should be a simple set of: <b>If road exists forwards do</b> "
+ "<b>Move forwards</b>, <b>Else if road exists left do Turn left</b>, "
+ "<b>Else if road exists right do Turn right</b>. <br><br> You can find "
+ "<b>Else if</b> by clicking the star on the <b>If</b> block and adding "
+ "the <b>Else if</b>.<br><br> If the first 'condition' is true (this means "
+ "if the road exists in the direction you put first) the van will follow "
+ "the blocks after <b>If</b>. <br><br> If not, the van will check to see "
+ "if it can follow the direction you put after <b>Else if</b>. It will "
+ "keep checking until it has a direction it can take. ")
return message
def title_level37():
return ugettext("A bit longer.")
def description_level37():
message = ugettext("Let's see if we can go further - this road is longer. Notice that the "
+ "length of the road does not change the length of your program! ")
return build_description(title_level37(), message)
def hint_level37():
message = ugettext("Think back to the solutions you produced using 'if statements' before. ")
return message
def title_level38():
return ugettext("Third time lucky! ")
def description_level38():
message = ugettext("Well done! You've got so far. <br> Can you apply the knowledge you "
+ "gained going through this part of the game to this level? ")
return build_description(title_level38(), message)
def hint_level38():
message = ugettext("Think back to the solutions you produced using 'if statements' before. ")
return message
def title_level39():
return ugettext("Dead ends! ")
def description_level39():
message = ugettext("Can you change the 'general algorithm' so that the van takes a shorter "
+ "route? <br> What if you change the order the van checks for "
+ "directions? <br> Keep an eye on the fuel level - try to use as "
+ "little as possible. ")
return build_description(title_level39(), message)
def hint_level39():
message = ugettext("Make the van check if the road exists right before it checks if the road "
+ "exists left. <br><br> Then it will be able to reach the destination "
+ "using the 'general algorithm'. Can you see why? ")
return message
def title_level40():
    """Title shown for level 40."""
    title = ugettext("Adjust your previous solution.")
    return title
def description_level40():
message = ugettext("Can you think of a way you could change the 'general algorithm' you have "
+ "implemented earlier to make sure the van driver reaches the house having "
+ "travelled the shortest route? ")
return build_description(title_level40(), message)
def hint_level40():
message = ugettext("The 'general algorithm' will work here. <br><br> Make sure you change the "
+ "order the van checks for directions to take the shortest route to the "
+ "destination. ")
return message
def title_level41():
return ugettext("Decision time. ")
def description_level41():
message = ugettext("Do you think changes to the 'general algorithm' will help the van find the "
+ "shortest route? <br> Or do you have to come up with a different "
+ "solution? <br> Time to make a decision... ")
return build_description(title_level41(), message)
def hint_level41():
message = ugettext("Psst! You can simply make a change to the 'general algorithm'. <br><br> "
+ "If you make the van check for turns before it checks the road exists "
+ "forwards, you will come up with the perfect solution. <br><br>"
+ "Notice that here it doesn't matter which turn you check for first - it "
+ "will change the route but provide you with the same score. ")
return message
def title_level42():
return ugettext("What do you think this time? ")
def description_level42():
    """Description shown for level 42 (built on top of its title)."""
    # Adjacent string literals concatenate at compile time; the resulting
    # message is byte-identical to joining the parts with "+".
    text = ugettext(
        "Can you use the 'general algorithm' here? <br> Can it be changed so that "
        "it finds a shorter route, or will you need a new solution? ")
    return build_description(title_level42(), text)
def hint_level42():
message = ugettext("Uh oh, moving around the blocks in your 'general algorithm' won't help. "
+ "<br> How about creating a simple solution without 'if statements' that "
+ "will help the van reach the house? ")
return message
def title_level43():
return ugettext("Good work! What else can you do? ")
def description_level43():
message = ugettext("You should be really good at this by now. Can you manage this complicated "
+ "road? ")
return build_description(title_level43(), message)
def hint_level43():
message = ugettext("This road cannot be solved by a 'general algorithm'. Can you solve it "
+ "without 'if statements'? <br><br> Remember to choose the shortest route "
+ "and an algorithm which is as short as possible. ")
return message
def title_level44():
return ugettext("Oh no! Traffic lights! ")
def description_level44():
message = youtubeLink(600, 400, "//www.youtube.com/embed/EDwc80X_LQI", 0)
message += ugettext("The light varies from red to green. <br>"
+ "The van must check which colour the traffic light is when it reaches them "
+ "- if it goes past a red light it will break the Highway Code."
+ "<br> Here, you want the van to repeat the wait instruction while the traffic light is red. "
+ "Drag a block inside a <b>Repeat while</b> block to make the van repeat an instruction. "
+ "<br> Attach a 'condition' so the van knows when to repeat the instruction. ")
return build_description(title_level44(), message)
def hint_level44():
message = ugettext("Don't worry about the 'general algorithm' here. Just go forwards. <br><br>"
+ "Once the van is right under the traffic light, make it wait for a green "
+ "light by adding a <b>Wait</b> block. ")
return message
def title_level45():
return ugettext("Green for go, red for wait. ")
def description_level45():
message = ugettext("Can you write a program so the van moves forwards on a green light but "
+ "waits at a red light? ")
return build_description(title_level45(), message)
def hint_level45():
message = ugettext("Use an 'if statement' to tell the van <b>If traffic light is red, Wait, "
+ "Else Move forwards</b>. <br><br> Remember to repeat that until you get "
+ "to the destination. ")
return message
def title_level46():
return ugettext("Well done - you've made it really far! ")
def description_level46():
message = ugettext("Let's practise what you've learnt so far. <br> Don't forget to add a "
+ "turn and to make the van wait at a traffic light. ")
return build_description(title_level46(), message)
def hint_level46():
message = ugettext("Be careful about the order you put your <b>If</b> blocks in. <br><br>"
+ "If you make the van check the road exists forwards before checking for a "
+ "light, it might break the Highway Code. ")
return message
def title_level47():
return ugettext("What a mess! But can you spot a route? ")
def description_level47():
message = ugettext("Put your knowledge to test. Create an algorithm to lead the van to the "
+ "house. <br> Don't forget to add a turn and to make the van wait at a "
+ "traffic light. ")
return build_description(title_level47(), message)
def hint_level47():
message = ugettext("Use an 'if statement' and check if the light is red. <br><br> "
+ "<b>If traffic light is red, wait, Else if road exists forwards, Move "
+ "forwards, Else Turn left</b>. <br><br> Remember to repeat that until you "
+ "get to the destination! ")
return message
def title_level48():
return ugettext("Put all that hard work to the test. ")
def description_level48():
message = ugettext("Congratulations - you've made it really far! <br> Can you create a "
+ "'general algorithm' that will help the van reach the destination in the "
+ "shortest way but stop at a traffic light? ")
return build_description(title_level48(), message)
def hint_level48():
message = ugettext("You need to check: "
+ "<li> if the lights are red </li>"
+ "<li> if the road exists right </li>"
+ "<li> if the road exists forwards </li> "
+ "<li> if the road exists left </li>"
+ "<li> if it is a dead end </li>"
+ "Make sure you put the checks in the right order. ")
return message
def title_level49():
return ugettext("Amazing! Have another go! ")
def description_level49():
message = ugettext("Can you change the 'general algorithm' you created before to make the van "
+ "take the shortest route to the destination? ")
return build_description(title_level49(), message)
def hint_level49():
message = ugettext("You need to check: "
+ "<li> if the light is red </li>"
+ "<li> if the road exists left </li>"
+ "<li> if the road exists forwards </li>"
+ "<li> or if the road exists right </li>"
+ "Do you think you need to check for a dead end? <br> Make sure you put "
+ "the checks in the right order. ")
return message
def title_level50():
return ugettext("Light maze. ")
def description_level50():
message = ugettext("Well this is tricky. Look at all those lights! <br> Can you find the "
+ "shortest route to the destination? It would be good if the van doesn't "
+ "have to wait at too many lights. ")
return build_description(title_level50(), message)
def hint_level50():
message = ugettext("Don't worry about the algorithm you've already come up with. Take the "
+ "first turn left which has fewer traffic lights. <br><br> Once your van "
+ "is right under the traffic lights, make sure it waits for a green "
+ "light. ")
return message
def title_level51():
return ugettext("Back to basics with a twist")
def description_level51():
message = ugettext("Can you come up with a solution to this level using the limited number of blocks we provide at the start?")
return build_description(title_level51(), message)
def hint_level51():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level52():
return ugettext("A Bit more Tricky")
def description_level52():
message = ugettext("Well done so far! Can you find a solution to this road? You have to move forward, but you have no forward block to use. Do you know how to help the van get to the destination?")
return build_description(title_level52(), message)
def hint_level52():
message = ugettext("Don't forget to use the repeat loop.")
return message
def title_level53():
return ugettext("Choose your blocks wisely")
def description_level53():
message = ugettext("Can you find the shortest route? Use your blocks carefully and don't forget the <b>repeat</b> loop.")
return build_description(title_level53(), message)
def hint_level53():
    """Return the hint text for level 53.

    Fix: the sibling hints (e.g. hint_level51) end this sentence with a
    question mark; this one was missing it in the user-facing string.
    """
    message = ugettext("Think back to earlier levels - what did you learn?")
    return message
def title_level54():
return ugettext("Round and Round")
def description_level54():
message = ugettext("Can you find the shortest route? Use your blocks carefully and don't forget the <b>repeat</b> loop.")
return build_description(title_level54(), message)
def hint_level54():
    """Return the hint text for level 54.

    Fix: the sibling hints (e.g. hint_level51) end this sentence with a
    question mark; this one was missing it in the user-facing string.
    """
    message = ugettext("Think back to earlier levels - what did you learn?")
    return message
def title_level55():
return ugettext("Wonky Fish!")
def description_level55():
message = ugettext("Use <b>repeat until</b> and the <b>if</b> statement to find your way around the Wonky Fish.")
return build_description(title_level55(), message)
def hint_level55():
message = ugettext("Think back to earlier levels - what did you learn.")
return message
def title_level56():
return ugettext("Concrete Wasteland")
def description_level56():
message = ugettext("Use <b>repeat until</b> and the <b>if</b> statement to find your way around the Concrete Wasteland")
return build_description(title_level56(), message)
def hint_level56():
message = ugettext("Think back to earlier levels - what did you learn.")
return message
def title_level57():
return ugettext("This is <b>not...</b> the same")
def description_level57():
message = ugettext("Like <b>repeat until</b>, <b>repeat while</b> is the opposite. Here, you want the van to repeat your instructions while it is not at the destination.<br />Doing this means you don't have to work out how many times the van should repeat your instructions.")
return build_description(title_level57(), message)
def hint_level57():
    """Hint shown for level 57."""
    return ugettext(
        "The blocks should read like a sentence. Repeat while not at destination then add your instructions using the blocks provided.")
def title_level58():
return ugettext("Snow snake")
def description_level58():
message = ugettext("Combining what you have just learnt using <b>repeat while</b> with the repeat loop, can you find your way around the snow snake?")
return build_description(title_level58(), message)
def hint_level58():
message = ugettext("The blocks should read like a sentence: <b>repeat while not at destination</b> then using the <b>repeat</b> add your instructions")
return message
def title_level59():
return ugettext("Tricky turnaround")
def description_level59():
message = ugettext("Use your blocks carefully not forgetting the <b>turnaround</b>.")
return build_description(title_level59(), message)
def hint_level59():
    """Return the hint text for level 59.

    Fix: the second "turn left" was wrapped as ``<b>turn left<b>`` — the
    closing tag was missing its slash, leaving an unclosed <b> that bolds
    all text rendered after this hint in the browser.
    """
    message = ugettext("Inside the repeat <b>repeat until</b> block, <b>turn left</b>, <b>turn around</b> and <b>turn left</b> again should do it.")
    return message
def title_level60():
return ugettext("Right around the block")
def description_level60():
message = ugettext("Can you find your way around this puzzle?")
return build_description(title_level60(), message)
def hint_level60():
message = ugettext("The trick to this level is to <b>turn right</b> then <b>turn around</b>.")
return message
def title_level61():
    """Title shown for level 61."""
    title = ugettext("Can you create the 'Wiggle' procedure?")
    return title
def description_level61():
message = ugettext("Procedures are groups of instructions that can be executed multiple times without being rewritten. For example, if you want to instruct the van to follow a repeated pattern in the road, you can create a specific procedure. To create a procedure, simply choose the correct blocks and put them in the right order inside the <b>Define do</b> block. Once you have done that, give it a name eg wiggle.<br />Now you're ready! Attach the <b>Call</b> block where you want your 'wiggle' procedure to be executed. Don't forget to put the name in it!")
return build_description(title_level61(), message)
def hint_level61():
message = ugettext("Don't forget to use <b>Define</b>. Name your procedure and attach the blocks in the right order. Start with <b>move forwards</b>, <b>turn left</b>, you can add repeat loops to a procedure and ending with <b>turn left</b>. Call your procedure under your start block and off you go...")
return message
def title_level62():
return ugettext("Lots of Traffic Lights!")
def description_level62():
message = ugettext("Create a procedure which tells the van to wait until the traffic lights are green.")
return build_description(title_level62(), message)
def hint_level62():
message = ugettext("Don't forget to name your procedure eg 'lights' and every time you want the van to check the traffic lights you need to '<b>call</b>' it.")
return message
def title_level63():
return ugettext("Wiggle Wiggle")
def description_level63():
message = ugettext("Can you find the repeating pattern here and create a new 'wiggle' procedure? And do the Wiggle Wiggle!")
return build_description(title_level63(), message)
def hint_level63():
    """Return the hint text for level 63.

    Fix: ``<b>Call block`` never closed its bold tag, leaving the rest of
    the hint (and anything rendered after it) bolded in the browser. The
    convention elsewhere in this file is to bold only the block name, so
    close the tag right after "Call".
    """
    message = ugettext("Can you see the repeating pattern in the path? The 'wiggle' consisting of a <b>turn left</b>, <b>move forwards</b>, <b>turn right</b>, <b>turn right</b>, <b>turn left</b> can be put in a <b>Define</b> block to create a procedure. Once you have named it, attach the <b>Call</b> block with the procedure's name in the text box to execute it.")
    return message
def title_level64():
return ugettext("Muddy Patterns with Phil")
def description_level64():
message = ugettext("Can you spot a pattern here? Create several procedures, it can save time when writing a program. Don't forget to clearly name your procedures and then call them.")
return build_description(title_level64(), message)
def hint_level64():
message = ugettext("One procedure could be <b>turn left</b>, <b>turn right</b>, <b>move forwards</b>, <b>turn right</b> and <b>turn left</b>. Don't forget you can create a repeat loop in your procedures.")
return message
def title_level65():
return ugettext("Complicated roads.")
def description_level65():
message = ugettext("This road might be a bit more complicated, but the procedures you could come up with are quite simple. Have a go and find out yourself!")
return build_description(title_level65(), message)
def hint_level65():
message = ugettext("Your first procedure could be <b>turn left</b> and <b>turn right</b> 'left-right' The second procedure could be <b>turn right</b> <b>turn left</b>, 'right-left'.")
return message
def title_level66():
return ugettext("Dee's snowy walk")
def description_level66():
message = ugettext("Did you know procedures can call other procedures?")
return build_description(title_level66(), message)
def hint_level66():
message = ugettext("Create 2 procedures. The first one should read <b>move forwards</b>, <b>move forwards</b>, <b>turn right</b>. The second <b>move forwards</b> then <b>call</b> your first procedure")
return message
def title_level67():
return ugettext("Crazy Farm")
def description_level67():
message = ugettext("This one will really test what you have learnt.")
return build_description(title_level67(), message)
def hint_level67():
message = ugettext("It might be easier to write the program without repeats or procedures then create 3 separate procedures from the patterns that your see.")
return message
def title_level68():
return ugettext("T - time")
def description_level68():
    """Description shown for level 68 (built on top of its title)."""
    return build_description(title_level68(),
                             ugettext("Can you find the shortest route?"))
def hint_level68():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level69():
return ugettext("Duck pond dodge")
def description_level69():
message = ugettext("Can you find the shortest route?")
return build_description(title_level69(), message)
def hint_level69():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level70():
return ugettext("Winter wonderland")
def description_level70():
message = ugettext("Can you find the shortest route?")
return build_description(title_level70(), message)
def hint_level70():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level71():
return ugettext("Frozen challenge")
def description_level71():
message = ugettext("Can you find the shortest route?")
return build_description(title_level71(), message)
def hint_level71():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level72():
return ugettext("Can Wes Find his lunch?")
def description_level72():
message = ugettext("Can you find the shortest route?")
return build_description(title_level72(), message)
def hint_level72():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level73():
return ugettext("Traffic light freeze up!")
def description_level73():
message = ugettext("Can you find the shortest algorithm?")
return build_description(title_level73(), message)
def hint_level73():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level74():
return ugettext("Pandemonium")
def description_level74():
message = ugettext("Can you find the shortest route?")
return build_description(title_level74(), message)
def hint_level74():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level75():
return ugettext("Kirsty's maze time")
def description_level75():
message = ugettext("Can you find the shortest route?")
return build_description(title_level75(), message)
def hint_level75():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level76():
return ugettext("Cannot turn left!")
def description_level76():
message = ugettext("Can you find the shortest route?")
return build_description(title_level76(), message)
def hint_level76():
message = ugettext("What is that? A barn for ANTS!?")
return message
def title_level77():
return ugettext("G Force")
def description_level77():
message = ugettext("Can you get the van to the house?")
return build_description(title_level77(), message)
def hint_level77():
message = ugettext("Heard of recursion?")
return message
def title_level78():
return ugettext("Wandering Phil")
def description_level78():
message = ugettext("Can you get Phil to the house?")
return build_description(title_level78(), message)
def hint_level78():
message = ugettext("Repeat while not dead end... turn around...")
return message
def title_level79():
return ugettext("Muddy Mayhem")
def description_level79():
message = ugettext("Can you find the shortest route?")
return build_description(title_level79(), message)
def hint_level79():
message = ugettext("Think back to earlier levels - what did you learn?")
return message
def title_level80():
return ugettext("Here's Python!")
def description_level80():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Can you tell which Python statement matches which block?")
return build_description(title_level80(), message)
def hint_level80():
    """Hint for level 80 — intentionally empty (no hint is shown)."""
    message = ""
    return message
def title_level81():
return ugettext("Matching Blockly")
def description_level81():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Can you tell which Python statement matches which block?")
return build_description(title_level81(), message)
def hint_level81():
return ""
def title_level82():
return ugettext("Don't forget to find the shortest route")
def description_level82():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Can you tell which Python statement matches which block?")
return build_description(title_level82(), message)
def hint_level82():
return ""
def title_level83():
return ugettext("Repeating yourself in Python looks different")
def description_level83():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding a <b>repeat</b> block and watch what happens in Python.")
return build_description(title_level83(), message)
def hint_level83():
return ""
def title_level84():
return ugettext("Repeat and watch.")
def description_level84():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding a <b>repeat</b> block and watch what happens in Python.")
return build_description(title_level84(), message)
def hint_level84():
return ""
def title_level85():
return ugettext("Looks easy but use repeat until and see what happens?")
def description_level85():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding a <b>repeat</b> until block and watch what happens in Python.")
return build_description(title_level85(), message)
def hint_level85():
return ""
def title_level86():
return ugettext("See what the if blocks looks like in Python")
def description_level86():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding an <b>if</b> block and watch what happens in Python.")
return build_description(title_level86(), message)
def hint_level86():
return ""
def title_level87():
return ugettext("Don't forget to use else if")
def description_level87():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding an <b>if</b> block and watch what happens in Python particularly with <b>else if</b> and <b>else</b> statements.")
return build_description(title_level87(), message)
def hint_level87():
return ""
def title_level88():
return ugettext("See what happens when you add Traffic lights")
def description_level88():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding an <b>if</b> block and watch what happens in Python particularly with <b>else if</b> and <b>else</b> statements.")
return build_description(title_level88(), message)
def hint_level88():
return ""
def title_level89():
return ugettext("Watch carefully as you have another go")
def description_level89():
message = ugettext("As you create your program using Blockly see what it looks like in the Python programming language. Try adding an <b>if</b> block and watch what happens in Python particularly with <b>else if</b> and <b>else</b> statements.")
return build_description(title_level89(), message)
def hint_level89():
return ""
def title_level90():
return ugettext("Have a go at procedures - what do they look like in Python?")
def description_level90():
message = ugettext("As you create your program using Blockly see what it looks like in the Python language. Try adding a procedure and watch what happens in Python.")
return build_description(title_level90(), message)
def hint_level90():
message = ugettext("Don't forget to name your procedure and see what happens in Python.")
return message
def title_level91():
return ugettext("Put it all together")
def description_level91():
message = ugettext("As you create your program using Blockly see what it looks like in the Python language. Try adding a procedure and watch what happens in Python.")
return build_description(title_level91(), message)
def hint_level91():
message = ugettext("Don't forget to name your procedure and see what happens in Python.")
return message
def title_level92():
    """Title shown for level 92."""
    title = ugettext(
        "Start with the basics, <b>forward</b>, <b>left</b> and <b>right</b>")
    return title
def description_level92():
message = ugettext("Now you are coding in Python! This is what real developers do!! To start you off, the van object has been created for you already. Under this you need to add the correct Python statements to instruct the van to drive to the destination.<br />For more information about coding in Python refer to <a href='http://www.diveintopython.net/' target='_blank'>www.diveintopython.net</a>.")
return build_description(title_level92(), message)
def hint_level92():
message = ugettext("""Try using the following commands:<br /><pre>v.move_forwards()<br />v.turn_left()<br />v.turn_right()</pre>""")
return message
def title_level93():
return ugettext("Keep it simple")
def description_level93():
message = ugettext("Try this road. Under the van object you need to add the correct Python statements to instruct the van to drive to the destination.")
return build_description(title_level93(), message)
def hint_level93():
message = ugettext("""Try using the following commands:
<pre>v.move_forwards()
v.turn_left()
v.turn_right()</pre>""")
return message.replace('\n','<br />')
def title_level94():
return ugettext("Watch out for the ice!")
def description_level94():
message = ugettext("You're getting good at this! Can you drive the van along this road using the correct Python statements.")
return build_description(title_level94(), message)
def hint_level94():
message = ugettext("""Try using the following commands:
<pre>v.move_forwards()
v.turn_left()
v.turn_right()</pre>""")
return message.replace('\n','<br />')
def title_level95():
return ugettext("Count and repeat")
def description_level95():
message = ugettext("Now try to use a <b>repeat</b> loop to solve this level. Look back at level 83 to see what this could look like in Python.")
return build_description(title_level95(), message)
def hint_level95():
message = ugettext("""To repeat some statements a set number of times you can use something like the following:
<pre>for count in range(3):
v.turn left
print count</pre>
The print statement will output the value of count to the console.""")
return message.replace('\n','<br />')
def title_level96():
return ugettext("Count and repeat is easy")
def description_level96():
message = ugettext("Now try to use a <b>repeat loop</b> to solve this level. Look back at level 83 to see what this could look like in Python. This time you could use 2 loops, 1 for each straight piece of road.")
return build_description(title_level96(), message)
def hint_level96():
message = ugettext("""To repeat some statements a set number of times you can use something like the following:
<pre>for count in range(3):
v.turn left
print count</pre>
The print statement will output the value of count to the console.""")
return message.replace('\n','<br />')
def title_level97():
return ugettext("Loop the loop")
def description_level97():
message = ugettext("Now try to use a loop within a loop, known as a 'nested loop'. Look back at level 84 to see what this could look like in Python.")
return build_description(title_level97(), message)
def hint_level97():
message = ugettext("""To repeat within a repeats a set number of times you can use something like the following:
<pre>for i in range(3):
for j in range(5):
v.turn left
print count</pre>
The print statement will output the value of count to the console.""")
return message.replace('\n','<br />')
def title_level98():
return ugettext("Repeat and check")
def description_level98():
message = ugettext("Try to solve this level by repeatedly moving until the van is at the destination. Also, check whether the van can move forward or else must turn left. Now try and write the Python code. Look back at level 86 to give you an idea of what this could look like.")
return build_description(title_level98(), message)
def hint_level98():
message = ugettext("""To repeat while a condition is met you can use something like the following:
<pre>while not v.at_destination():
v.move_forwards()</pre>
To check whether a condition is met you can use something like the following:
<pre>if v.is_road_forward():
v.move_forwards()</pre>
You may also need to use the <b>else</b> statement.""")
return message.replace('\n','<br />')
def title_level99():
return ugettext("Find a general solution")
def description_level99():
message = ugettext("Now try using what you have just learnt to solve this level. You could also try using the <b>if</b>, <b>elif</b> and <b>else</b> statements. Look back at level 86 to give you an idea of what this could look like.")
return build_description(title_level99(), message)
def hint_level99():
message = ugettext("""To repeat while a condition is met you can use something like the following:
<pre>while not v.at_destination():
v.move_forwards()</pre>
To check whether a condition is met you can use something like the following:
<pre>if v.is_road_forward():
v.move_forwards()</pre>
You may also need to use the <b>elif</b> and <b>else</b> statements.""")
return message.replace('\n','<br />')
def title_level100():
return ugettext("Watch out for the dead end!")
def description_level100():
message = ugettext("Practice your new Python skills on this road to get the van to the destination. Look back at level 88 for a dead end check.")
return build_description(title_level100(), message)
def hint_level100():
    """Hint shown for level 100."""
    return ugettext(
        "Try using<br /><pre>if v.at_dead_end():</pre><br />to check if the van is at a dead end.")
def title_level101():
return ugettext("Function or Junction?")
def description_level101():
message = ugettext("Try defining your own procedure to solve this level. In Python procedures are generally called functions. Look back at level 90 for an example of how to define a function in Python.")
return build_description(title_level101(), message)
def hint_level101():
message = ugettext("""To define a function in Python you could do something like:
<pre>def my_function():
print 'test'</pre>
To call a defined function you could do something like:
<pre>my_function()</pre>
Remember, you must define a function before you call it.""")
return message.replace('\n','<br />')
def title_level102():
return ugettext("Watch for the patterns")
def description_level102():
message = ugettext("For this level try defining more than one function. Try to look for a repeating pattern to simplify your program.")
return build_description(title_level102(), message)
def hint_level102():
message = ugettext("""To define a function in Python you could do something like:
<pre>def my_function():
print 'test'</pre>
To call a defined function you could do something like:
<pre>my_function()</pre>""")
return message.replace('\n','<br />')
def title_level103():
return ugettext("Patterns within patterns.")
def description_level103():
message = ugettext("For this level try to define 2 or more functions where inside one function you call another function.")
return build_description(title_level103(), message)
def hint_level103():
message = ugettext("""To define a function that calls another function you could do something like:
<pre>def my_function():
print 'test'
def my_other_function():
for i in range(3):
my_function()
my_other_function()</pre>""")
return message.replace('\n','<br />')
def title_level104():
return ugettext("Can you see the repeating pattern?")
def description_level104():
message = ugettext("For this level try to define 2 or more functions where inside one function you call another function.")
return build_description(title_level104(), message)
def hint_level104():
message = ugettext("""To define a function that calls another function you could do something like:
<pre>def my_function():
print 'test'
def my_other_function():
for i in range(3):
my_function()
my_other_function()</pre>""")
return message.replace('\n','<br />')
def title_level105():
return ugettext("Find the shortest route.")
def description_level105():
message = ugettext("For this level try to implement a general algorithm. Keep the van going until it arrives at the destination, checking for traffic lights and junctions.")
return build_description(title_level105(), message)
def hint_level105():
message = ugettext("For this you will have to use a combination of the <b>while</b> and <b>if</b> statements.")
return message
def title_level106():
return ugettext("Spiral and add")
def description_level106():
message = ugettext("For this level the van needs to travel in a spiral. The number of grid squares the van has to move keeps increasing by 1 on each turn. To do this you can have a loop that makes use of a variable to track the length of the road you need to travel after each turn.")
return build_description(title_level106(), message)
def hint_level106():
message = ugettext("""To use a variable to store the number of grid squares the van has to move you can do something like the following:
<pre>n = 1
while not v.at_destination():
print n
n += 1</pre>
Variables can be used in place of constants when calling functions. For example to repeat something n times you can do something like the following:
<pre>for count in range(n):</pre>""")
return message.replace('\n','<br />')
def title_level107():
return ugettext("Spiral and double")
def description_level107():
message = ugettext("For this level try something similar to what you have just learnt. This time the straight sections of road are doubling in length after each turn.")
return build_description(title_level107(), message)
def hint_level107():
message = ugettext("To double the value of a variable you can do something like the following:<br /><pre>n *= 2</pre>")
return message
def title_level108():
return ugettext("Think less")
def description_level108():
message = ugettext("This time the straight sections of road decrease in length by 2 after each turn.")
return build_description(title_level108(), message)
def hint_level108():
message = ugettext("To decrease the value of a variable by an amount you can do something like the following:<br /><pre>n -= 5</pre>")
return message
def title_level109():
return ugettext("Final challenge!")
def description_level109():
message = ugettext("For the last challenge, the road straight line sections of road start off increasing by 1 after each turn and then switch to dividing by 2 with a twist!")
return build_description(title_level109(), message)
def hint_level109():
message = ugettext("To halve the value of a variable you can do something like the following:<br /><pre>n /= 2</pre>")
return message
|
mikebryant/rapid-router
|
game/messages.py
|
Python
|
agpl-3.0
| 72,358
|
[
"VisIt"
] |
5e5b5bfcdc66ce2e0a6e36239a826f7f7501fdb82d33b7d261cc14aa9a1f01a2
|
# -*- coding: utf-8 -*-
"""
Regression tests for the Test Client, especially the customized assertions.
"""
from __future__ import unicode_literals
import os
import itertools
from django.core.urlresolvers import reverse, NoReverseMatch
from django.template import TemplateSyntaxError, Context, Template
import django.template.context
from django.test import Client, TestCase, override_settings
from django.test.client import encode_file, RequestFactory
from django.test.utils import ContextList, str_prefix
from django.template.response import SimpleTemplateResponse
from django.utils._os import upath
from django.utils.translation import ugettext_lazy
from django.http import HttpResponse
from django.contrib.auth.signals import user_logged_out, user_logged_in
from django.contrib.auth.models import User
from .models import CustomUser
from .views import CustomTestException
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),),
ROOT_URLCONF='test_client_regress.urls',
)
class AssertContainsTests(TestCase):
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/no_template_view/')
self.assertNotContains(response, 'never')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertContains(response, 'text', status_code=999)
except AssertionError as e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999)
except AssertionError as e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'once')
except AssertionError as e:
self.assertIn("Response should not contain 'once'", str(e))
try:
self.assertNotContains(response, 'once', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response should not contain 'once'", str(e))
try:
self.assertContains(response, 'never', 1)
except AssertionError as e:
self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'never', 1, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'once', 0)
except AssertionError as e:
self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 0, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 2)
except AssertionError as e:
self.assertIn("Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'once', 2, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'twice', 1)
except AssertionError as e:
self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'twice', 1, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'thrice')
except AssertionError as e:
self.assertIn("Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', 3)
except AssertionError as e:
self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e))
try:
self.assertContains(response, 'thrice', 3, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e))
def test_unicode_contains(self):
"Unicode characters can be found in template context"
# Regression test for #10183
r = self.client.get('/check_unicode/')
self.assertContains(r, 'さかき')
self.assertContains(r, b'\xe5\xb3\xa0'.decode('utf-8'))
def test_unicode_not_contains(self):
"Unicode characters can be searched for, and not found in template context"
# Regression test for #10183
r = self.client.get('/check_unicode/')
self.assertNotContains(r, 'はたけ')
self.assertNotContains(r, b'\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8'))
def test_binary_contains(self):
r = self.client.get('/check_binary/')
self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e')
with self.assertRaises(AssertionError):
self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e', count=2)
def test_binary_not_contains(self):
r = self.client.get('/check_binary/')
self.assertNotContains(r, b'%ODF-1.4\r\n%\x93\x8c\x8b\x9e')
with self.assertRaises(AssertionError):
self.assertNotContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e')
def test_nontext_contains(self):
r = self.client.get('/no_template_view/')
self.assertContains(r, ugettext_lazy('once'))
def test_nontext_not_contains(self):
r = self.client.get('/no_template_view/')
self.assertNotContains(r, ugettext_lazy('never'))
def test_assert_contains_renders_template_response(self):
""" Test that we can pass in an unrendered SimpleTemplateReponse
without throwing an error.
Refs #15826.
"""
response = SimpleTemplateResponse(Template('Hello'), status=200)
self.assertContains(response, 'Hello')
def test_assert_contains_using_non_template_response(self):
""" Test that auto-rendering does not affect responses that aren't
instances (or subclasses) of SimpleTemplateResponse.
Refs #15826.
"""
response = HttpResponse('Hello')
self.assertContains(response, 'Hello')
def test_assert_not_contains_renders_template_response(self):
""" Test that we can pass in an unrendered SimpleTemplateReponse
without throwing an error.
Refs #15826.
"""
response = SimpleTemplateResponse(Template('Hello'), status=200)
self.assertNotContains(response, 'Bye')
def test_assert_not_contains_using_non_template_response(self):
""" Test that auto-rendering does not affect responses that aren't
instances (or subclasses) of SimpleTemplateResponse.
Refs #15826.
"""
response = HttpResponse('Hello')
self.assertNotContains(response, 'Bye')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='test_client_regress.urls',)
class AssertTemplateUsedTests(TestCase):
fixtures = ['testdata.json']
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get('/no_template_view/')
# Check that the no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, 'GET Template')
try:
self.assertTemplateUsed(response, 'GET Template')
except AssertionError as e:
self.assertIn("No templates used to render the response", str(e))
try:
self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: No templates used to render the response", str(e))
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(response, 'GET Template', count=2)
self.assertIn(
"No templates used to render the response",
str(context.exception))
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get('/post_view/', {})
try:
self.assertTemplateNotUsed(response, 'Empty GET Template')
except AssertionError as e:
self.assertIn("Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateUsed(response, 'Empty POST Template')
except AssertionError as e:
self.assertIn("Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
try:
self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(response, 'Empty GET Template', count=2)
self.assertIn(
"Template 'Empty GET Template' was expected to be rendered 2 "
"time(s) but was actually rendered 1 time(s).",
str(context.exception))
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(
response, 'Empty GET Template', msg_prefix='abc', count=2)
self.assertIn(
"abc: Template 'Empty GET Template' was expected to be rendered 2 "
"time(s) but was actually rendered 1 time(s).",
str(context.exception))
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
try:
self.assertTemplateNotUsed(response, "form_view.html")
except AssertionError as e:
self.assertIn("Template 'form_view.html' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateNotUsed(response, 'base.html')
except AssertionError as e:
self.assertIn("Template 'base.html' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateUsed(response, "Valid POST Template")
except AssertionError as e:
self.assertIn("Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html", str(e))
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(response, 'base.html', count=2)
self.assertIn(
"Template 'base.html' was expected to be rendered 2 "
"time(s) but was actually rendered 1 time(s).",
str(context.exception))
def test_template_rendered_multiple_times(self):
"""Template assertions work when a template is rendered multiple times."""
response = self.client.get('/render_template_multiple_times/')
self.assertTemplateUsed(response, 'base.html', count=2)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertRedirectsTests(TestCase):
def test_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/permanent_redirect_view/')
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
def test_lost_query(self):
"An assertion is raised if the redirect location doesn't preserve GET parameters"
response = self.client.get('/redirect_view/', {'var': 'value'})
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response redirected to 'http://testserver/get_view/?var=value', expected 'http://testserver/get_view/'", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response redirected to 'http://testserver/get_view/?var=value', expected 'http://testserver/get_view/'", str(e))
def test_incorrect_target(self):
"An assertion is raised if the response redirects to another target"
response = self.client.get('/permanent_redirect_view/')
try:
# Should redirect to get_view
self.assertRedirects(response, '/some_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
def test_target_page(self):
"An assertion is raised if the response redirect target cannot be retrieved as expected"
response = self.client.get('/double_redirect_view/')
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/permanent_redirect_view/')
except AssertionError as e:
self.assertIn("Couldn't retrieve redirection page '/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/permanent_redirect_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve redirection page '/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
def test_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/redirects/further/more/', {}, follow=True)
self.assertRedirects(response, '/no_template_view/',
status_code=301, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0], ('http://testserver/no_template_view/', 301))
def test_multiple_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/redirects/', {}, follow=True)
self.assertRedirects(response, '/no_template_view/',
status_code=301, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 3)
self.assertEqual(response.redirect_chain[0], ('http://testserver/redirects/further/', 301))
self.assertEqual(response.redirect_chain[1], ('http://testserver/redirects/further/more/', 301))
self.assertEqual(response.redirect_chain[2], ('http://testserver/no_template_view/', 301))
def test_redirect_chain_to_non_existent(self):
"You can follow a chain to a non-existent view"
response = self.client.get('/redirect_to_non_existent_view2/', {}, follow=True)
self.assertRedirects(response, '/non_existent_view/',
status_code=301, target_status_code=404)
def test_redirect_chain_to_self(self):
"Redirections to self are caught and escaped"
response = self.client.get('/redirect_to_self/', {}, follow=True)
# The chain of redirects stops once the cycle is detected.
self.assertRedirects(response, '/redirect_to_self/',
status_code=301, target_status_code=301)
self.assertEqual(len(response.redirect_chain), 2)
def test_circular_redirect(self):
"Circular redirect chains are caught and escaped"
response = self.client.get('/circular_redirect_1/', {}, follow=True)
# The chain of redirects will get back to the starting point, but stop there.
self.assertRedirects(response, '/circular_redirect_2/',
status_code=301, target_status_code=301)
self.assertEqual(len(response.redirect_chain), 4)
def test_redirect_chain_post(self):
"A redirect chain will be followed from an initial POST post"
response = self.client.post('/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_head(self):
"A redirect chain will be followed from an initial HEAD request"
response = self.client.head('/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_options(self):
"A redirect chain will be followed from an initial OPTIONS request"
response = self.client.options('/redirects/',
follow=True)
self.assertRedirects(response,
'/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_put(self):
"A redirect chain will be followed from an initial PUT request"
response = self.client.put('/redirects/',
follow=True)
self.assertRedirects(response,
'/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_delete(self):
"A redirect chain will be followed from an initial DELETE request"
response = self.client.delete('/redirects/',
follow=True)
self.assertRedirects(response,
'/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_to_different_host(self):
"The test client will preserve scheme, host and port changes"
response = self.client.get('/redirect_other_host/', follow=True)
self.assertRedirects(response,
'https://otherserver:8443/no_template_view/',
status_code=301, target_status_code=200)
# We can't use is_secure() or get_host()
# because response.request is a dictionary, not an HttpRequest
self.assertEqual(response.request.get('wsgi.url_scheme'), 'https')
self.assertEqual(response.request.get('SERVER_NAME'), 'otherserver')
self.assertEqual(response.request.get('SERVER_PORT'), '8443')
def test_redirect_chain_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/get_view/', follow=True)
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
def test_redirect_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/get_view/')
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
def test_redirect_scheme(self):
"An assertion is raised if the response doesn't have the scheme specified in expected_url"
# Assure that original request scheme is preserved if no scheme specified in the redirect location
response = self.client.get('/redirect_view/', secure=True)
self.assertRedirects(response, 'https://testserver/get_view/')
# For all possible True/False combinations of follow and secure
for follow, secure in itertools.product([True, False], repeat=2):
# always redirects to https
response = self.client.get('/https_redirect_view/', follow=follow, secure=secure)
# no scheme to compare too, always succeeds
self.assertRedirects(response, '/secure_view/', status_code=301)
# the goal scheme is https
self.assertRedirects(response, 'https://testserver/secure_view/', status_code=301)
with self.assertRaises(AssertionError):
self.assertRedirects(response, 'http://testserver/secure_view/', status_code=301)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertFormErrorTests(TestCase):
def test_unknown_form(self):
"An assertion is raised if the form name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
except AssertionError as e:
self.assertIn("The form 'wrong_form' was not used to render the response", str(e))
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'wrong_form' was not used to render the response", str(e))
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.')
except AssertionError as e:
self.assertIn("The form 'form' in context 0 does not contain the field 'some_field'", str(e))
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'form' in context 0 does not contain the field 'some_field'", str(e))
def test_noerror_field(self):
"An assertion is raised if the field doesn't have any errors"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'value', 'Some error.')
except AssertionError as e:
self.assertIn("The field 'value' on form 'form' in context 0 contains no errors", str(e))
try:
self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The field 'value' on form 'form' in context 0 contains no errors", str(e))
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the provided error"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'email', 'Some error.')
except AssertionError as e:
self.assertIn(str_prefix("The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [%(_)s'Enter a valid email address.'])"), str(e))
try:
self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn(str_prefix("abc: The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [%(_)s'Enter a valid email address.'])"), str(e))
def test_unknown_nonfield_error(self):
"""
Checks that an assertion is raised if the form's non field errors
doesn't contain the provided error.
"""
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', None, 'Some error.')
except AssertionError as e:
self.assertIn("The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
try:
self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertFormsetErrorTests(TestCase):
msg_prefixes = [("", {}), ("abc: ", {"msg_prefix": "abc"})]
def setUp(self):
"""Makes response object for testing field and non-field errors"""
# For testing field and non-field errors
self.response_form_errors = self.getResponse({
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '2',
'form-0-text': 'Raise non-field error',
'form-0-email': 'not an email address',
'form-0-value': 37,
'form-0-single': 'b',
'form-0-multi': ('b', 'c', 'e'),
'form-1-text': 'Hello World',
'form-1-email': 'email@domain.com',
'form-1-value': 37,
'form-1-single': 'b',
'form-1-multi': ('b', 'c', 'e'),
})
# For testing non-form errors
self.response_nonform_errors = self.getResponse({
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '2',
'form-0-text': 'Hello World',
'form-0-email': 'email@domain.com',
'form-0-value': 37,
'form-0-single': 'b',
'form-0-multi': ('b', 'c', 'e'),
'form-1-text': 'Hello World',
'form-1-email': 'email@domain.com',
'form-1-value': 37,
'form-1-single': 'b',
'form-1-multi': ('b', 'c', 'e'),
})
def getResponse(self, post_data):
response = self.client.post('/formset_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
return response
def test_unknown_formset(self):
"An assertion is raised if the formset name is unknown"
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'wrong_formset',
0,
'Some_field',
'Some error.',
**kwargs)
self.assertIn(prefix + "The formset 'wrong_formset' was not "
"used to render the response",
str(cm.exception))
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
0,
'Some_field',
'Some error.',
**kwargs)
self.assertIn(prefix + "The formset 'my_formset', "
"form 0 in context 0 "
"does not contain the field 'Some_field'",
str(cm.exception))
def test_no_error_field(self):
"An assertion is raised if the field doesn't have any errors"
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
1,
'value',
'Some error.',
**kwargs)
self.assertIn(prefix + "The field 'value' "
"on formset 'my_formset', form 1 "
"in context 0 contains no errors",
str(cm.exception))
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the specified error"
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
0,
'email',
'Some error.',
**kwargs)
self.assertIn(str_prefix(prefix + "The field 'email' "
"on formset 'my_formset', form 0 in context 0 does not "
"contain the error 'Some error.' (actual errors: "
"[%(_)s'Enter a valid email address.'])"),
str(cm.exception))
def test_field_error(self):
"No assertion is raised if the field contains the provided error"
for prefix, kwargs in self.msg_prefixes:
self.assertFormsetError(self.response_form_errors,
'my_formset',
0,
'email',
['Enter a valid email address.'],
**kwargs)
def test_no_nonfield_error(self):
"An assertion is raised if the formsets non-field errors doesn't contain any errors."
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
1,
None,
'Some error.',
**kwargs)
self.assertIn(prefix + "The formset 'my_formset', form 1 in "
"context 0 does not contain any "
"non-field errors.",
str(cm.exception))
def test_unknown_nonfield_error(self):
"An assertion is raised if the formsets non-field errors doesn't contain the provided error."
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
0,
None,
'Some error.',
**kwargs)
self.assertIn(str_prefix(prefix +
"The formset 'my_formset', form 0 in context 0 does not "
"contain the non-field error 'Some error.' (actual errors: "
"[%(_)s'Non-field error.'])"), str(cm.exception))
def test_nonfield_error(self):
"No assertion is raised if the formsets non-field errors contains the provided error."
for prefix, kwargs in self.msg_prefixes:
self.assertFormsetError(self.response_form_errors,
'my_formset',
0,
None,
'Non-field error.',
**kwargs)
def test_no_nonform_error(self):
"An assertion is raised if the formsets non-form errors doesn't contain any errors."
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
None,
None,
'Some error.',
**kwargs)
self.assertIn(prefix + "The formset 'my_formset' in context 0 "
"does not contain any non-form errors.",
str(cm.exception))
def test_unknown_nonform_error(self):
"An assertion is raised if the formsets non-form errors doesn't contain the provided error."
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_nonform_errors,
'my_formset',
None,
None,
'Some error.',
**kwargs)
self.assertIn(str_prefix(prefix +
"The formset 'my_formset' in context 0 does not contain the "
"non-form error 'Some error.' (actual errors: [%(_)s'Forms "
"in a set must have distinct email addresses.'])"), str(cm.exception))
def test_nonform_error(self):
"No assertion is raised if the formsets non-form errors contains the provided error."
for prefix, kwargs in self.msg_prefixes:
self.assertFormsetError(self.response_nonform_errors,
'my_formset',
None,
None,
'Forms in a set must have distinct email '
'addresses.',
**kwargs)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
                   ROOT_URLCONF='test_client_regress.urls',)
class LoginTests(TestCase):
    """Authentication state is tracked per test-client instance."""
    # Loads the 'testclient' user used by the login call below.
    fixtures = ['testdata']
    def test_login_different_client(self):
        "Check that using a different test client doesn't violate authentication"
        # Create a second client, and log in.
        c = Client()
        login = c.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Get a redirection page with the second client.
        response = c.get("/login_protected_redirect_view/")
        # At this points, the self.client isn't logged in.
        # Check that assertRedirects uses the original client, not the
        # default client.
        self.assertRedirects(response, "http://testserver/get_view/")
@override_settings(
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
    # Custom session backend (defined in test_client_regress.session) that
    # modifies the session key; login must still work with it.
    SESSION_ENGINE='test_client_regress.session',
    ROOT_URLCONF='test_client_regress.urls',
)
class SessionEngineTests(TestCase):
    """Login works with a non-default SESSION_ENGINE."""
    fixtures = ['testdata']
    def test_login(self):
        "A session engine that modifies the session key can be used to log in"
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Try to access a login protected page.
        response = self.client.get("/login_protected_view/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(ROOT_URLCONF='test_client_regress.urls',)
class URLEscapingTests(TestCase):
    """URL arguments that need percent-escaping round-trip through the client."""
    def test_simple_argument_get(self):
        "Get a view that has a simple string argument"
        response = self.client.get(reverse('arg_view', args=['Slartibartfast']))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'Howdy, Slartibartfast')
    def test_argument_with_space_get(self):
        "Get a view that has a string argument that requires escaping"
        # 'Arthur Dent' contains a space, which must be escaped in the URL.
        response = self.client.get(reverse('arg_view', args=['Arthur Dent']))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'Hi, Arthur')
    def test_simple_argument_post(self):
        "Post for a view that has a simple string argument"
        response = self.client.post(reverse('arg_view', args=['Slartibartfast']))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'Howdy, Slartibartfast')
    def test_argument_with_space_post(self):
        "Post for a view that has a string argument that requires escaping"
        response = self.client.post(reverse('arg_view', args=['Arthur Dent']))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'Hi, Arthur')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='test_client_regress.urls',)
class ExceptionTests(TestCase):
fixtures = ['testdata.json']
def test_exception_cleared(self):
"#5836 - A stale user exception isn't re-raised by the test client."
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
try:
self.client.get("/staff_only/")
self.fail("General users should not be able to visit this page")
except CustomTestException:
pass
# At this point, an exception has been raised, and should be cleared.
# This next operation should be successful; if it isn't we have a problem.
login = self.client.login(username='staff', password='password')
self.assertTrue(login, 'Could not log in')
try:
self.client.get("/staff_only/")
except CustomTestException:
self.fail("Staff should be able to visit this page")
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class TemplateExceptionTests(TestCase):
    """Errors raised while rendering error templates propagate to the test."""
    @override_settings(
        # Points TEMPLATE_DIRS at a directory whose 404 template contains a
        # deliberate syntax error (see the 'bad_templates' fixture dir).
        TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'bad_templates'),)
    )
    def test_bad_404_template(self):
        "Errors found when rendering 404 error templates are re-raised"
        try:
            self.client.get("/no_such_view/")
        except TemplateSyntaxError:
            # Expected: the broken 404 template raised instead of being hidden.
            pass
        else:
            self.fail("Should get error about syntax error in template")
# We need two different tests to check URLconf substitution - one to check
# it was changed, and another one (without self.urls) to check it was reverted on
# teardown. This pair of tests relies upon the alphabetical ordering of test execution.
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class UrlconfSubstitutionTests(TestCase):
    """First half of an ordered pair of tests; see zzUrlconfSubstitutionTests.

    This class checks that the override is in effect; the zz-prefixed class
    (run later, alphabetically) checks that it was reverted afterwards.
    """
    def test_urlconf_was_changed(self):
        "TestCase can enforce a custom URLconf on a per-test basis"
        url = reverse('arg_view', args=['somename'])
        self.assertEqual(url, '/arg_view/somename/')
# This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the
# name is to ensure alphabetical ordering.
class zzUrlconfSubstitutionTests(TestCase):
    """Second half of the ordered pair started by UrlconfSubstitutionTests.

    The 'zz' prefix forces this class to run after its partner under
    alphabetical test ordering; it must observe the original URLconf.
    """
    def test_urlconf_was_reverted(self):
        """URLconf is reverted to original value after modification in a TestCase
        This will not find a match as the default ROOT_URLCONF is empty.
        """
        with self.assertRaises(NoReverseMatch):
            reverse('arg_view', args=['somename'])
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='test_client_regress.urls',)
class ContextTests(TestCase):
fixtures = ['testdata']
def test_single_context(self):
"Context variables can be retrieved from a single context"
response = self.client.get("/request_data/", data={'foo': 'whiz'})
self.assertEqual(response.context.__class__, Context)
self.assertIn('get-foo', response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['data'], 'sausage')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError as e:
self.assertEqual(e.args[0], 'does-not-exist')
def test_inherited_context(self):
"Context variables can be retrieved from a list of contexts"
response = self.client.get("/request_data_extended/", data={'foo': 'whiz'})
self.assertEqual(response.context.__class__, ContextList)
self.assertEqual(len(response.context), 2)
self.assertIn('get-foo', response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['data'], 'bacon')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError as e:
self.assertEqual(e.args[0], 'does-not-exist')
def test_contextlist_keys(self):
c1 = Context()
c1.update({'hello': 'world', 'goodbye': 'john'})
c1.update({'hello': 'dolly', 'dolly': 'parton'})
c2 = Context()
c2.update({'goodbye': 'world', 'python': 'rocks'})
c2.update({'goodbye': 'dolly'})
l = ContextList([c1, c2])
# None, True and False are builtins of BaseContext, and present
# in every Context without needing to be added.
self.assertEqual({'None', 'True', 'False', 'hello', 'goodbye',
'python', 'dolly'},
l.keys())
def test_15368(self):
# Need to insert a context processor that assumes certain things about
# the request instance. This triggers a bug caused by some ways of
# copying RequestContext.
try:
django.template.context._standard_context_processors = (lambda request: {'path': request.special_path},)
response = self.client.get("/request_context_view/")
self.assertContains(response, 'Path: /request_context_view/')
finally:
django.template.context._standard_context_processors = None
def test_nested_requests(self):
"""
response.context is not lost when view call another view.
"""
response = self.client.get("/nested_view/")
self.assertEqual(response.context.__class__, Context)
self.assertEqual(response.context['nested'], 'yes')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='test_client_regress.urls',)
class SessionTests(TestCase):
fixtures = ['testdata.json']
def test_session(self):
"The session isn't lost if a user logs in"
# The session doesn't exist to start.
response = self.client.get('/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'NO')
# This request sets a session variable.
response = self.client.get('/set_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'set_session')
# Check that the session has been modified
response = self.client.get('/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'YES')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Session should still contain the modified value
response = self.client.get('/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'YES')
def test_session_initiated(self):
session = self.client.session
session['session_var'] = 'foo'
session.save()
response = self.client.get('/check_session/')
self.assertEqual(response.content, b'foo')
def test_logout(self):
"""Logout should work whether the user is logged in or not (#9978)."""
self.client.logout()
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
self.client.logout()
self.client.logout()
def test_logout_with_user(self):
"""Logout should send user_logged_out signal if user was logged in."""
def listener(*args, **kwargs):
listener.executed = True
self.assertEqual(kwargs['sender'], User)
listener.executed = False
user_logged_out.connect(listener)
self.client.login(username='testclient', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
@override_settings(AUTH_USER_MODEL='test_client_regress.CustomUser')
def test_logout_with_custom_user(self):
"""Logout should send user_logged_out signal if custom user was logged in."""
def listener(*args, **kwargs):
self.assertEqual(kwargs['sender'], CustomUser)
listener.executed = True
listener.executed = False
u = CustomUser.custom_objects.create(email='test@test.com')
u.set_password('password')
u.save()
user_logged_out.connect(listener)
self.client.login(username='test@test.com', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
@override_settings(AUTHENTICATION_BACKENDS=(
'django.contrib.auth.backends.ModelBackend',
'test_client_regress.auth_backends.CustomUserBackend'))
def test_logout_with_custom_auth_backend(self):
"Request a logout after logging in with custom authentication backend"
def listener(*args, **kwargs):
self.assertEqual(kwargs['sender'], CustomUser)
listener.executed = True
listener.executed = False
u = CustomUser.custom_objects.create(email='test@test.com')
u.set_password('password')
u.save()
user_logged_out.connect(listener)
self.client.login(username='test@test.com', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
def test_logout_without_user(self):
"""Logout should send signal even if user not authenticated."""
def listener(user, *args, **kwargs):
listener.user = user
listener.executed = True
listener.executed = False
user_logged_out.connect(listener)
self.client.login(username='incorrect', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
self.assertIsNone(listener.user)
def test_login_with_user(self):
"""Login should send user_logged_in signal on successful login."""
def listener(*args, **kwargs):
listener.executed = True
listener.executed = False
user_logged_in.connect(listener)
self.client.login(username='testclient', password='password')
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
def test_login_without_signal(self):
"""Login shouldn't send signal if user wasn't logged in"""
def listener(*args, **kwargs):
listener.executed = True
listener.executed = False
user_logged_in.connect(listener)
self.client.login(username='incorrect', password='password')
user_logged_in.disconnect(listener)
self.assertFalse(listener.executed)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestMethodTests(TestCase):
def test_get(self):
"Request a view via request method GET"
response = self.client.get('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: GET')
def test_post(self):
"Request a view via request method POST"
response = self.client.post('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: POST')
def test_head(self):
"Request a view via request method HEAD"
response = self.client.head('/request_methods/')
self.assertEqual(response.status_code, 200)
# A HEAD request doesn't return any content.
self.assertNotEqual(response.content, b'request method: HEAD')
self.assertEqual(response.content, b'')
def test_options(self):
"Request a view via request method OPTIONS"
response = self.client.options('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: OPTIONS')
def test_put(self):
"Request a view via request method PUT"
response = self.client.put('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PUT')
def test_delete(self):
"Request a view via request method DELETE"
response = self.client.delete('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: DELETE')
def test_patch(self):
"Request a view via request method PATCH"
response = self.client.patch('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PATCH')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestMethodStringDataTests(TestCase):
def test_post(self):
"Request a view with string data via request method POST"
# Regression test for #11371
data = '{"test": "json"}'
response = self.client.post('/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: POST')
def test_put(self):
"Request a view with string data via request method PUT"
# Regression test for #11371
data = '{"test": "json"}'
response = self.client.put('/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PUT')
def test_patch(self):
"Request a view with string data via request method PATCH"
# Regression test for #17797
data = '{"test": "json"}'
response = self.client.patch('/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PATCH')
def test_empty_string_data(self):
"Request a view with empty string data via request method GET/POST/HEAD"
# Regression test for #21740
response = self.client.get('/body/', data='', content_type='application/json')
self.assertEqual(response.content, b'')
response = self.client.post('/body/', data='', content_type='application/json')
self.assertEqual(response.content, b'')
response = self.client.head('/body/', data='', content_type='application/json')
self.assertEqual(response.content, b'')
@override_settings(ROOT_URLCONF='test_client_regress.urls',)
class QueryStringTests(TestCase):
def test_get_like_requests(self):
# See: https://code.djangoproject.com/ticket/10571.
for method_name in ('get', 'head'):
# A GET-like request can pass a query string as data
method = getattr(self.client, method_name)
response = method("/request_data/", data={'foo': 'whiz'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
# A GET-like request can pass a query string as part of the URL
response = method("/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
# Data provided in the URL to a GET-like request is overridden by actual form data
response = method("/request_data/?foo=whiz", data={'foo': 'bang'})
self.assertEqual(response.context['get-foo'], 'bang')
self.assertEqual(response.context['request-foo'], 'bang')
response = method("/request_data/?foo=whiz", data={'bar': 'bang'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['get-bar'], 'bang')
self.assertEqual(response.context['request-foo'], None)
self.assertEqual(response.context['request-bar'], 'bang')
def test_post_like_requests(self):
# A POST-like request can pass a query string as data
response = self.client.post("/request_data/", data={'foo': 'whiz'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['post-foo'], 'whiz')
# A POST-like request can pass a query string as part of the URL
response = self.client.post("/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], None)
self.assertEqual(response.context['request-foo'], 'whiz')
# POST data provided in the URL augments actual form data
response = self.client.post("/request_data/?foo=whiz", data={'foo': 'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], 'bang')
self.assertEqual(response.context['request-foo'], 'bang')
response = self.client.post("/request_data/?foo=whiz", data={'bar': 'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['get-bar'], None)
self.assertEqual(response.context['post-foo'], None)
self.assertEqual(response.context['post-bar'], 'bang')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['request-bar'], 'bang')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class UnicodePayloadTests(TestCase):
def test_simple_unicode_payload(self):
"A simple ASCII-only unicode JSON document can be POSTed"
# Regression test for #10571
json = '{"english": "mountain pass"}'
response = self.client.post("/parse_unicode_json/", json,
content_type="application/json")
self.assertEqual(response.content, json.encode())
def test_unicode_payload_utf8(self):
"A non-ASCII unicode data encoded as UTF-8 can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/parse_unicode_json/", json,
content_type="application/json; charset=utf-8")
self.assertEqual(response.content, json.encode('utf-8'))
def test_unicode_payload_utf16(self):
"A non-ASCII unicode data encoded as UTF-16 can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/parse_unicode_json/", json,
content_type="application/json; charset=utf-16")
self.assertEqual(response.content, json.encode('utf-16'))
def test_unicode_payload_non_utf(self):
"A non-ASCII unicode data as a non-UTF based encoding can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/parse_unicode_json/", json,
content_type="application/json; charset=koi8-r")
self.assertEqual(response.content, json.encode('koi8-r'))
class DummyFile(object):
    """Minimal file-like stub: exposes only ``name`` and ``read()``.

    Just enough surface for ``encode_file()`` to treat it as an upload;
    the payload is a fixed byte string.
    """

    def __init__(self, filename):
        # The filename drives content-type guessing in encode_file().
        self.name = filename

    def read(self):
        # Fixed payload; the tests only check that it appears verbatim.
        return b'TEST_FILE_CONTENT'
class UploadedFileEncodingTest(TestCase):
    """Multipart encoding of uploaded files via encode_file()."""
    def test_file_encoding(self):
        # encode_file() returns a list of multipart body lines (bytes):
        # boundary first, Content-Disposition second, file content last.
        encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin'))
        self.assertEqual(b'--TEST_BOUNDARY', encoded_file[0])
        self.assertEqual(b'Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1])
        self.assertEqual(b'TEST_FILE_CONTENT', encoded_file[-1])
    def test_guesses_content_type_on_file_encoding(self):
        # The Content-Type line (index 2) is guessed from the file extension.
        self.assertEqual(b'Content-Type: application/octet-stream',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2])
        self.assertEqual(b'Content-Type: text/plain',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2])
        # .zip mappings vary by platform mime databases, so accept any of them.
        self.assertIn(encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2], (
            b'Content-Type: application/x-compress',
            b'Content-Type: application/x-zip',
            b'Content-Type: application/x-zip-compressed',
            b'Content-Type: application/zip',))
        # Unknown extensions fall back to the generic octet-stream type.
        self.assertEqual(b'Content-Type: application/octet-stream',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2])
@override_settings(ROOT_URLCONF='test_client_regress.urls',)
class RequestHeadersTest(TestCase):
    """Custom HTTP_* headers passed to the test client reach the view."""
    def test_client_headers(self):
        "A test client can receive custom headers"
        # Extra keyword args in WSGI HTTP_* form become request headers.
        response = self.client.get("/check_headers/", HTTP_X_ARG_CHECK='Testing 123')
        self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
        self.assertEqual(response.status_code, 200)
    def test_client_headers_redirect(self):
        "Test client headers are preserved through redirects"
        # follow=True makes the client chase the redirect chain; the custom
        # header must still be present on the final request.
        response = self.client.get("/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123')
        self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
        self.assertRedirects(response, '/check_headers/',
            status_code=301, target_status_code=200)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class ReadLimitedStreamTest(TestCase):
"""
Tests that ensure that HttpRequest.body, HttpRequest.read() and
HttpRequest.read(BUFFER) have proper LimitedStream behavior.
Refs #14753, #15785
"""
def test_body_from_empty_request(self):
"""HttpRequest.body on a test client GET request should return
the empty string."""
self.assertEqual(self.client.get("/body/").content, b'')
def test_read_from_empty_request(self):
"""HttpRequest.read() on a test client GET request should return the
empty string."""
self.assertEqual(self.client.get("/read_all/").content, b'')
def test_read_numbytes_from_empty_request(self):
"""HttpRequest.read(LARGE_BUFFER) on a test client GET request should
return the empty string."""
self.assertEqual(self.client.get("/read_buffer/").content, b'')
def test_read_from_nonempty_request(self):
"""HttpRequest.read() on a test client PUT request with some payload
should return that payload."""
payload = b'foobar'
self.assertEqual(self.client.put(
"/read_all/",
data=payload,
content_type='text/plain').content, payload)
def test_read_numbytes_from_nonempty_request(self):
"""HttpRequest.read(LARGE_BUFFER) on a test client PUT request with
some payload should return that payload."""
payload = b'foobar'
self.assertEqual(
self.client.put("/read_buffer/",
data=payload,
content_type='text/plain').content, payload)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestFactoryStateTest(TestCase):
    """Regression tests for #15929."""
    # These tests are checking that certain middleware don't change certain
    # global state. Alternatively, from the point of view of a test, they are
    # ensuring test isolation behavior. So, unusually, it doesn't make sense to
    # run the tests individually, and if any are failing it is confusing to run
    # them with any other set of tests.
    def common_test_that_should_always_pass(self):
        # A bare RequestFactory request must not acquire a 'user' attribute
        # from state left over by earlier client requests.
        request = RequestFactory().get('/')
        request.session = {}
        self.assertFalse(hasattr(request, 'user'))
    def test_request(self):
        self.common_test_that_should_always_pass()
    def test_request_after_client(self):
        # apart from the next line the three tests are identical
        self.client.get('/')
        self.common_test_that_should_always_pass()
    def test_request_after_client_2(self):
        # This test is executed after the previous one
        self.common_test_that_should_always_pass()
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestFactoryEnvironmentTests(TestCase):
    """
    Regression tests for #8551 and #17067: ensure that environment variables
    are set correctly in RequestFactory.
    """
    def test_should_set_correct_env_variables(self):
        request = RequestFactory().get('/path/')
        # RequestFactory fakes a local WSGI environment with fixed values.
        self.assertEqual(request.META.get('REMOTE_ADDR'), '127.0.0.1')
        self.assertEqual(request.META.get('SERVER_NAME'), 'testserver')
        self.assertEqual(request.META.get('SERVER_PORT'), '80')
        self.assertEqual(request.META.get('SERVER_PROTOCOL'), 'HTTP/1.1')
        # SCRIPT_NAME + PATH_INFO must reconstruct the requested path.
        self.assertEqual(request.META.get('SCRIPT_NAME') +
                         request.META.get('PATH_INFO'), '/path/')
|
PolicyStat/django
|
tests/test_client_regress/tests.py
|
Python
|
bsd-3-clause
| 65,886
|
[
"VisIt"
] |
0e7df31d209be5a92830ab7dbc5434ed921bbceb8c340ae6d57e0e9dcc6ab2b4
|
# Example script: train a Gaussian-binary RBM on MNIST using one-step
# block Gibbs sampling and plain SGD, printing reconstruction stats.
# NOTE(review): Python 2 syntax (print statement, raw_input) — run under Py2.
from models.sampler import BlockGibbsSampler
from models.distribution import GaussianBinary
from models.optimizer import SGD
from models.rbm import RBM
from data.mnist.path import *
from utils.utils import prepare_batches
from matplotlib import pyplot
import sklearn.preprocessing as pre
import pandas, numpy, time
# Training hyper-parameters.
SIZE_BATCH = 10      # examples per mini-batch
EPOCHS = 10          # passes over the training set
SIZE_HIDDEN = 500    # hidden (binary) units
SIZE_VISIBLE = 784   # visible units (28x28 MNIST pixels)
# load binary mnist sample dataset
dataset = pandas.read_csv(MNIST_TRAIN, delimiter=',', dtype=numpy.float64, header=None)
# leave the first column out since it contains the labels
# dataset must be normalized to have unit variance by column (sigma_i == 1)
dataset = pre.scale(dataset.values[:,1:], axis=0)
# compute batch set
# idx is a sequence of (start, end) row-index pairs used to slice batches below.
idx = prepare_batches(len(dataset), SIZE_BATCH)
# load distribution
gaussian = GaussianBinary(SIZE_VISIBLE, SIZE_HIDDEN)
gibbs = BlockGibbsSampler(gaussian, sampling_steps=1)
sgd = SGD(gaussian, learning_rate=0.001, weight_decay=0, momentum=0)
rbm = RBM(gaussian, gibbs, sgd)
# Visualization scaffolding kept for reference (disabled):
# pyplot.figure(1)
# pyplot.ion()
# pyplot.show()
# vmin = numpy.min(dataset)
# vmax = numpy.max(dataset)
for epoch in range(EPOCHS):
    for b_idx in idx:
        batch = dataset[b_idx[0]:b_idx[1], :]
        d_weight_update, _, _ = rbm.train_batch(batch)
        # Reconstruct the batch with 10 Gibbs steps to inspect progress.
        rec_probs, rec_state = rbm.reconstruct(batch,steps=10)
        pyplot.clf()
        # Reshape the last reconstructed sample back into a 28x28 image.
        img = numpy.reshape(rec_state[-1,:], newshape=(28,28))
        print "Max: " + str(numpy.max(img)) + " Min: " + str(numpy.min(img))
        # pyplot.hist(d_weight_update)
        # pyplot.draw()
        # pyplot.matshow(img, fignum=0, cmap=pyplot.cm.gray, vmin=vmin , vmax=vmax)
        # pyplot.draw()
        # time.sleep(0.1)
        # Pause after every batch until the user presses Enter.
        raw_input()
|
deprofundis/deprofundis
|
models/scripts/example_gaussian.py
|
Python
|
mit
| 1,682
|
[
"Gaussian"
] |
6a02348e8641fb2d52192a9747d1805e270270a12c5741a1901c62c3fe749c44
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`~openlp.core.lib.spelltextedit` module contains a classes to add spell checking to an edit widget.
"""
import logging
import re
try:
import enchant
from enchant import DictNotFoundError
from enchant.errors import Error
ENCHANT_AVAILABLE = True
except ImportError:
ENCHANT_AVAILABLE = False
# based on code from
# http://john.nachtimwald.com/2009/08/22/qplaintextedit-with-in-line-spell-check
from PyQt4 import QtCore, QtGui
from openlp.core.lib import translate, FormattingTags
from openlp.core.lib.ui import create_action
log = logging.getLogger(__name__)
class SpellTextEdit(QtGui.QPlainTextEdit):
"""
Spell checking widget based on QPlanTextEdit.
"""
    def __init__(self, parent=None, formattingTagsAllowed=True):
        """
        Constructor.

        ``parent``
            Optional parent widget, passed through to QPlainTextEdit.

        ``formattingTagsAllowed``
            When True, the context menu offers OpenLP formatting tags.
        """
        global ENCHANT_AVAILABLE
        QtGui.QPlainTextEdit.__init__(self, parent)
        self.formattingTagsAllowed = formattingTagsAllowed
        # Default dictionary based on the current locale.
        if ENCHANT_AVAILABLE:
            try:
                self.dictionary = enchant.Dict()
                self.highlighter = Highlighter(self.document())
                self.highlighter.spellingDictionary = self.dictionary
            except (Error, DictNotFoundError):
                # No usable dictionary: disable spell checking module-wide
                # so later menu code skips the enchant branches.
                ENCHANT_AVAILABLE = False
                log.debug(u'Could not load default dictionary')
    def mousePressEvent(self, event):
        """
        Handle mouse clicks within the text edit region.

        Right clicks are rewritten as left clicks so the text cursor moves
        to the pointer before the context menu opens.
        """
        if event.button() == QtCore.Qt.RightButton:
            # Rewrite the mouse event to a left button event so the cursor is
            # moved to the location of the pointer.
            event = QtGui.QMouseEvent(QtCore.QEvent.MouseButtonPress,
                event.pos(), QtCore.Qt.LeftButton, QtCore.Qt.LeftButton, QtCore.Qt.NoModifier)
        QtGui.QPlainTextEdit.mousePressEvent(self, event)
def contextMenuEvent(self, event):
"""
Provide the context menu for the text edit region.
"""
popupMenu = self.createStandardContextMenu()
# Select the word under the cursor.
cursor = self.textCursor()
# only select text if not already selected
if not cursor.hasSelection():
cursor.select(QtGui.QTextCursor.WordUnderCursor)
self.setTextCursor(cursor)
# Add menu with available languages.
if ENCHANT_AVAILABLE:
lang_menu = QtGui.QMenu(
translate('OpenLP.SpellTextEdit', 'Language:'))
for lang in enchant.list_languages():
action = create_action(lang_menu, lang, text=lang, checked=lang == self.dictionary.tag)
lang_menu.addAction(action)
popupMenu.insertSeparator(popupMenu.actions()[0])
popupMenu.insertMenu(popupMenu.actions()[0], lang_menu)
QtCore.QObject.connect(lang_menu, QtCore.SIGNAL(u'triggered(QAction*)'), self.setLanguage)
# Check if the selected word is misspelled and offer spelling
# suggestions if it is.
if ENCHANT_AVAILABLE and self.textCursor().hasSelection():
text = self.textCursor().selectedText()
if not self.dictionary.check(text):
spell_menu = QtGui.QMenu(translate('OpenLP.SpellTextEdit', 'Spelling Suggestions'))
for word in self.dictionary.suggest(text):
action = SpellAction(word, spell_menu)
action.correct.connect(self.correctWord)
spell_menu.addAction(action)
# Only add the spelling suggests to the menu if there are
# suggestions.
if spell_menu.actions():
popupMenu.insertMenu(popupMenu.actions()[0], spell_menu)
tagMenu = QtGui.QMenu(translate('OpenLP.SpellTextEdit', 'Formatting Tags'))
if self.formattingTagsAllowed:
for html in FormattingTags.get_html_tags():
action = SpellAction(html[u'desc'], tagMenu)
action.correct.connect(self.htmlTag)
tagMenu.addAction(action)
popupMenu.insertSeparator(popupMenu.actions()[0])
popupMenu.insertMenu(popupMenu.actions()[0], tagMenu)
popupMenu.exec_(event.globalPos())
def setLanguage(self, action):
"""
Changes the language for this spelltextedit.
``action``
The action.
"""
self.dictionary = enchant.Dict(action.text())
self.highlighter.spellingDictionary = self.dictionary
self.highlighter.highlightBlock(self.toPlainText())
self.highlighter.rehighlight()
def correctWord(self, word):
"""
Replaces the selected text with word.
"""
cursor = self.textCursor()
cursor.beginEditBlock()
cursor.removeSelectedText()
cursor.insertText(word)
cursor.endEditBlock()
def htmlTag(self, tag):
"""
Replaces the selected text with word.
"""
for html in FormattingTags.get_html_tags():
if tag == html[u'desc']:
cursor = self.textCursor()
if self.textCursor().hasSelection():
text = cursor.selectedText()
cursor.beginEditBlock()
cursor.removeSelectedText()
cursor.insertText(html[u'start tag'])
cursor.insertText(text)
cursor.insertText(html[u'end tag'])
cursor.endEditBlock()
else:
cursor = self.textCursor()
cursor.insertText(html[u'start tag'])
cursor.insertText(html[u'end tag'])
class Highlighter(QtGui.QSyntaxHighlighter):
    """
    Syntax highlighter that underlines words not found in the current
    spelling dictionary with a red squiggle.
    """
    WORDS = u'(?iu)[\w\']+'

    def __init__(self, *args):
        """
        Constructor. Spell checking is a no-op until a dictionary is
        assigned to ``spellingDictionary``.
        """
        QtGui.QSyntaxHighlighter.__init__(self, *args)
        self.spellingDictionary = None

    def highlightBlock(self, text):
        """
        Underline every misspelt word in ``text``.
        """
        if not self.spellingDictionary:
            return
        misspelt_format = QtGui.QTextCharFormat()
        misspelt_format.setUnderlineColor(QtCore.Qt.red)
        misspelt_format.setUnderlineStyle(QtGui.QTextCharFormat.SpellCheckUnderline)
        for match in re.finditer(self.WORDS, unicode(text)):
            if self.spellingDictionary.check(match.group()):
                continue
            # Mark the span of the misspelt word.
            self.setFormat(match.start(), match.end() - match.start(), misspelt_format)
class SpellAction(QtGui.QAction):
    """
    A QAction that re-emits its own text through the ``correct`` signal
    whenever it is triggered.
    """
    # Emitted with the action's display text on trigger.
    correct = QtCore.pyqtSignal(unicode)

    def __init__(self, *args):
        """
        Constructor.
        """
        QtGui.QAction.__init__(self, *args)
        self.triggered.connect(self._forward_text)

    def _forward_text(self, checked):
        """
        Slot for ``triggered``: forward the action's text on ``correct``.
        """
        self.correct.emit(self.text())
|
marmyshev/transitions
|
openlp/core/lib/spelltextedit.py
|
Python
|
gpl-2.0
| 9,166
|
[
"Brian"
] |
b95d1fe83779415534c0c2904d427fcf66ac5cf3f7241fcbc4ca7c97f85bebfa
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import shutil
import os
import ansible.constants as C
from ansible.galaxy import Galaxy
from ansible.galaxy.role import GalaxyRole
from ansible.playbook.role.requirement import RoleRequirement
import ruamel.yaml
from ruamel.yaml.comments import CommentedMap
from .temp import MakeTempDir
from container import exceptions
from container.utils.visibility import getLogger
logger = getLogger(__name__)
ANSIBLE_CONTAINER_PATH = '/_src'
class AttrDict(dict):
    """
    Dictionary whose keys are also readable and writable as attributes.

    Aliasing ``__dict__`` to the mapping itself makes ``d.key`` and
    ``d['key']`` address the same storage.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Attribute access and item access share one namespace.
        self.__dict__ = self
class InCaseOfFail(object):
    """
    Context manager that snapshots the project's container.yml and
    requirements.yml into a temp dir on entry and restores them when the
    body raises, so a failed install leaves the project unchanged.
    """

    def __init__(self, temp_dir):
        self.temp_dir = temp_dir

    def __enter__(self):
        # Back up each project file that currently exists.
        for name in ['container.yml', 'requirements.yml']:
            source = os.path.join(ANSIBLE_CONTAINER_PATH, name)
            if os.path.isfile(source):
                shutil.copyfile(source, os.path.join(self.temp_dir, name))

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only restore on failure; a clean exit keeps the updated files.
        if exc_type:
            logger.info('Undoing changes to .yml files')
            for name in ['container.yml', 'requirements.yml']:
                backup = os.path.join(self.temp_dir, name)
                if os.path.isfile(backup):
                    shutil.copyfile(backup,
                                    os.path.join(ANSIBLE_CONTAINER_PATH, name))
class AnsibleContainerGalaxy(object):
    """
    Installs Ansible Galaxy roles into an Ansible Container project and
    records each containerized role in the project's container.yml and
    requirements.yml files.
    """
    # Galaxy API client; created lazily in install().
    _galaxy = None
    def install(self, roles):
        """
        Install ``roles`` (and, transitively, their dependencies) into a
        temporary roles path, updating container.yml and requirements.yml
        for each successfully installed role.

        Raises AnsibleContainerGalaxyFatalException immediately on fatal
        errors; per-role failures are collected and raised as a single
        AnsibleContainerGalaxyRoleException at the end.
        """
        roles_to_install = list(roles)
        with MakeTempDir() as temp_dir:
            self._galaxy = Galaxy(AttrDict(api_server=C.GALAXY_SERVER,
                                           ignore_certs=C.GALAXY_IGNORE_CERTS,
                                           ignore_errors=False,
                                           no_deps=False,
                                           roles_path=[temp_dir],
                                           token=None)) # FIXME: support tokens
            roles_processed = []
            role_failure = False
            with InCaseOfFail(temp_dir):
                # Work-queue traversal: installing a role may enqueue its
                # dependencies, skipping any already queued or processed.
                while roles_to_install:
                    try:
                        role_to_install = roles_to_install.pop()
                        role_obj, installed = self._role_to_temp_space(role_to_install)
                        if installed:
                            deps = role_obj.metadata.get('dependencies', [])
                            for dep in deps:
                                if dep not in roles_to_install + roles_processed:
                                    roles_to_install.append(dep)
                            self._update_container_yml(role_obj)
                            self._update_requirements_yml(role_obj)
                            roles_processed.append(role_to_install)
                    except exceptions.AnsibleContainerGalaxyFatalException as exc:
                        logger.error(exc)
                        raise
                    except exceptions.AnsibleContainerGalaxyRoleException as exc:
                        # Per-role failure: log, remember, keep going.
                        logger.error(exc)
                        role_failure = True
                        continue
            if role_failure:
                raise exceptions.AnsibleContainerGalaxyRoleException('One or more roles failed.')
    def _role_to_temp_space(self, role_req):
        """
        Parse a role requirement string and install it into the temporary
        roles path.  Returns ``(GalaxyRole, installed_bool)``.
        """
        role_req_kwargs = RoleRequirement.role_yaml_parse(role_req.strip())
        role_obj = GalaxyRole(self._galaxy, **role_req_kwargs)
        installed = role_obj.install()
        return role_obj, installed
    @staticmethod
    def _get_container_yml_snippet(role_obj):
        """
        Load the role's meta/container.yml snippet.

        Returns None when the role is not containerized (no snippet file)
        or when the snippet is unreadable or not a non-empty mapping.
        """
        container_yml_path = os.path.join(role_obj.path, 'meta', 'container.yml')
        snippet = None
        if not os.path.exists(container_yml_path):
            logger.debug('No %s found for %s, not containerized' % (container_yml_path, role_obj.name))
            return snippet
        try:
            snippet = ruamel.yaml.round_trip_load(open(container_yml_path))
        except Exception:
            logger.exception('Error loading container.yml snippet for %s' % role_obj.name)
            return None
        logger.debug('Role %s is containerized', role_obj)
        try:
            # A valid snippet must be a non-empty mapping.
            assert isinstance(snippet, dict) and len(snippet) > 0
        except AssertionError:
            logger.exception('Role %s container.yml is malformed' % role_obj.name)
            return None
        return snippet
    @staticmethod
    def _get_knobs_and_dials(role_obj):
        """
        Return the role's defaults/main.yml as a mapping; an empty
        CommentedMap when the file is absent, empty or unreadable.

        NOTE(review): not referenced anywhere in this module's visible
        code — possibly kept for external callers; confirm before removal.
        """
        defaults_yml_path = os.path.join(role_obj.path, 'defaults', 'main.yml')
        if os.path.exists(defaults_yml_path):
            try:
                defaults = ruamel.yaml.round_trip_load(open(defaults_yml_path))
            except Exception as exc:
                # Fall through to the final return on load failure.
                logger.exception('Error loading defaults/main.yml for %s - %s' % (role_obj.name, str(exc)))
            else:
                if not defaults:
                    defaults = CommentedMap()
                return defaults
        return CommentedMap()
    def _update_container_yml(self, role_obj):
        """
        Add a service entry for a containerized role to the project's
        container.yml.  Returns the new service key, or None when the
        role has no container.yml snippet.

        Raises a fatal exception when container.yml cannot be read or
        written, and a role exception when the service name collides.
        """
        snippet = self._get_container_yml_snippet(role_obj)
        if not snippet:
            return None
        container_yml_path = os.path.join(ANSIBLE_CONTAINER_PATH, 'container.yml')
        try:
            container_yml = ruamel.yaml.round_trip_load(open(container_yml_path))
        except Exception as exc:
            raise exceptions.AnsibleContainerGalaxyFatalException('Failed to load container.yml: %s' % str(exc))
        if not container_yml['services']:
            container_yml['services'] = {}
        services = container_yml['services']
        # Service name is the role name with its namespace prefix removed.
        new_service_key = role_obj.name.split('.', 1)[-1]
        if new_service_key in services:
            raise exceptions.AnsibleContainerGalaxyRoleException(
                'Role defines service %s, but container.yml already has a service with this name' % new_service_key)
        # Add role name to the service's list of roles
        services[new_service_key] = {}
        if not services[new_service_key].get('roles'):
            services[new_service_key]['roles'] = []
        if role_obj.name not in services[new_service_key]['roles']:
            services[new_service_key]['roles'].append(role_obj.name)
        try:
            ruamel.yaml.round_trip_dump(container_yml,
                                        stream=open(container_yml_path, 'w'))
        except Exception as exc:
            raise exceptions.AnsibleContainerGalaxyFatalException('Error updating container.yml - %s' % str(exc))
        return new_service_key
    def _update_requirements_yml(self, role_obj):
        """
        Append the role to the project's requirements.yml unless an entry
        with the same ``src`` already exists.  Raises a fatal exception
        when the file cannot be read or written.
        """
        requirements_yml_path = os.path.join(ANSIBLE_CONTAINER_PATH, 'requirements.yml')
        requirements = None
        if os.path.exists(requirements_yml_path):
            try:
                requirements = ruamel.yaml.round_trip_load(open(requirements_yml_path)) or []
            except Exception as exc:
                raise exceptions.AnsibleContainerGalaxyFatalException(
                    'Could not load project requirements.yml - %s' % str(exc))
        if not requirements:
            requirements = []
        for req in requirements:
            if req.get('src', '') == role_obj.src:
                logger.warning('Requirement %s already found in requirements.yml' % role_obj.name)
                return
        # Only record the fields that deviate from Galaxy defaults.
        role_def = {}
        role_def[u'src'] = role_obj.src
        if role_obj.version and role_obj.version != 'master':
            role_def[u'version'] = role_obj.version
        if role_obj.scm:
            role_def[u'scm'] = role_obj.scm
        if role_obj.name and role_obj.name != role_obj.src:
            role_def[u'name'] = role_obj.name
        requirements.append(role_def)
        try:
            ruamel.yaml.round_trip_dump(requirements,
                                        stream=open(requirements_yml_path, 'w'))
        except Exception as exc:
            # NOTE(review): unlike the other handlers here, `exc` is not
            # included in the raised message.
            raise exceptions.AnsibleContainerGalaxyFatalException('Error updating requirements.yml')
|
chouseknecht/ansible-container
|
container/utils/galaxy.py
|
Python
|
lgpl-3.0
| 8,139
|
[
"Galaxy"
] |
64f98b24f5b7304d10fa16afebb3930521075a5706be67169a88f61b65617141
|
# Made by Mr. Have fun! Version 0.2
# Shadow Weapon Coupons contributed by BiTi for the Official L2J Datapack Project
# Visit http://forum.l2jdp.com for more details
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
# Internal quest identifier used to look up the player's QuestState.
qn = "405_PathToCleric"
# Quest item IDs handed out / collected during the quest.
LETTER_OF_ORDER1 = 1191
LETTER_OF_ORDER2 = 1192
BOOK_OF_LEMONIELL = 1193
BOOK_OF_VIVI = 1194
BOOK_OF_SIMLON = 1195
BOOK_OF_PRAGA = 1196
CERTIFICATE_OF_GALLINT = 1197
PENDANT_OF_MOTHER = 1198
NECKLACE_OF_MOTHER = 1199
LEMONIELLS_COVENANT = 1200
# Final reward item proving quest completion.
MARK_OF_FAITH = 1201
class Quest (JQuest) :
    """Class-transfer quest 405 'Path To Cleric' (Jython L2J quest script)."""
    def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
    def onEvent (self,event,st) :
        """Handle dialog events; event "1" is the player accepting the quest."""
        htmltext = event
        level = st.getPlayer().getLevel()
        classId = st.getPlayer().getClassId().getId()
        if event == "1" :
            st.set("id","0")
            # Eligible: level 19+, class 0x0a, and the reward not yet owned.
            if level >= 19 and classId == 0x0a and st.getQuestItemsCount(MARK_OF_FAITH) == 0 :
                st.set("cond","1")
                st.setState(STARTED)
                st.playSound("ItemSound.quest_accept")
                st.giveItems(LETTER_OF_ORDER1,1)
                htmltext = "30022-05.htm"
            elif classId != 0x0a :
                if classId == 0x0f :
                    htmltext = "30022-02a.htm"
                else:
                    htmltext = "30022-02.htm"
            elif level<19 and classId == 0x0a :
                htmltext = "30022-03.htm"
            elif level >= 19 and classId == 0x0a and st.getQuestItemsCount(MARK_OF_FAITH) == 1 :
                # Quest already completed once.
                htmltext = "30022-04.htm"
        return htmltext
    def onTalk (self,npc,player):
        """Route NPC dialog by NPC id and current quest-item inventory."""
        htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
        st = player.getQuestState(qn)
        if not st : return htmltext
        npcId = npc.getNpcId()
        id = st.getState()
        # Only the quest giver (30022) responds outside the STARTED state.
        if npcId != 30022 and id != STARTED : return htmltext
        npcId = npc.getNpcId()
        htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
        id = st.getState()
        if id == CREATED :
            st.setState(STARTING)
            st.set("cond","0")
            st.set("onlyone","0")
            st.set("id","0")
        if npcId == 30022 and st.getInt("cond")==0 :
            if st.getInt("cond")<15 :
                if st.getQuestItemsCount(MARK_OF_FAITH) == 0 :
                    htmltext = "30022-01.htm"
                    return htmltext
                else:
                    htmltext = "30022-04.htm"
            else:
                htmltext = "30022-04.htm"
        elif npcId == 30022 and st.getInt("cond") and st.getQuestItemsCount(LETTER_OF_ORDER2)==1 and st.getQuestItemsCount(LEMONIELLS_COVENANT)==0 :
            htmltext = "30022-07.htm"
        elif npcId == 30022 and st.getInt("cond") and st.getQuestItemsCount(LETTER_OF_ORDER2)==1 and st.getQuestItemsCount(LEMONIELLS_COVENANT)==1 :
            # Final turn-in: trade the covenant for the Mark of Faith.
            htmltext = "30022-09.htm"
            st.takeItems(LEMONIELLS_COVENANT,1)
            st.takeItems(LETTER_OF_ORDER2,1)
            st.giveItems(MARK_OF_FAITH,1)
            st.set("cond","0")
            st.setState(COMPLETED)
            st.playSound("ItemSound.quest_finish")
        elif npcId == 30022 and st.getInt("cond") and st.getQuestItemsCount(LETTER_OF_ORDER1)==1 :
            # First stage complete once all three books are collected.
            if st.getQuestItemsCount(BOOK_OF_VIVI) == 1 and st.getQuestItemsCount(BOOK_OF_SIMLON)>0 and st.getQuestItemsCount(BOOK_OF_PRAGA) == 1 :
                htmltext = "30022-08.htm"
                st.takeItems(BOOK_OF_PRAGA,1)
                st.takeItems(BOOK_OF_VIVI,1)
                st.takeItems(BOOK_OF_SIMLON,3)
                st.takeItems(LETTER_OF_ORDER1,1)
                st.giveItems(LETTER_OF_ORDER2,1)
                st.set("cond","3")
            else:
                htmltext = "30022-06.htm"
        elif npcId == 30253 and st.getInt("cond") and st.getQuestItemsCount(LETTER_OF_ORDER1)==1 :
            if st.getQuestItemsCount(BOOK_OF_SIMLON) == 0 :
                htmltext = "30253-01.htm"
                st.giveItems(BOOK_OF_SIMLON,3)
            elif st.getQuestItemsCount(BOOK_OF_SIMLON)>0 :
                htmltext = "30253-02.htm"
        elif npcId == 30030 and st.getInt("cond") and st.getQuestItemsCount(LETTER_OF_ORDER1)==1 :
            if st.getQuestItemsCount(BOOK_OF_VIVI) == 0 :
                htmltext = "30030-01.htm"
                st.giveItems(BOOK_OF_VIVI,1)
            elif st.getQuestItemsCount(BOOK_OF_VIVI) == 1 :
                htmltext = "30030-02.htm"
        elif npcId == 30333 and st.getInt("cond") and st.getQuestItemsCount(LETTER_OF_ORDER1)==1 :
            # 30333 gives the necklace, then trades necklace + pendant
            # (dropped by mobs, see onKill) for the Book of Praga.
            if st.getQuestItemsCount(BOOK_OF_PRAGA) == 0 and st.getQuestItemsCount(NECKLACE_OF_MOTHER) == 0 :
                htmltext = "30333-01.htm"
                st.giveItems(NECKLACE_OF_MOTHER,1)
            elif st.getQuestItemsCount(BOOK_OF_PRAGA) == 0 and st.getQuestItemsCount(NECKLACE_OF_MOTHER) == 1 and st.getQuestItemsCount(PENDANT_OF_MOTHER) == 0 :
                htmltext = "30333-02.htm"
            elif st.getQuestItemsCount(BOOK_OF_PRAGA) == 0 and st.getQuestItemsCount(NECKLACE_OF_MOTHER) == 1 and st.getQuestItemsCount(PENDANT_OF_MOTHER) == 1 :
                htmltext = "30333-03.htm"
                st.takeItems(NECKLACE_OF_MOTHER,1)
                st.takeItems(PENDANT_OF_MOTHER,1)
                st.giveItems(BOOK_OF_PRAGA,1)
                st.set("cond","2")
            elif st.getQuestItemsCount(BOOK_OF_PRAGA)>0 :
                htmltext = "30333-04.htm"
        elif npcId == 30408 and st.getInt("cond") :
            if st.getQuestItemsCount(LETTER_OF_ORDER2) == 0 :
                htmltext = "30408-02.htm"
            elif st.getQuestItemsCount(LETTER_OF_ORDER2) == 1 and st.getQuestItemsCount(BOOK_OF_LEMONIELL) == 0 and st.getQuestItemsCount(LEMONIELLS_COVENANT) == 0 and st.getQuestItemsCount(CERTIFICATE_OF_GALLINT) == 0 :
                htmltext = "30408-01.htm"
                st.giveItems(BOOK_OF_LEMONIELL,1)
                st.set("cond","4")
            elif st.getQuestItemsCount(LETTER_OF_ORDER2) == 1 and st.getQuestItemsCount(BOOK_OF_LEMONIELL) == 1 and st.getQuestItemsCount(LEMONIELLS_COVENANT) == 0 and st.getQuestItemsCount(CERTIFICATE_OF_GALLINT) == 0 :
                htmltext = "30408-03.htm"
            elif st.getQuestItemsCount(LETTER_OF_ORDER2) == 1 and st.getQuestItemsCount(BOOK_OF_LEMONIELL) == 0 and st.getQuestItemsCount(LEMONIELLS_COVENANT) == 0 and st.getQuestItemsCount(CERTIFICATE_OF_GALLINT) == 1 :
                htmltext = "30408-04.htm"
                st.takeItems(CERTIFICATE_OF_GALLINT,1)
                st.giveItems(LEMONIELLS_COVENANT,1)
                st.set("cond","6")
            elif st.getQuestItemsCount(LETTER_OF_ORDER2) == 1 and st.getQuestItemsCount(BOOK_OF_LEMONIELL) == 0 and st.getQuestItemsCount(LEMONIELLS_COVENANT) == 1 and st.getQuestItemsCount(CERTIFICATE_OF_GALLINT) == 0 :
                htmltext = "30408-05.htm"
        elif npcId == 30017 and st.getInt("cond") and st.getQuestItemsCount(LETTER_OF_ORDER2)==1 and st.getQuestItemsCount(LEMONIELLS_COVENANT)==0 :
            if st.getQuestItemsCount(BOOK_OF_LEMONIELL) == 1 and st.getQuestItemsCount(CERTIFICATE_OF_GALLINT) == 0 :
                htmltext = "30017-01.htm"
                st.takeItems(BOOK_OF_LEMONIELL,1)
                st.giveItems(CERTIFICATE_OF_GALLINT,1)
                st.set("cond","5")
            elif st.getQuestItemsCount(BOOK_OF_LEMONIELL) == 0 and st.getQuestItemsCount(CERTIFICATE_OF_GALLINT) == 1 :
                htmltext = "30017-02.htm"
        return htmltext
    def onKill(self,npc,player,isPet):
        """Either registered mob (20026/20029) drops the pendant once."""
        st = player.getQuestState(qn)
        if not st : return
        if st.getState() != STARTED : return
        npcId = npc.getNpcId()
        if npcId == 20026 :
            st.set("id","0")
            if st.getInt("cond") and st.getQuestItemsCount(PENDANT_OF_MOTHER) == 0 :
                st.giveItems(PENDANT_OF_MOTHER,1)
                st.playSound("ItemSound.quest_middle")
        elif npcId == 20029 :
            st.set("id","0")
            if st.getInt("cond") and st.getQuestItemsCount(PENDANT_OF_MOTHER) == 0 :
                st.giveItems(PENDANT_OF_MOTHER,1)
                st.playSound("ItemSound.quest_middle")
        return
QUEST = Quest(405,qn,"Path To Cleric")
# Quest state machine (referenced from the class methods above).
CREATED = State('Start', QUEST)
STARTING = State('Starting', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
# NPC 30022 starts the quest; the others only talk during it.
QUEST.addStartNpc(30022)
QUEST.addTalkId(30022)
QUEST.addTalkId(30017)
QUEST.addTalkId(30030)
QUEST.addTalkId(30253)
QUEST.addTalkId(30333)
QUEST.addTalkId(30408)
# Monsters whose kills can yield the Pendant of Mother (see onKill).
QUEST.addKillId(20026)
QUEST.addKillId(20029)
# Register quest-item drops for the STARTED state — presumably so the
# engine can clean them up on abort; confirm against the JQuest API.
STARTED.addQuestDrop(30408,LEMONIELLS_COVENANT,1)
STARTED.addQuestDrop(30022,LETTER_OF_ORDER2,1)
STARTED.addQuestDrop(30333,BOOK_OF_PRAGA,1)
STARTED.addQuestDrop(30030,BOOK_OF_VIVI,1)
STARTED.addQuestDrop(30253,BOOK_OF_SIMLON,1)
STARTED.addQuestDrop(30022,LETTER_OF_ORDER1,1)
STARTED.addQuestDrop(30333,NECKLACE_OF_MOTHER,1)
STARTED.addQuestDrop(20026,PENDANT_OF_MOTHER,1)
STARTED.addQuestDrop(20029,PENDANT_OF_MOTHER,1)
STARTED.addQuestDrop(30017,CERTIFICATE_OF_GALLINT,1)
STARTED.addQuestDrop(30408,BOOK_OF_LEMONIELL,1)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/405_PathToCleric/__init__.py
|
Python
|
gpl-3.0
| 8,825
|
[
"VisIt"
] |
ca4a001c43f18fb4479fff5c97ff105ca90fb887c04bfe6dd0f54946725d5cd2
|
"""
AGNEXTRACT
extract galaxy spectra from image or images and combine them
"""
import os, sys, glob, shutil
import numpy as np
from scipy.ndimage.filters import median_filter
from astropy.io import fits
from PySpectrograph.Spectra import findobj, Spectrum
import argparse
import pylab as pl
def extract(data, error, mask, wave, y1, y2):
    """Extract a 1D spectrum from a 2D spectral image.

    Rows [y1, y2) are summed after subtracting a sky spectrum estimated
    from two dy-wide bands flanking the object window (dy = y2 - y1).

    Parameters
    ----------
    data: numpy.ndarray
        Flux data for spectra
    error: numpy.ndarray
        error data for spectra
    mask: numpy.ndarray
        mask data for spectra
    wave: numpy.ndarray
        wavelength map for spectra
    y1, y2: int
        lower/upper row bounds of the object extraction window

    Returns
    -------
    w: numpy.ndarray
        common wavelength grid
    f: numpy.ndarray
        sky-subtracted summed flux
    e: numpy.ndarray
        propagated error (quadrature sum; approximate)
    b: numpy.ndarray
        bad-pixel flags (uint8, 1 where any contributing pixel was masked)
    wbin: float
        wavelength bin width
    """
    # Estimate the wavelength range to be used from the middle row.
    ys, xs = data.shape
    w = wave[ys // 2, :]
    wmin = w[w > 0].min()
    wmax = w[w > 0].max()
    wmask = ((w > wmin) * (w < wmax))
    wbin = (wmax - wmin) / wmask.sum()
    w = np.arange(wmin, wmax, wbin)
    # Estimate the sky as the mean of two bands on either side of the
    # object window.  list(...) concatenation is required on Python 3,
    # where range objects cannot be added.
    s = np.zeros_like(w)
    count = 0
    dy = y2 - y1
    sky_rows = list(range(y1 - 2 * dy, y1 - dy)) + list(range(y2 + dy, y2 + 2 * dy))
    for y in sky_rows:
        s += np.interp(w, wave[y, wmask], data[y, wmask])
        count += 1
    s = s / count
    # Extract the spectra: sum sky-subtracted flux over the object rows.
    f = np.zeros_like(w)
    e = np.zeros_like(w)
    b = np.zeros_like(w)
    for y in range(y1, y2):
        f += np.interp(w, wave[y, wmask], data[y, wmask]) - s
        # not exactly correct but an estimate: sum variances in quadrature
        e += np.interp(w, wave[y, wmask], error[y, wmask]) ** 2
        b += np.interp(w, wave[y, wmask], mask[y, wmask])
    e = e ** 0.5
    b = 1.0 * (b > 0)
    b = b.astype('uint8')
    return w, f, e, b, wbin
def write_spectra(wave, sci_ow, var_ow, badbin_ow, header, wbin, outfile):
    """Write out the spectra in the correct format

    Builds a multi-extension FITS file: primary HDU (header only), then
    'SCI' (flux), 'VAR' (variance) and 'BPM' (bad-pixel mask) extensions,
    with a linear wavelength WCS along axis 1.

    NOTE(review): header.update(key, value) is the legacy positional
    pyfits/astropy API; modern astropy requires header[key] = value.
    """
    header.update('VAREXT',2)
    header.update('BPMEXT',3)
    # Reference wavelength at the centre of the first bin.
    header.update('CRVAL1',wave[0]+wbin/2.) # this needs to be fixed
    header.update('CRVAL2',0)
    header.update('CDELT1',wbin) # this needs to be fixed
    header.update('CTYPE1','Angstroms')
    hduout = fits.PrimaryHDU(header=header)
    hduout = fits.HDUList(hduout)
    # NOTE(review): sci_ow/var_ow/badbin_ow arrive as nested per-beam
    # lists ([[O], [E]] from __main__); confirm the intended on-disk
    # array shape against downstream polsalt readers.
    hduout.append(fits.ImageHDU(data=sci_ow, header=header, name='SCI'))
    header.update('SCIEXT',1,'Extension for Science Frame',before='VAREXT')
    hduout.append(fits.ImageHDU(data=var_ow, header=header, name='VAR'))
    hduout.append(fits.ImageHDU(data=badbin_ow, header=header, name='BPM'))
    hduout.writeto(outfile,clobber=True,output_verify='warn')
if __name__=='__main__':
    # NOTE(review): calfile, thresh and convert are set but never used below.
    calfile=None
    parser = argparse.ArgumentParser(description='Extract SALT Polarimetric data')
    parser.add_argument('image', help='Image to extract the spectra', nargs='*')
    parser.add_argument('--yo', dest='yo', type=int, help='y position of o beam')
    parser.add_argument('--ye', dest='ye', type=int, help='y position of e beam')
    parser.add_argument('--dy', dest='dy', type=int, help='y width of psf')
    #parser.add_argument('-w', dest='basic_wave', default=True, action='store_false',
    #    help='Skip wavelength calibration')
    args = parser.parse_args()
    thresh = 5
    convert = False
    for img in args.image:
        hdu = fits.open(img)
        # Ordinary (O) beam: plane 0 of each extension.
        y1 = args.yo - args.dy
        y2 = args.yo + args.dy
        o = 0
        data = hdu[1].data[o]
        error = hdu[2].data[o]
        mask = hdu[3].data[o]
        wave = hdu[4].data[o]
        wo, fo, eo, bo, wbin = extract(data, error, mask, wave, y1, y2)
        # Extraordinary (E) beam: plane 1 of each extension.
        y1 = args.ye - args.dy
        y2 = args.ye + args.dy
        o = 1
        data = hdu[1].data[o]
        error = hdu[2].data[o]
        mask = hdu[3].data[o]
        wave = hdu[4].data[o]
        we, fe, ee, be, wbin = extract(data, error, mask, wave, y1, y2)
        sci_list = [[fo], [fe]]
        err_list = [[eo], [ee]]
        bad_list = [[bo], [be]]
        # NOTE(review): both beams are written against the O-beam grid wo;
        # assumes the E-beam grid we matches — confirm.
        w = wo
        write_spectra(w, sci_list, err_list, bad_list, hdu[0].header, wbin, 'e' + img)
        # NOTE(review): exit() inside the loop means only the first image
        # is ever processed.
        exit()
        #pl.figure()
        #pl.axes([0.1, 0.7, 0.8, 0.25])
        #pl.plot(wo, fe)
        #pl.axes([0.1, 0.4, 0.8, 0.25])
        #pl.plot(wo, ee)
        ##pl.axes([0.1, 0.1, 0.8, 0.25])
        #pl.plot(wo, be)
        #pl.show()
|
saltastro/polsalt
|
scripts/pol_extract.py
|
Python
|
bsd-3-clause
| 4,155
|
[
"Galaxy"
] |
de07c3edc1ea97c8973b2488d0b494a86cef41db364e4e3ff45e2fcc3ba81507
|
import matplotlib.pyplot as plt
#%matplotlib inline
import nengo
import numpy as np
import scipy.ndimage
import matplotlib.animation as animation
from matplotlib import pylab
from PIL import Image
import nengo.spa as spa
import cPickle
from nengo_extras.data import load_mnist
from nengo_extras.vision import Gabor, Mask
#Encode categorical integer features using a one-hot aka one-of-K scheme.
def one_hot(labels, c=None):
    """Encode categorical integer features using a one-hot (one-of-K) scheme.

    Parameters
    ----------
    labels : 1-D array of non-negative ints
        The class label of each sample.
    c : int, optional
        Number of classes.  Defaults to ``labels.max() + 1``.

    Returns
    -------
    (n, c) float array with exactly one 1 per row.
    """
    assert labels.ndim == 1
    n = labels.shape[0]
    # max+1 rather than len(np.unique(labels)): with gappy label sets
    # (e.g. [0, 2]) len(unique) is too small and the fancy indexing
    # below would go out of bounds.  Identical for contiguous labels.
    c = int(labels.max()) + 1 if c is None else c
    y = np.zeros((n, c))
    y[np.arange(n), labels] = 1
    return y
# Fixed seed so encoder generation below is reproducible.
rng = np.random.RandomState(9)
# --- load the data
img_rows, img_cols = 28, 28
(X_train, y_train), (X_test, y_test) = load_mnist()
X_train = 2 * X_train - 1  # normalize to -1 to 1
X_test = 2 * X_test - 1  # normalize to -1 to 1
train_targets = one_hot(y_train, 10)
test_targets = one_hot(y_test, 10)
# --- set up network parameters
# Want to encode and decode the image, so input and output dimensions
# both equal the flattened image size (784).
n_vis = X_train.shape[1]
n_out = X_train.shape[1]
# number of neurons / dimensions of the semantic pointer
n_hid = 5000  # Try with more neurons for more accuracy
#n_hid = 1000
# Want the encoding/decoding done on the training images
ens_params = dict(
    eval_points=X_train,
    neuron_type=nengo.LIFRate(),  # rate neurons; Why not use spiking LIF?
    intercepts=nengo.dists.Choice([-0.5]),
    max_rates=nengo.dists.Choice([100]),
)
# Least-squares solver with L2 regularization.
solver = nengo.solvers.LstsqL2(reg=0.01)
#solver = nengo.solvers.LstsqL2(reg=0.0001)
solver2 = nengo.solvers.LstsqL2(reg=0.01)
# Autoencoder network: ensemble `a` decodes back to the input image at `v`.
with nengo.Network(seed=3) as model:
    a = nengo.Ensemble(n_hid, n_vis, seed=3, **ens_params)
    v = nengo.Node(size_in=n_out)
    conn = nengo.Connection(
        a, v, synapse=None,
        eval_points=X_train, function=X_train,  # want the same thing out
        solver=solver)
'''
v2 = nengo.Node(size_in=train_targets.shape[1])
conn2 = nengo.Connection(
a, v2, synapse=None,
eval_points=X_train, function=train_targets, #Want to get the labels out
solver=solver2)
'''
def get_outs(sim, images):
    """Return the decoded classifier outputs for *images*.

    NOTE(review): references ``conn2``, which is only defined inside the
    commented-out block above — calling this as-is raises NameError.
    Unused by the active code path below.
    """
    _, acts = nengo.utils.ensemble.tuning_curves(a, sim, inputs=images)
    return np.dot(acts, sim.data[conn2].weights.T)
'''
def get_error(sim, images, labels):
return np.argmax(get_outs(sim, images), axis=1) != labels
def get_labels(sim,images):
return np.argmax(get_outs(sim, images), axis=1)
'''
# Get the neuron activity of an image or group of images (this is the
# semantic pointer in this case).
def get_activities(sim, images):
    """Return ensemble `a`'s neuron activities (tuning-curve responses) for *images*."""
    _, acts = nengo.utils.ensemble.tuning_curves(a, sim, inputs=images)
    return acts
def get_encoder_outputs(sim,images):
    """Project *images* onto the ensemble's encoders (pre-neuron input).

    The transpose aligns shapes for the dot product — presumably
    encoders are (n_hid, n_vis), so the result is (n_images, n_hid);
    confirm against nengo's Ensemble.encoders convention.
    """
    outs = np.dot(images,sim.data[a].encoders.T)  # before the neurons
    return outs
def intense(img):
    """Return a copy of *img* thresholded to {-1, 0, 1}.

    Positive values become 1, negative values become -1, zeros are kept.
    The input array is not modified.
    """
    out = img.copy()
    out[out > 0] = 1
    out[out < 0] = -1
    return out
def filtered(img):
    """Gaussian-smooth *img* (sigma=1), then threshold it to {-1, 0, 1}."""
    blurred = scipy.ndimage.gaussian_filter(img, sigma=1)
    return intense(blurred)
# Images to train, starting at random orientation
orig_imgs = X_train[:100000].copy()
for img in orig_imgs:
    img[:] = filtered(scipy.ndimage.interpolation.rotate(np.reshape(img,(28,28)),
        (np.random.randint(360)),reshape=False,mode="nearest").ravel())
# Fixed clockwise rotation step applied to every image.
degrees = -6
# Images rotated a fixed amount from the original random orientation
rotated_imgs =orig_imgs.copy()
for img in rotated_imgs:
    img[:] = filtered(scipy.ndimage.interpolation.rotate(np.reshape(img,(28,28)),degrees,reshape=False,mode="nearest").ravel())
#^encoder outputs
# Add noise — applied to orig_imgs only, AFTER rotated_imgs was copied,
# so the network inputs are noisy while the rotation targets stay clean.
for img in orig_imgs:
    noise = np.random.random([28,28]).ravel()
    img[:] = img + noise
'''#Images not used for training, but for testing (all at random orientations)
test_imgs = X_test[:1000].copy()
for img in test_imgs:
img[:] = scipy.ndimage.interpolation.rotate(np.reshape(img,(28,28)),
(np.random.randint(360)),reshape=False,mode="nearest").ravel()
'''
# linear filter used for edge detection as encoders, more plausible for human visual system
encoders = Gabor().generate(n_hid, (11, 11), rng=rng)
encoders = Mask((28, 28)).populate(encoders, rng=rng, flatten=True)
# Set the ensemble's encoders to the Gabor filters.
a.encoders = encoders
# Check the encoders were correctly made
#plt.imshow(encoders[0].reshape(28, 28), vmin=encoders[0].min(), vmax=encoders[0].max(), cmap='gray')
with nengo.Simulator(model) as sim:
    # Neuron activities of different mnist images
    # (the semantic pointers).
    orig_acts = get_activities(sim,orig_imgs)
    #rotated_acts = get_activities(sim,rotated_imgs)
    #test_acts = get_activities(sim,test_imgs)
    #X_test_acts = get_activities(sim,X_test)
    #labels_out = get_outs(sim,X_test)
    rotated_after_encoders = get_encoder_outputs(sim,rotated_imgs)
    #original_after_encoders = get_encoder_outputs(sim,orig_imgs)
# solvers for a learning rule
#solver_tranform = nengo.solvers.LstsqL2(reg=1e-8)
#solver_word = nengo.solvers.LstsqL2(reg=1e-8)
solver_rotate_encoder = nengo.solvers.LstsqL2(reg=1e-8)
#solver_identity_encoder = nengo.solvers.LstsqL2(reg=1e-8)
# find weight matrix between neuron activity of the original image and the rotated image
# weights returns a tuple including information about the learning process; just want the weight matrix
#weights,_ = solver_tranform(orig_acts, rotated_acts)
# find weight matrix between labels and neuron activity
#label_weights,_ = solver_word(labels_out,X_test_acts)
rotated_after_encoder_weights,_ = solver_rotate_encoder(orig_acts,rotated_after_encoders)
#identity_after_encoder_weights,_ = solver_identity_encoder(orig_acts,original_after_encoders)
#filename = "label_weights_clockwise" + str(n_hid) +".p"
#cPickle.dump(label_weights, open( filename, "wb" ) )
#filename = "activity_to_img_weights_clockwise" + str(n_hid) +".p"
#cPickle.dump(sim.data[conn].weights.T, open( filename, "wb" ) )
#filename = "rotation_weights_clockwise" + str(n_hid) +".p"
#cPickle.dump(weights, open( filename, "wb" ) )
# Persist the learned activity -> rotated-encoder-output mapping.
filename = "rotated_after_encoder_weights_clockwise_filter_noise" + str(n_hid) +".p"
cPickle.dump(rotated_after_encoder_weights, open( filename, "wb" ) )
#filename = "identity_after_encoder_weights" + str(n_hid) +".p"
#cPickle.dump(identity_after_encoder_weights, open( filename, "wb" ) )
|
science-of-imagination/nengo-buffer
|
Project/mental_rotation_training_clockwise_noise.py
|
Python
|
gpl-3.0
| 6,433
|
[
"NEURON"
] |
7e33fe5e3fac24d45092da3e349fe2b16716404d4faad2df456129f2de0bf56c
|
"""PyZMQ and 0MQ version functions."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
from .backend import zmq_version_info
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
__version__ = '13.1.0'
# Short VCS revision hash — presumably filled in by release tooling;
# empty for release builds (see pyzmq_version below).
__revision__ = ''
def pyzmq_version():
    """return the version of pyzmq as a string

    Dev builds append the first six characters of the VCS revision,
    e.g. '13.1.0@abcdef'.
    """
    if not __revision__:
        return __version__
    return '@'.join([__version__, __revision__[:6]])
def pyzmq_version_info():
    """return the pyzmq version as a tuple of numbers

    If pyzmq is a dev version, the patch-version will be `inf`.
    This helps comparison of version tuples in Python 3, where str-int
    comparison is no longer legal.
    """
    numbers = [int(chunk) for chunk in re.findall('[0-9]+', __version__)]
    if 'dev' in __version__:
        # dev releases sort after every numbered patch release
        numbers.append(float('inf'))
    return tuple(numbers)
def zmq_version():
    """return the version of libzmq as a string"""
    major, minor, patch = zmq_version_info()
    return "%i.%i.%i" % (major, minor, patch)
# Names exported via `from ... import *`.
__all__ = ['zmq_version', 'zmq_version_info',
           'pyzmq_version','pyzmq_version_info',
           '__version__', '__revision__'
]
|
IsCoolEntertainment/debpkg_python-pyzmq
|
zmq/sugar/version.py
|
Python
|
lgpl-3.0
| 1,754
|
[
"Brian"
] |
d2c362d128d6a23bb9cd1792a278622b964a046eccdf9dcf915c088e8e5301c3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from benchmarklib.charts import views
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),
    # User management
    url(r'^users/', include('benchmarklib.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    url(r'^charts/', include('benchmarklib.charts.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # These routes let the error pages be previewed during development;
    # visit them in a browser to see what each error page looks like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
|
frRoy/Benchmarklib
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,489
|
[
"VisIt"
] |
fb5949358fe931a0e2b88ed2110a6e7a6a6be020d45d1be63ed66aa0c23e60e9
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2011, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 or (at your
# option) any later version as published by the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
from Products.ZenUtils.Ext import DirectRouter, DirectResponse
from Products import Zuul
from Products.ZenMessaging.audit import audit
class CloudStackRouter(DirectRouter):
    """Ext.Direct router exposing CloudStack device management to the UI."""

    def _getFacade(self):
        # Look up the cloudstack facade bound to the current context.
        return Zuul.getFacade('cloudstack', self.context)

    def add_cloudstack(self, device_name, url, api_key, secret_key, collector='localhost'):
        """Create a CloudStack device via the facade and audit the action.

        Returns a DirectResponse indicating success or failure.
        """
        added = self._getFacade().add_cloudstack(
            device_name, url, api_key, secret_key, collector)
        # Record the UI action regardless of the outcome (matches the
        # original control flow: audit happens before the result check).
        audit('UI.Cloudstack.Add', url=url, collector=collector)
        if not added:
            return DirectResponse.fail(
                "Failed to add CloudStack device: %s" % device_name)
        return DirectResponse.succeed()
|
zenoss/ZenPacks.zenoss.CloudStack
|
ZenPacks/zenoss/CloudStack/routers.py
|
Python
|
gpl-2.0
| 1,285
|
[
"VisIt"
] |
3875998c7fde1dd4871f1bb06a5549f1a06a8f0c84d8da35266ff253b276d577
|
from datetime import date
# Race events keyed by official event name. Each value is a
# (distance, date) tuple; distance codes used here are '5k', '10k',
# 'half' (half marathon) and 'full' (marathon).
EVENTS = {
    "2017 Big Fun Run Liverpool": ("5k", date(2017, 8, 12)),
    "2017 Dog Jog Liverpool": ("5k", date(2017, 8, 12)),
    "2017 Big Fun Run Sheffield": ("5k", date(2017, 8, 13)),
    "2017 Dog Jog Sheffield": ("5k", date(2017, 8, 13)),
    "2017 Big Fun Run Derby": ("5k", date(2017, 8, 19)),
    "2017 Dog Jog Derby": ("5k", date(2017, 8, 19)),
    "2017 Big Fun Run Manchester": ("5k", date(2017, 8, 20)),
    "2017 Dog Jog Manchester": ("5k", date(2017, 8, 20)),
    "2017 Big Fun Run Birmingham": ("5k", date(2017, 9, 9)),
    "2017 Dog Jog Birmingham": ("5k", date(2017, 9, 9)),
    "2017 Big Fun Run Coventry": ("5k", date(2017, 9, 10)),
    "2017 Dog Jog Coventry": ("5k", date(2017, 9, 10)),
    "2017 Scottish 10K": ("10k", date(2017, 9, 24)),
    "2017 Scottish Half Marathon": ("half", date(2017, 9, 24)),
    "2017 BMF Supersonic 10K": ("10k", date(2017, 10, 7)),
    "2017 BMF Supernova 5K": ("5k", date(2017, 10, 7)),
    "2017 Bournemouth Half Marathon": ("half", date(2017, 10, 8)),
    "2017 Bournemouth Marathon": ("full", date(2017, 10, 8)),
    "2017 Big Fun Run Ipswich": ("5k", date(2017, 10, 14)),
    "2017 Dog Jog Ipswich": ("5k", date(2017, 10, 14)),
    "2017 Big Fun Run Milton Keynes": ("5k", date(2017, 10, 15)),
    "2017 Dog Jog Milton Keynes": ("5k", date(2017, 10, 15)),
    "2017 Big Fun Run Newcastle": ("5k", date(2017, 10, 22)),
    "2017 Big Fun Run London(Crystal Palace Park)": ("5k", date(2017, 10, 28)),
    "2017 Dog Jog London(Crystal Palace Park)": ("5k", date(2017, 10, 28)),
    "2017 Big Fun Run London(Victoria Park)": ("5k", date(2017, 10, 29)),
    "2017 Dog Jog London(Victoria Park)": ("5k", date(2017, 10, 29)),
    "2017 Men's 10K Edinburgh": ("10k", date(2017, 11, 5)),
    "2017 Supernova Kelpies - Friday": ("5k", date(2017, 11, 10)),
    "2017 Supernova Kelpies - Saturday": ("5k", date(2017, 11, 11)),
    "2017 Supernova Kelpies - Sunday": ("5k", date(2017, 11, 12)),
    "2018 Supernova London": ("5k", date(2018, 3, 31)),
    "2018 EMF 10k": ("10k", date(2018, 5, 26)),
    "2018 EMF 5k": ("5k", date(2018, 5, 26)),
    "2018 Edinburgh Half Marathon": ("half", date(2018, 5, 27)),
    "2018 Edinburgh Marathon": ("full", date(2018, 5, 27)),
    "2018 Men's 10K Glasgow": ("10k", date(2018, 6, 17)),
    "2018 BMF Supersonic 10K": ("10k", date(2018, 10, 6)),
    "2018 BMF Supernova 5K": ("5k", date(2018, 10, 6)),
    "2018 Bournemouth Half Marathon": ("half", date(2018, 10, 7)),
    "2018 Bournemouth Marathon": ("full", date(2018, 10, 7)),
    "2018 Men's 10K Edinburgh": ("10k", date(2018, 11, 4)),
    "2018 Supernova Kelpies - Friday": ("5k", date(2018, 11, 9)),
    "2018 Supernova Kelpies - Saturday": ("5k", date(2018, 11, 10)),
    "2018 Supernova Kelpies - Sunday": ("5k", date(2018, 11, 11)),
}
|
benranderson/training-plan
|
app/main/events.py
|
Python
|
mit
| 2,809
|
[
"CRYSTAL"
] |
0a50d698f6b02b9aae05f8d1f3e0b2a540840c261535897d2da97d286f4089d2
|
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This module allows you to control fdist (DEPRECATED).
This will allow you to call fdist and associated programs (cplot,
datacal, pv) by Mark Beaumont.
http://www.rubic.rdg.ac.uk/~mab/software.html (old)
http://www.maths.bris.ac.uk/~mamab/ (new)
"""
import os
import subprocess
import sys
from random import randint
from time import strftime, clock
# from logging import debug
def my_float(f):
    """Convert *f* to float, accepting the Jython-style "-nan" spelling."""
    # Jython can print "-nan", which float() rejects; normalise first.
    return float("nan" if f == "-nan" else f)
class FDistController(object):
    """Drive Mark Beaumont's fdist suite (fdist2/Dfdist, datacal/Ddatacal,
    cplot/cplot2, pv/pv2) through :mod:`subprocess`.
    All tools are run interactively: input is fed via stdin and results
    are parsed from their whitespace-separated stdout reports.
    """
    def __init__(self, fdist_dir='', ext=None):
        """Initializes the controller.
        fdist_dir is the directory where fdist2 is.
        ext is the extension of binaries (.exe on windows,
        none on Unix)
        """
        self.tmp_idx = 0
        self.fdist_dir = fdist_dir
        self.os_name = os.name
        # Windows binaries carry ".exe"; other platforms use no suffix.
        if sys.platform == 'win32':
            py_ext = '.exe'
        else:
            py_ext = ''
        if ext is None:
            self.ext = py_ext
        else:
            self.ext = ext
    def _get_path(self, app):
        """Returns the path to an fdist application.
        Includes Path where fdist can be found plus executable extension.
        """
        if self.fdist_dir == '':
            return app + self.ext
        else:
            return os.sep.join([self.fdist_dir, app]) + self.ext
    def _get_temp_file(self):
        """Gets a temporary file name.
        Returns a temporary file name, if executing inside jython
        tries to replace unexisting tempfile.mkstemp().
        """
        # NOTE(review): time.clock() was removed in Python 3.8, so this
        # helper only works on older interpreters -- confirm target version.
        self.tmp_idx += 1
        return strftime("%H%M%S") + str(int(clock() * 100)) + str(randint(0, 1000)) + str(self.tmp_idx)
    def run_datacal(self, data_dir='.', version=1,
                    crit_freq=0.99, p=0.5, beta=(0.25, 0.25)):
        """Executes datacal.
        data_dir - Where the data is found.
        """
        if version == 1:
            datacal_name = "datacal"
        else:
            datacal_name = "Ddatacal"
        # datacal is interactive: answers are piped in through stdin and
        # the summary lines it prints are parsed by fixed token position.
        proc = subprocess.Popen([self._get_path(datacal_name)],
                                universal_newlines=True,
                                stdin=subprocess.PIPE,
                                shell=(sys.platform != "win32"),
                                stdout=subprocess.PIPE, cwd=data_dir)
        if version == 1:
            out, err = proc.communicate('a\n')
            lines = out.split("\n")
            fst_line = lines[0].rstrip().split(' ')
            fst = my_float(fst_line[4])
            sample_line = lines[1].rstrip().split(' ')
            sample = int(sample_line[9])
        else:
            out, err = proc.communicate('%f\n%f\n%f %f\na\n' % (
                crit_freq, p, beta[0], beta[1]))
            lines = out.split("\n")
            l = lines[0].rstrip().split(" ")
            loci, pops = int(l[-5]), int(l[-2])
            fst_line = lines[1].rstrip().split(' ')
            fst = my_float(fst_line[4])
            sample_line = lines[2].rstrip().split(' ')
            sample = int(sample_line[9])
            F_line = lines[3].rstrip().split(' ')
            F, obs = my_float(F_line[5]), int(F_line[8])
        if version == 1:
            return fst, sample
        else:
            return fst, sample, loci, pops, F, obs
    def _generate_intfile(self, data_dir):
        """Generates an INTFILE.
        Parameter:
        data_dir - data directory
        """
        # 98 random 32-bit integers followed by a literal "8".
        inf = open(data_dir + os.sep + 'INTFILE', 'w')
        for i in range(98):
            inf.write(str(randint(-2 ** 31 + 1, 2 ** 31 - 1)) + '\n')
        inf.write('8\n')
        inf.close()
    def run_fdist(self, npops, nsamples, fst, sample_size,
                  mut=0, num_sims=50000, data_dir='.',
                  is_dominant=False, theta=0.06, beta=(0.25, 0.25),
                  max_freq=0.99):
        """Executes (d)fdist.
        Parameters:
        - npops - Number of populations
        - nsamples - Number of populations sampled
        - fst - expected Fst
        - sample_size - Sample size per population
        For dfdist: if zero a sample size file has to be provided
        - mut - 1=Stepwise, 0=Infinite allele
        - num_sims - number of simulations
        - data_dir - Where the data is found
        - is_dominant - If true executes dfdist
        - theta - Theta (=2Nmu)
        - beta - Parameters for the beta prior
        - max_freq - Maximum allowed frequency of the commonest allele
        Returns:
        - fst - Average Fst
        Important Note: This can take quite a while to run!
        """
        # Clamp the requested Fst to the range fdist can actually handle.
        if fst >= 0.9:
            # Lets not joke
            fst = 0.899
        if fst <= 0.0:
            # 0 will make fdist run forever
            fst = 0.001
        if is_dominant:
            config_name = "Dfdist_params"
        else:
            config_name = "fdist_params2.dat"
        # Write the parameter file the binary reads on startup.
        f = open(data_dir + os.sep + config_name, 'w')
        f.write(str(npops) + '\n')
        f.write(str(nsamples) + '\n')
        f.write(str(fst) + '\n')
        f.write(str(sample_size) + '\n')
        if is_dominant:
            f.write(str(theta) + '\n')
        else:
            f.write(str(mut) + '\n')
        f.write(str(num_sims) + '\n')
        if is_dominant:
            f.write("%f %f\n" % beta)
            f.write("%f\n" % max_freq)
        f.close()
        self._generate_intfile(data_dir)
        if is_dominant:
            bin_name = "Dfdist"
        else:
            bin_name = "fdist2"
        proc = subprocess.Popen([self._get_path(bin_name)], cwd=data_dir,
                                universal_newlines=True,
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                shell=(sys.platform != "win32"))
        out, err = proc.communicate('y\n\n')
        lines = out.split("\n")
        # Scan the report for the "average Fst" line; its last token is
        # the value we return.
        for line in lines:
            if line.startswith('average Fst'):
                fst = my_float(line.rstrip().split(' ')[-1])
        return fst
    def run_fdist_force_fst(self, npops, nsamples, fst, sample_size,
                            mut=0, num_sims=50000, data_dir='.',
                            try_runs=5000, limit=0.001, is_dominant=False,
                            theta=0.06, beta=(0.25, 0.25),
                            max_freq=0.99):
        """Executes fdist trying to force Fst.
        Parameters:
        - try_runs - Number of simulations on the part trying to get
        Fst correct
        - limit - Interval limit
        Other parameters can be seen on run_fdist.
        """
        # Binary-search the input Fst until the realised Fst of short
        # trial runs lands within `limit` of the requested value, then
        # do the full-length run with that input.
        max_run_fst = 1
        min_run_fst = 0
        current_run_fst = fst
        while True:
            real_fst = self.run_fdist(npops, nsamples, current_run_fst,
                                      sample_size, mut, try_runs, data_dir,
                                      is_dominant, theta, beta, max_freq)
            if abs(real_fst - fst) < limit:
                return self.run_fdist(npops, nsamples, current_run_fst,
                                      sample_size, mut, num_sims, data_dir,
                                      is_dominant, theta, beta, max_freq)
            if real_fst > fst:
                max_run_fst = current_run_fst
                if current_run_fst < min_run_fst + limit:
                    # we can do no better
                    # debug('Lower limit is ' + str(min_run_fst))
                    return self.run_fdist(npops, nsamples, current_run_fst,
                                          sample_size, mut, num_sims,
                                          data_dir)
                current_run_fst = (min_run_fst + current_run_fst) / 2
            else:
                min_run_fst = current_run_fst
                if current_run_fst > max_run_fst - limit:
                    return self.run_fdist(npops, nsamples, current_run_fst,
                                          sample_size, mut, num_sims,
                                          data_dir, is_dominant, theta,
                                          beta, max_freq)
                current_run_fst = (max_run_fst + current_run_fst) / 2
    def run_cplot(self, ci=0.95, data_dir='.', version=1, smooth=0.04):
        """Executes cplot.
        ci - Confidence interval.
        data_dir - Where the data is found.
        """
        self._generate_intfile(data_dir)
        if version == 1:
            cplot_name = "cplot"
        else:
            cplot_name = "cplot2"
        proc = subprocess.Popen([self._get_path(cplot_name)], cwd=data_dir,
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                shell=(sys.platform != "win32"),
                                universal_newlines=True)
        if version == 1:
            proc.communicate('out.dat out.cpl\n' + str(ci) + '\n')
        else:
            proc.communicate("\n".join([
                "data_fst_outfile out.cpl out.dat",
                str(ci), str(smooth)]))
        # Parse the confidence-line file produced above; a malformed line
        # aborts parsing and yields an empty result.
        f = open(data_dir + os.sep + 'out.cpl')
        conf_lines = []
        l = f.readline()
        try:
            while l != '':
                conf_lines.append(
                    tuple(my_float(x) for x in l.rstrip().split(' ')))
                l = f.readline()
        except ValueError:
            f.close()
            return []
        f.close()
        return conf_lines
    def run_pv(self, out_file='probs.dat', data_dir='.',
               version=1, smooth=0.04):
        """Executes pv.
        out_file - Name of output file.
        data_dir - Where the data is found.
        """
        self._generate_intfile(data_dir)
        if version == 1:
            pv_name = "pv"
        else:
            pv_name = "pv2"
        proc = subprocess.Popen([self._get_path(pv_name)], cwd=data_dir,
                                shell=(sys.platform != "win32"),
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                universal_newlines=True)
        proc.communicate('data_fst_outfile ' + out_file +
                         ' out.dat\n' + str(smooth) + '\n')
        # Read back the probabilities file as a list of float tuples.
        pvf = open(data_dir + os.sep + out_file, 'r')
        result = [tuple(my_float(y) for y in x.rstrip().split(' ')) for x in pvf.readlines()]
        pvf.close()
        return result
|
zjuchenyuan/BioWeb
|
Lib/Bio/PopGen/FDist/Controller.py
|
Python
|
mit
| 10,699
|
[
"Biopython"
] |
8fcd55bddb081a96051358feb8acf0fd7262fed61221000d1c80a8c1a5c8ecff
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The minc module provides classes for interfacing with the `MINC
<http://www.bic.mni.mcgill.ca/ServicesSoftware/MINC>`_ command line tools. This
module was written to work with MINC version 2.2.00.
Author: Carlo Hamalainen <carlo@carlo-hamalainen.net>
http://carlo-hamalainen.net
"""
from ..base import (
TraitedSpec,
CommandLineInputSpec,
CommandLine,
StdOutCommandLineInputSpec,
StdOutCommandLine,
File,
Directory,
InputMultiPath,
OutputMultiPath,
traits,
isdefined,
)
import glob
import os
import os.path
import re
import warnings
# Module-level alias for warnings.warn; always show UserWarnings.
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
def check_minc():
    """Return True if and only if the MINC tools are installed."""
    version_info = Info.version()
    return version_info is not None
def no_minc():
    """Return True if and only if MINC is *not* installed."""
    installed = check_minc()
    return not installed
class Info(object):
    """Handle MINC version information.
    version refers to the version of MINC on the system
    """
    @staticmethod
    def version():
        """Check for minc version on the system
        Parameters
        ----------
        None
        Returns
        -------
        version : dict
            Version number as dict or None if MINC not found
        """
        # ``mincinfo -version`` raises IOError when the binary cannot be
        # run; treat that as "MINC not installed".
        try:
            clout = CommandLine(command='mincinfo',
                                args='-version',
                                terminal_output='allatonce').run()
        except IOError:
            return None
        out = clout.runtime.stdout
        # Each helper extracts a component version from one line of the
        # "-version" report, returning None when its marker is absent.
        def read_program_version(s):
            if 'program' in s:
                return s.split(':')[1].strip()
            return None
        def read_libminc_version(s):
            if 'libminc' in s:
                return s.split(':')[1].strip()
            return None
        def read_netcdf_version(s):
            if 'netcdf' in s:
                return ' '.join(s.split(':')[1:]).strip()
            return None
        def read_hdf5_version(s):
            if 'HDF5' in s:
                return s.split(':')[1].strip()
            return None
        # Entries stay None if the matching line never appears in stdout.
        versions = {'minc': None,
                    'libminc': None,
                    'netcdf': None,
                    'hdf5': None, }
        for l in out.split('\n'):
            for (name, f) in [('minc', read_program_version),
                              ('libminc', read_libminc_version),
                              ('netcdf', read_netcdf_version),
                              ('hdf5', read_hdf5_version), ]:
                if f(l) is not None:
                    versions[name] = f(l)
        return versions
def aggregate_filename(files, new_suffix):
    """
    Try to work out a sensible name given a set of files that have
    been combined in some way (e.g. averaged). If we can't work out a
    sensible prefix, we use the first filename in the list.

    Examples
    --------
    >>> from nipype.interfaces.minc.base import aggregate_filename
    >>> f = aggregate_filename(['/tmp/foo1.mnc', '/tmp/foo2.mnc', '/tmp/foo3.mnc'], 'averaged')
    >>> os.path.split(f)[1] # This has a full path, so just check the filename.
    'foo_averaged.mnc'
    >>> f = aggregate_filename(['/tmp/foo1.mnc', '/tmp/blah1.mnc'], 'averaged')
    >>> os.path.split(f)[1] # This has a full path, so just check the filename.
    'foo1_averaged.mnc'
    """
    # Output is placed relative to the current working directory; the
    # inputs' directories are ignored. (A previous version computed
    # os.path.split(files[0])[0] here and immediately overwrote it --
    # that dead assignment has been removed.)
    path = os.getcwd()
    # Base names without extensions, used to find a shared prefix.
    names = [os.path.splitext(os.path.split(x)[1])[0] for x in files]
    common_prefix = os.path.commonprefix(names)
    if common_prefix == '':
        # No shared prefix: fall back to the first file's name. When
        # files[0] is absolute, os.path.join discards `path`, so the
        # result lands next to the original file.
        return os.path.abspath(os.path.join(
            path, os.path.splitext(files[0])[0] + '_' + new_suffix + '.mnc'))
    else:
        return os.path.abspath(os.path.join(
            path, common_prefix + '_' + new_suffix + '.mnc'))
|
FCP-INDI/nipype
|
nipype/interfaces/minc/base.py
|
Python
|
bsd-3-clause
| 3,938
|
[
"NetCDF"
] |
dc79301eb57a9d0805b183647d66044a1a04ac48da2eb5ff06eacee81536b979
|
# GromacsWrapper: xpm.py
# Copyright (c) 2012 Oliver Beckstein <orbeckst@gmail.com>
# Copyright (c) 2010 Tsjerk Wassenaar <tsjerkw@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
Gromacs XPM file format
=======================
Gromacs stores matrix data in the xpm file format. This implementation
of a Python reader is based on Tsjerk Wassenaar's post to gmx-users
`numerical matrix from xpm file`_ (Mon Oct 4 13:05:26 CEST 2010). This
version returns a NumPy array and can guess an appropriate dtype for
the array.
.. _numerical matrix from xpm file:
http://lists.gromacs.org/pipermail/gmx-users/2010-October/054557.html
Classes
-------
.. autoclass:: XPM
:members:
.. attribute:: xvalues
Values of on the x-axis, extracted from the xpm file.
.. attribute:: yvalues
Values of on the y-axis, extracted from the xpm file. These are
in the same order as the rows in the xpm matrix. If *reverse* =
``False`` then this is typically a *descending* list of numbers
(highest to lowest residue number, index number, etc). For
*reverse* = ``True`` it is resorted accordingly.
Example: Analysing H-bonds
--------------------------
Run :func:`gromacs.g_hbond` to produce the existence map (and the log
file for the atoms involved in the bonds; the ndx file is also
useful)::
gromacs.g_hbond(s=TPR, f=XTC, g="hbond.log", hbm="hb.xpm", hbn="hb.ndx")
Load the XPM::
hb = XPM("hb.xpm", reverse=True)
Calculate the fraction of time that each H-bond existed::
hb_fraction = hb.array.mean(axis=0)
Get the descriptions of the bonds::
desc = [line.strip() for line in open("hbond.log") if not line.startswith('#')]
.. Note::
It is important that ``reverse=True`` is set so that the rows in
the xpm matrix are brought in the same order as the H-bond
labels.
Show the results::
print "\\n".join(["%-40s %4.1f%%" % p for p in zip(desc, 100*hb_fraction)])
"""
from __future__ import absolute_import, with_statement
from six.moves import range
import os, errno
import re
import warnings
import numpy
from ..exceptions import ParseError, AutoCorrectionWarning
from .. import utilities
from .convert import Autoconverter
import logging
class XPM(utilities.FileUtils):
    """Class to make a Gromacs XPM matrix available as a NumPy :class:`numpy.ndarray`.
    The data is available in the attribute :attr:`XPM.array`.
    .. Note::
       By default, the rows (2nd dimension) in the :attr:`XPM.array`
       are re-ordered so that row 0 (i.e. ``array[:,0]`` corresponds
       to the first residue/hydrogen bond/etc. The original xpm matrix
       is obtained for *reverse* = ``False``. The :class:`XPM` reader
       always reorders the :attr:`XPM.yvalues` (obtained from the xpm
       file) to match the order of the rows.
    """
    default_extension = "xpm"
    logger = logging.getLogger('gromacs.formats.XPM')
    #: compiled regular expression to parse the colors in the xpm file::
    #:
    #:   static char *gromacs_xpm[] = {
    #:   "14327 9 2 1",
    #:   "   c #FFFFFF " /* "None" */,
    #:   "o  c #FF0000 " /* "Present" */,
    #:
    #: Matches are named "symbol", "color" (hex string), and "value". "value"
    #: is typically autoconverted to appropriate values with
    #: :class:`gromacs.fileformats.convert.Autoconverter`.
    #: The symbol is matched as a `printable ASCII character`_ in the range
    #: 0x20 (space) to 0x7E (~).
    #:
    #: .. _`printable ASCII character`: http://www.danshort.com/ASCIImap/indexhex.htm
    COLOUR = re.compile("""\
        ^.*"                   # start with quotation mark
        (?P<symbol>[\x20-\x7E])# printable ASCII symbol used in the actual pixmap: 'space' to '~'
        \s+                    # white-space separated
        c\s+                   # 'c' to prefix colour??
        (?P<color>\#[0-9A-F]+) # colour as hex string (always??)
        \s*"                   # close with quotes
        \s*/\*\s*              # white space then opening C-comment /*
        "                      # start new string
        (?P<value>.*)          # description/value as free form string
        "                      # ... terminated by quotes
        """, re.VERBOSE)
    def __init__(self, filename=None, **kwargs):
        """Initialize xpm structure.
        :Arguments:
          *filename*
              read from xpm file directly
          *autoconvert*
              try to guess the type of the output array from the
              colour legend [``True``]
          *reverse*
              reverse rows (2nd dimension): re-orders the rows so that
              the first row corresponds e.g. to the first residue or
              first H-bonds and not the last) [``True``]
        """
        self.autoconvert = kwargs.pop("autoconvert", True)
        self.reverse = kwargs.pop("reverse", True)
        self.__array = None
        super(XPM, self).__init__(**kwargs)  # can use kwargs to set dict! (but no sanity checks!)
        if filename is not None:
            self._init_filename(filename)
            self.read(filename)
    def to_df(self):
        """Return the matrix as a :class:`pandas.DataFrame` with a Time column."""
        import pandas as pd
        # Add Time to the data as column
        data = numpy.vstack((self.xvalues, self.array.T)).T
        # Column names are resids
        df = pd.DataFrame(data, columns=["Time"] + list(self.yvalues))
        # Converts Time to a numeric type
        df['Time'] = pd.to_numeric(df['Time'])
        return df
    @property
    def array(self):
        """XPM matrix as a :class:`numpy.ndarray`.
        The attribute itself cannot be assigned a different array but
        the contents of the array can be modified.
        """
        return self.__array
    def read(self, filename=None):
        """Read and parse mdp file *filename*."""
        self._init_filename(filename)
        self.parse()
    def parse(self):
        """Parse the xpm file and populate :attr:`XPM.array`."""
        with utilities.openany(self.real_filename) as xpm:
            # Read in lines until we find the start of the array
            meta = [xpm.readline()]
            while not meta[-1].startswith("static char *gromacs_xpm[]"):
                meta.append(xpm.readline())
            # The next line will contain the dimensions of the array
            dim = xpm.readline()
            # There are four integers surrounded by quotes
            # nx: points along x, ny: points along y, nc: ?, nb: stride x
            nx, ny, nc, nb = [int(i) for i in self.unquote(dim).split()]
            # The next dim[2] lines contain the color definitions
            # Each pixel is encoded by dim[3] bytes, and a comment
            # at the end of the line contains the corresponding value
            colors = dict([self.col(xpm.readline()) for i in range(nc)])
            if self.autoconvert:
                autoconverter = Autoconverter(mode="singlet")
                for symbol, value in colors.items():
                    colors[symbol] = autoconverter.convert(value)
                self.logger.debug("Autoconverted colours: %r", colors)
            # make an array containing all possible values and let numpy figure out the dtype
            # (wrap in list() so this also works on Python 3, where
            # dict.values() is a view and numpy.array() would produce a
            # useless 0-d object array)
            dtype = numpy.array(list(colors.values())).dtype
            self.logger.debug("Guessed array type: %s", dtype.name)
            # pre-allocate array
            data = numpy.zeros((int(nx/nb), ny), dtype=dtype)
            self.logger.debug("dimensions: NX=%d NY=%d strideX=%d (NC=%d) --> (%d, %d)",
                              nx, ny, nb, nc, nx/nb, ny)
            iy = 0
            xval = []
            yval = []
            autoconverter = Autoconverter(mode="singlet")
            for line in xpm:
                if line.startswith("/*"):
                    # lines '/* x-axis:' ... and '/* y-axis:' contain the
                    # values of x and y coordinates
                    s = self.uncomment(line).strip()
                    if s.startswith('x-axis:'):
                        xval.extend([autoconverter.convert(x) for x in s[7:].split()])
                    elif s.startswith('y-axis:'):
                        yval.extend([autoconverter.convert(y) for y in s[7:].split()])
                    continue
                s = self.unquote(line)
                # Joao M. Damas <jmdamas@itqb.unl.pt> suggests on gmx-users (24 Oct 2014)
                # that the next line should read:
                #
                #  data[:, iy] = [colors[j[k:k+nb]] for k in range(0,nx*nb,nb)]
                #
                # "if one is using higher -nlevels for the .xpm construction (in g_rms, for example)"
                # However, without a test case I am not eager to change it right away so in
                # case some faulty behavior is discovered with the XPM reader then this comment
                # might be helpful. --- Oliver 2014-10-25
                data[:, iy] = [colors[s[k:k+nb]] for k in range(0,nx,nb)]
                self.logger.debug("read row %d with %d columns: '%s....%s'",
                                  iy, data.shape[0], s[:4], s[-4:])
                iy += 1  # for next row
        self.xvalues = numpy.array(xval)
        if self.reverse:
            self.logger.debug("reversed row order, reverse=%r", self.reverse)
            self.__array = data[:, ::-1]
            self.yvalues = numpy.array(yval)
        else:
            self.__array = data
            self.yvalues = numpy.array(yval)[::-1]  # must reverse y-values to match!
    @staticmethod
    def unquote(s):
        """Return string *s* with quotes ``"`` removed."""
        return s[1+s.find('"'):s.rfind('"')]
    @staticmethod
    def uncomment(s):
        """Return string *s* with C-style comments ``/*`` ... ``*/`` removed."""
        return s[2+s.find('/*'):s.rfind('*/')]
    def col(self, c):
        """Parse colour specification"""
        m = self.COLOUR.search(c)
        if not m:
            self.logger.fatal("Cannot parse colour specification %r.", c)
            raise ParseError("XPM reader: Cannot parse colour specification {0!r}.".format(c))
        value = m.group('value')
        color = m.group('symbol')
        self.logger.debug("%s: %s %s\n", c.strip(), color, value)
        return color, value
|
Becksteinlab/GromacsWrapper
|
gromacs/fileformats/xpm.py
|
Python
|
gpl-3.0
| 10,360
|
[
"Gromacs"
] |
6c3368a69cbc889350b55a0a76501af6fb869522933c08f6367bcf721d1f6cda
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd.electrostatics
import numpy as np
# Empty gap (along z) required by the ELC method; added on top of the box.
GAP = np.array([0, 0, 3.])
# Simulation box: 10 x 10 x 10 plus the gap in z.
BOX_L = np.array(3 * [10]) + GAP
# NOTE(review): near-zero time step -- presumably so integrator.run() only
# computes forces without moving particles; confirm.
TIME_STEP = 1e-100
# Constant potential difference imposed across the slab by ELC (const_pot).
POTENTIAL_DIFFERENCE = -3.
@utx.skipIfMissingFeatures(["P3M"])
class ElcTest(ut.TestCase):
    """Check ELC with a constant potential difference: the analytic field
    and energy of a two-charge system must be reproduced, and particles
    entering the gap region must raise an error.
    """
    system = espressomd.System(box_l=BOX_L, time_step=TIME_STEP)
    system.cell_system.skin = 0.0
    def test_finite_potential_drop(self):
        system = self.system
        # Two opposite unit charges on the z axis, inside the slab.
        p1 = system.part.add(pos=[0, 0, 1], q=+1)
        p2 = system.part.add(pos=[0, 0, 9], q=-1)
        p3m = espressomd.electrostatics.P3M(
            # zero is not allowed
            prefactor=1e-100,
            mesh=32,
            cao=5,
            accuracy=1e-3,
        )
        elc = espressomd.electrostatics.ELC(
            p3m_actor=p3m,
            gap_size=GAP[2],
            maxPWerror=1e-3,
            delta_mid_top=-1,
            delta_mid_bot=-1,
            const_pot=1,
            pot_diff=POTENTIAL_DIFFERENCE,
        )
        system.actors.add(elc)
        # Calculated energy
        U_elc = system.analysis.energy()['coulomb']
        # Expected E-Field is voltage drop over the box
        E_expected = POTENTIAL_DIFFERENCE / (BOX_L[2] - GAP[2])
        # Expected potential is -E_expected * z, so
        U_expected = -E_expected * (p1.pos[2] * p1.q + p2.pos[2] * p2.q)
        self.assertAlmostEqual(U_elc, U_expected)
        # Forces on both charges must match the uniform field.
        system.integrator.run(0)
        self.assertAlmostEqual(E_expected, p1.f[2] / p1.q)
        self.assertAlmostEqual(E_expected, p2.f[2] / p2.q)
        # Check if error is thrown when particles enter the ELC gap
        # positive direction
        p1.pos = [BOX_L[0] / 2, BOX_L[1] / 2, BOX_L[2] - GAP[2] / 2]
        with self.assertRaises(Exception):
            self.system.analysis.energy()
        with self.assertRaisesRegex(Exception, 'entered ELC gap region'):
            self.system.integrator.run(2)
        # negative direction
        p1.pos = [BOX_L[0] / 2, BOX_L[1] / 2, -GAP[2] / 2]
        with self.assertRaises(Exception):
            self.system.analysis.energy()
        with self.assertRaisesRegex(Exception, 'entered ELC gap region'):
            self.system.integrator.run(2)
if __name__ == "__main__":
    # Run this test module directly with the unittest runner.
    ut.main()
|
espressomd/espresso
|
testsuite/python/elc.py
|
Python
|
gpl-3.0
| 2,980
|
[
"ESPResSo"
] |
a87c37b4c24bee83ce38201bdf41e15682e18295f1b8ebbc77035b205e1a2a32
|
import pygrib
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from mpl_toolkits.basemap import Basemap
# Find the first 'Soil moisture' message in the TIGGE sample file and keep
# its field values plus the lat/lon grid.
for grb in pygrib.open('../sampledata/ecmwf_tigge.grb'):
    if grb['parameterName'] == 'Soil moisture':
        fld = grb['values']
        lats,lons = grb.latlons()
        break
#from ncepgrib2 import Grib2Decode
#grbs = Grib2Decode('../sampledata/ecmwf_tigge.grb')
#grbx = grbs[14]
#fld = grbx.data()
#lats,lons = grbx.grid()
# Map corners taken from the grid's first and last points.
llcrnrlon = lons[0,0]
llcrnrlat = lats[0,0]
urcrnrlon = lons[-1,-1]
urcrnrlat = lats[-1,-1]
m = Basemap(llcrnrlon=llcrnrlon,llcrnrlat=llcrnrlat,
            urcrnrlon=urcrnrlon,urcrnrlat=urcrnrlat,
            resolution='l',projection='cyl')
# Filled contour plot of the field on a cylindrical projection.
CS = m.contourf(lons,lats,fld,15,cmap=plt.cm.jet)
plt.colorbar(shrink=0.6)
m.drawcoastlines()
# draw parallels
delat = 30.
circles = np.arange(-90.,90.+delat,delat)
m.drawparallels(circles,labels=[1,0,0,0])
# draw meridians
delon = 60.
meridians = np.arange(0,360,delon)
m.drawmeridians(meridians,labels=[0,0,0,1])
plt.title(grb['parameterName']+' on ECMWF Reduced Gaussian Grid')
plt.show()
|
erdc-cm/pygrib
|
test/test_ectigge.py
|
Python
|
isc
| 1,094
|
[
"Gaussian"
] |
6902524c46517b35aa702216a1b853d6f9b7e755753ae2cbd0c5f74b630b58ad
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing siutations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@domain.com'), ('Full Name', 'anotheremail@domain.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('bn', gettext_noop('Bengali')),
('bg', gettext_noop('Bulgarian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('es', gettext_noop('Spanish')),
('et', gettext_noop('Estonian')),
('es-ar', gettext_noop('Argentinean Spanish')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('hu', gettext_noop('Hungarian')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('ko', gettext_noop('Korean')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('lv', gettext_noop('Latvian')),
('lt', gettext_noop('Lithuanian')),
('mk', gettext_noop('Macedonian')),
('nl', gettext_noop('Dutch')),
('no', gettext_noop('Norwegian')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sr', gettext_noop('Serbian')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
DATABASES = {}
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
# 'django.core.context_processors.request',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is a admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Path to the "jing" executable -- needed to validate XMLFields
JING_PATH = "/usr/bin/jing"
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
MONTH_DAY_FORMAT = 'F j'
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (http://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_BACKEND = 'locmem://'
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ('asshat', 'asshead', 'asshole', 'cunt', 'fuck', 'gook', 'nigger', 'shit')
# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None
# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None
# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None
# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0
# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
###########
# TESTING #
###########
# The name of the method to use to invoke the test suite
TEST_RUNNER = 'django.test.simple.run_tests'
# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None
# Strings used to set the character set and collation order for the test
# database. These values are passed literally to the server, so they are
# backend-dependent. If None, no special settings are sent (system defaults are
# used).
TEST_DATABASE_CHARSET = None
TEST_DATABASE_COLLATION = None
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
|
weigj/django-multidb
|
django/conf/global_settings.py
|
Python
|
bsd-3-clause
| 14,671
|
[
"VisIt"
] |
3ecf32cab0c367d604b8c98576ebc7f8a59be9f91786e33e76551f3ce4fadc31
|
# The OARN Relief Nursery Database
# Copyright (C) 2015 Oregon Association of Relief Nurseries
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from oarndb.models import HomeVisit
from oarndb.tests.oarn_factory import OarnFactory
class HomeVisitViewsTestCase(APITestCase):
def setUp(self):
self.factory = OarnFactory() # init loads basic data
self.fbb_family_a = self.factory.new_basic_family(self.factory.fbb)
self.fbb_family_b = self.factory.new_basic_family(self.factory.fbb)
self.mtn_family_a = self.factory.new_basic_family(self.factory.mtn)
self.mtn_family_b = self.factory.new_basic_family(self.factory.mtn)
self.fbb_staff = self.factory.new_person(self.factory.fbb, is_client=False)
self.mtn_staff = self.factory.new_person(self.factory.mtn, is_client=False)
# JSON for post tests:
self.fbb_post_data = {
"family": self.factory.new_basic_family(self.factory.fbb).pk,
"person": self.factory.rand_staff(self.factory.fbb).pk,
"ref_home_visit_location": self.factory.home_visit_location.ref_home_visit_location_id,
"visit_date": '2014-04-26',
"service_minutes": 90
}
self.mtn_post_data = {
"family": self.factory.new_basic_family(self.factory.mtn).family_id,
"person": self.factory.rand_staff(self.factory.mtn).person_id,
"ref_home_visit_location": self.factory.home_visit_location.ref_home_visit_location_id,
"visit_date": '2014-04-26',
"service_minutes": 90
}
self.hv_fbb_family_a = HomeVisit.objects.create(
family=self.fbb_family_a,
person=self.factory.rand_staff(self.factory.fbb),
ref_home_visit_location=self.factory.home_visit_location,
visit_date='2014-04-26',
service_minutes=90
)
self.hv_fbb_family_b = HomeVisit.objects.create(
family=self.fbb_family_b,
person=self.factory.rand_staff(self.factory.fbb),
ref_home_visit_location=self.factory.home_visit_location,
visit_date='2014-04-26',
service_minutes=90
)
self.hv_mtn_family_a = HomeVisit.objects.create(
family=self.mtn_family_a,
person=self.factory.rand_staff(self.factory.fbb),
ref_home_visit_location=self.factory.home_visit_location,
visit_date='2014-04-26',
service_minutes=90
)
self.hv_mtn_family_b = HomeVisit.objects.create(
family=self.mtn_family_b,
person=self.factory.rand_staff(self.factory.fbb),
ref_home_visit_location=self.factory.home_visit_location,
visit_date='2014-04-26',
service_minutes=90
)
def test_globalreadonly_can_get_all_records(self):
list_url = reverse('home-visit-list')
response = self.factory.global_readonly_client.get(list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
ids = []
for item in response.data['results']:
ids.append(
item['family']
)
self.assertTrue(HomeVisit.objects.filter(
family=self.fbb_family_a).filter(
family_id__in=ids)
)
self.assertTrue(HomeVisit.objects.filter(
family=self.fbb_family_b).filter(
family_id__in=ids)
)
self.assertTrue(HomeVisit.objects.filter(
family=self.mtn_family_a).filter(
family_id__in=ids)
)
self.assertTrue(HomeVisit.objects.filter(
family=self.mtn_family_b).filter(
family_id__in=ids)
)
def test_globaladmin_can_get_all_records(self):
list_url = reverse('home-visit-list')
response = self.factory.global_admin_client.get(list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
ids = []
for item in response.data['results']:
ids.append(
item['family']
)
self.assertTrue(HomeVisit.objects.filter(
family=self.fbb_family_a).filter(
family_id__in=ids)
)
self.assertTrue(HomeVisit.objects.filter(
family=self.fbb_family_b).filter(
family_id__in=ids)
)
self.assertTrue(HomeVisit.objects.filter(
family=self.mtn_family_a).filter(
family_id__in=ids)
)
self.assertTrue(HomeVisit.objects.filter(
family=self.mtn_family_b).filter(
family_id__in=ids)
)
def test_fbb_readonly_can_get_fbb_records(self):
list_url = reverse('home-visit-list')
response = self.factory.fbb_readonly_client.get(list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
ids = []
for item in response.data['results']:
ids.append(
item['family']
)
self.assertTrue(HomeVisit.objects.filter(
family=self.fbb_family_a).filter(
family_id__in=ids)
)
self.assertTrue(HomeVisit.objects.filter(
family=self.fbb_family_b).filter(
family_id__in=ids)
)
self.assertFalse(HomeVisit.objects.filter(
family=self.mtn_family_a).filter(
family_id__in=ids)
)
self.assertFalse(HomeVisit.objects.filter(
family=self.mtn_family_b).filter(
family_id__in=ids)
)
def test_fbb_readwrite_can_get_fbb_records(self):
list_url = reverse('home-visit-list')
response = self.factory.fbb_readwrite_client.get(list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
ids = []
for item in response.data['results']:
ids.append(
item['family']
)
self.assertTrue(HomeVisit.objects.filter(
family=self.fbb_family_a).filter(
family_id__in=ids)
)
self.assertTrue(HomeVisit.objects.filter(
family=self.fbb_family_b).filter(
family_id__in=ids)
)
self.assertFalse(HomeVisit.objects.filter(
family=self.mtn_family_a).filter(
family_id__in=ids)
)
self.assertFalse(HomeVisit.objects.filter(
family=self.mtn_family_b).filter(
family_id__in=ids)
)
def test_fbb_admin_can_get_fbb_records(self):
list_url = reverse('home-visit-list')
response = self.factory.fbb_admin_client.get(list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
ids = []
for item in response.data['results']:
ids.append(
item['family']
)
self.assertTrue(HomeVisit.objects.filter(
family=self.fbb_family_a).filter(
family_id__in=ids)
)
self.assertTrue(HomeVisit.objects.filter(
family=self.fbb_family_b).filter(
family_id__in=ids)
)
self.assertFalse(HomeVisit.objects.filter(
family=self.mtn_family_a).filter(
family_id__in=ids)
)
self.assertFalse(HomeVisit.objects.filter(
family=self.mtn_family_b).filter(
family_id__in=ids)
)
def test_global_readonly_cannot_create_an_fbb_record(self):
create_url = reverse('home-visit-list')
response = self.factory.global_readonly_client.post(create_url, self.fbb_post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_global_admin_can_create_an_fbb_record(self):
create_url = reverse('home-visit-list')
response = self.factory.global_admin_client.post(create_url, self.fbb_post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_fbb_readonly_cannot_create_an_fbb_record(self):
create_url = reverse('home-visit-list')
response = self.factory.fbb_readonly_client.post(create_url, self.fbb_post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_fbb_readwrite_can_create_an_fbb_record(self):
create_url = reverse('home-visit-list')
response = self.factory.fbb_readwrite_client.post(create_url, self.fbb_post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_fbb_admin_can_create_an_fbb_record(self):
create_url = reverse('home-visit-list')
response = self.factory.fbb_readwrite_client.post(create_url, self.fbb_post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_fbb_readonly_cannot_create_an_mtn_record(self):
create_url = reverse('home-visit-list')
response = self.factory.fbb_readonly_client.post(create_url, self.mtn_post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_fbb_readwrite_cannot_create_an_mtn_record(self):
create_url = reverse('home-visit-list')
response = self.factory.fbb_readwrite_client.post(create_url, self.mtn_post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_fbb_admin_cannot_create_an_mtn_record(self):
create_url = reverse('home-visit-list')
response = self.factory.fbb_admin_client.post(create_url, self.mtn_post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_global_readonly_cannot_update_an_fbb_record(self):
hvr = self.hv_fbb_family_a
patch_data = {
"family": self.hv_fbb_family_a.family.family_id,
"person": self.factory.rand_staff(self.factory.fbb).person_id,
"ref_home_visit_location": self.factory.home_visit_location.ref_home_visit_location_id,
"visit_date": '2014-04-27',
"service_minutes": 60
}
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.global_readonly_client.patch(
detail_url, patch_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_global_admin_can_update_an_fbb_record(self):
hvr = self.hv_fbb_family_a
patch_data = {
"family": self.hv_fbb_family_a.family.family_id,
"person": self.factory.rand_staff(self.factory.fbb).person_id,
"ref_home_visit_location": self.factory.home_visit_location.ref_home_visit_location_id,
"visit_date": '2014-04-27',
"service_minutes": 60
}
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.global_admin_client.patch(
detail_url, patch_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_fbb_readonly_cannot_update_an_fbb_record(self):
hvr = self.hv_fbb_family_a
patch_data = {
"family": self.hv_fbb_family_a.family.family_id,
"person": self.factory.rand_staff(self.factory.fbb).person_id,
"ref_home_visit_location": self.factory.home_visit_location.ref_home_visit_location_id,
"visit_date": '2014-04-27',
"service_minutes": 60
}
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.fbb_readonly_client.patch(
detail_url, patch_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_fbb_readwrite_can_update_an_fbb_record(self):
hvr = self.hv_fbb_family_a
patch_data = {
"family": self.hv_fbb_family_a.family.family_id,
"person": self.factory.rand_staff(self.factory.fbb).person_id,
"ref_home_visit_location": self.factory.home_visit_location.ref_home_visit_location_id,
"visit_date": '2014-04-27',
"service_minutes": 60
}
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.fbb_readwrite_client.patch(
detail_url, patch_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_fbb_admin_can_update_an_fbb_record(self):
hvr = self.hv_fbb_family_a
patch_data = {
"family": self.hv_fbb_family_a.family.family_id,
"person": self.factory.rand_staff(self.factory.fbb).person_id,
"ref_home_visit_location": self.factory.home_visit_location.ref_home_visit_location_id,
"visit_date": '2014-04-27',
"service_minutes": 60
}
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.fbb_admin_client.patch(
detail_url, patch_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_fbb_readonly_cannot_update_an_mtn_record(self):
hvr = self.hv_fbb_family_a
patch_data = {
"family": self.hv_mtn_family_a.family.family_id,
"person": self.factory.rand_staff(self.factory.mtn).person_id,
"ref_home_visit_location": self.factory.home_visit_location.ref_home_visit_location_id,
"visit_date": '2014-04-27',
"service_minutes": 60
}
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.fbb_readonly_client.patch(
detail_url, patch_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_fbb_readwrite_cannot_update_an_mtn_record(self):
hvr = self.hv_fbb_family_a
patch_data = {
"family": self.hv_mtn_family_a.family.family_id,
"person": self.factory.rand_staff(self.factory.mtn).person_id,
"ref_home_visit_location": self.factory.home_visit_location.ref_home_visit_location_id,
"visit_date": '2014-04-27',
"service_minutes": 60
}
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.fbb_readwrite_client.patch(
detail_url, patch_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_fbb_admin_cannot_update_an_mtn_record(self):
hvr = self.hv_fbb_family_a
patch_data = {
"family": self.hv_mtn_family_a.family.family_id,
"person": self.factory.rand_staff(self.factory.mtn).person_id,
"ref_home_visit_location": self.factory.home_visit_location.ref_home_visit_location_id,
"visit_date": '2014-04-27',
"service_minutes": 60
}
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.fbb_admin_client.patch(
detail_url, patch_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_global_readonly_cannot_delete_an_fbb_record(self):
hvr = self.hv_fbb_family_a
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.global_readonly_client.delete(detail_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_global_admin_can_delete_an_fbb_record(self):
hvr = self.hv_fbb_family_a
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.global_admin_client.delete(detail_url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_fbb_readonly_cannot_delete_an_fbb_record(self):
hvr = self.hv_fbb_family_a
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.fbb_readonly_client.delete(detail_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_fbb_readwrite_can_delete_an_fbb_record(self):
hvr = self.hv_fbb_family_a
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.fbb_readwrite_client.delete(detail_url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_fbb_admin_can_delete_an_fbb_record(self):
hvr = self.hv_fbb_family_a
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.fbb_admin_client.delete(detail_url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_fbb_readonly_cannot_delete_an_mtn_record(self):
hvr = self.hv_mtn_family_a
detail_url = reverse('home-visit-detail', args=[hvr.pk])
response = self.factory.global_readonly_client.delete(detail_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
|
wire-rabbit/oarn-database
|
oarndb/tests/test_home_visit_views.py
|
Python
|
agpl-3.0
| 18,444
|
[
"VisIt"
] |
65c8dfd63ecd74f112f9980b5e0083e8416ee0f456a3cc786c24f6a9d5f6a684
|
import bisect
from . import vmdutil
from . import vmddef
from . import pmxutil
from . import pmxdef
def get_global_transform(this_transform, this_bone_def,
parent_transform, parent_bone_def, parent_global,
additional_transform=None):
""" Compute the global rotation and position.
Args:
this_transform: (rotation, position) of this bone
this_bone_def: pmxdef.bone of this bone
parent_bone_transform: (rotation, positoin) of parent bone
parent_bone_def: pmxdef.bone of parent
parent_global: global (rotation, position) of parent bone
additional_transform: additional (rotation, position)
Returns:
(rotation, position)
"""
bone_vector = vmdutil.sub_v(
vmdutil.add_v(this_bone_def.position, this_transform[1]),
parent_bone_def.position)
this_global_pos = vmdutil.add_v(
parent_global[1],
vmdutil.rotate_v3q(bone_vector, parent_global[0]))
local_rot = this_transform[0]
if additional_transform is not None:
this_global_pos = vmdutil.add_v(
this_global_pos, additional_transform[1])
local_rot = vmdutil.multiply_quaternion(
additional_transform[0], local_rot)
this_global_rot = vmdutil.multiply_quaternion(
local_rot, parent_global[0])
return this_global_rot, this_global_pos
BONE_FIELD = ['rotation', 'position']
CAMERA_FIELD = ['rotation', 'position', 'distance', 'angle_of_view']
MORPH_FIELD = ['weight']
LIGHT_FIELD = ['rgb', 'direction']
NO_NAME = '12345678901234567890' # motoin name must <= 15 bytes
class VmdMotion():
def interpolate_morph(self, frame_no, begin, end):
t = (frame_no - begin.frame) / (end.frame - begin.frame)
return vmdutil.lerp_v([begin.weight], [end.weight], t)[0]
def interpolate_light(self, frame_no, begin, end):
t = (frame_no - begin.frame) / (end.frame - begin.frame)
rgb = vmdutil.lerp_v(begin.rgb, end.rgb, t)
direction = vmdutil.lerp_v(begin.direction, end.direction, t)
return rgb, direction
def interpolate_bone(self, frame_no, begin, end):
return ( # rotation position
vmdutil.interpolate_rotation(frame_no, begin, end, 'bones'),
vmdutil.interpolate_position(frame_no, begin, end, 'bones'),
)
def interpolate_camera(self, frame_no, begin, end):
return ( # rotation, position, distance, angle of view
vmdutil.interpolate_rotation(frame_no, begin, end, 'cameras'),
vmdutil.interpolate_position(frame_no, begin, end, 'cameras'),
vmdutil.interpolate_camera_distance(frame_no, begin, end),
vmdutil.interpolate_camera_angle_of_view(frame_no, begin, end),
)
def __init__(self, motion_defs):
self.switchcase = {
# (field_names, interpolation, default)
vmddef.morph: (
MORPH_FIELD, self.interpolate_morph, 0),
vmddef.bone: (
BONE_FIELD, self.interpolate_bone,
(vmddef.BONE_SAMPLE.rotation, vmddef.BONE_SAMPLE.position)),
vmddef.camera: (
CAMERA_FIELD, self.interpolate_camera,
(vmddef.CAMERA_SAMPLE.rotation, vmddef.CAMERA_SAMPLE.position,
vmddef.CAMERA_SAMPLE.distance,
vmddef.CAMERA_SAMPLE.angle_of_view)),
vmddef.light: (
LIGHT_FIELD, self.interpolate_light,
(vmddef.LIGHT_SAMPLE.rgb, vmddef.LIGHT_SAMPLE.direction)),
None: (None, None, None)
}
self.motion_defs = motion_defs
if len(motion_defs) <= 0:
self.motion_name_dict = {}
self.motion_frame_dict = {}
self.sorted_keyframes = {}
self.kind = None
else:
self.kind = motion_defs[0].__class__
if 'name' in motion_defs[0]._fields:
self.motion_name_dict = vmdutil.make_name_dict(
vmdutil.frames_to_dict(motion_defs), True)
else:
self.sorted_motions = sorted(
motion_defs, key=lambda e: e.frame)
self.motion_name_dict = {NO_NAME: self.sorted_motions}
self.motion_frame_dict = { # {name: {frame_no: motion_def}}
name: {
motion.frame: motion
for motion in self.motion_name_dict[name]}
for name in self.motion_name_dict}
self.sorted_keyframes = { # {bone_name: [frame_no]} for bisect
name:
[frame.frame for frame in self.motion_name_dict[name]]
for name in self.motion_name_dict}
def get_vmd_frame(self, frame_no, name=NO_NAME):
# Return motion if the frame_no in vmd, otherwise return None
d = self.motion_frame_dict.get(name)
return None if d is None else (
self.motion_frame_dict[name].get(frame_no))
def get_vmd_index(self, frame_no, name=NO_NAME):
# Return index or closest below index of the frame_no
# in sorted list of vmd keyframes
keys = self.sorted_keyframes[name]
index = bisect.bisect_left(keys, frame_no)
if index <= len(keys) - 1 and keys[index] == frame_no:
return index, True
else:
return index - 1, False
def get_vmd_transform(self, frame_no, name=NO_NAME):
def collect_fields(frame):
d = frame._asdict()
r = [d[field] for field in self.switchcase[self.kind][0]]
return tuple(r) if len(r) > 1 else r[0]
if name not in self.motion_frame_dict:
return self.switchcase[self.kind][2] # return default
frame_dict = self.motion_frame_dict[name]
key_frames = self.sorted_keyframes[name]
vmd_index, is_key_frame = self.get_vmd_index(frame_no, name)
if is_key_frame:
m = frame_dict[frame_no]
result = collect_fields(m)
else:
if vmd_index < 0:
first_frame = frame_dict[key_frames[0]]
result = collect_fields(first_frame)
else:
begin = frame_dict[key_frames[vmd_index]]
if vmd_index < len(key_frames) - 1:
end = frame_dict[key_frames[vmd_index + 1]]
result = self.switchcase[self.kind][1](
frame_no, begin, end)
else:
result = collect_fields(begin)
return result
class BoneTransformation():
""" Transform the bone at frame_no according to vmd motion,
and stores those results.
Predecessors of the bone are also transformed and stored.
"""
def __init__(self, bone_defs, motion_defs,
mandatory_bone_names=None, subgraph=False):
""" Constructor
If subgraph == False, bones to be transformed are
(bone in pmx) AND ((bone in vmd) OR (bone in mandatory_bones)).
If subgraph == True,
(mandatory_bones and it's predecessors in pmx) AND (bone in vmd).
Args:
bone_defs: {bone_index: pmxdef.bone}
motion_defs: [vmdutil.bone]
mandatory_bone_names: [bone name],
by_default 'センター' is mandatory
subgraph: boolean
"""
self.bone_defs = bone_defs
self.motion_defs = motion_defs
self.vmd_motion = VmdMotion(motion_defs)
self.motion_name_dict = self.vmd_motion.motion_name_dict
self.motion_index_dict = vmdutil.make_index_dict(motion_defs, True)
self.mandatory_bone_names = (
mandatory_bone_names[:]
if mandatory_bone_names is not None else [])
self.bone_name_to_index = pmxutil.make_index_dict(self.bone_defs)
self.mandatory_bone_indexes = [
self.bone_name_to_index[name]
for name in self.mandatory_bone_names]
self.transform_bone_graph = self.make_bone_graph(subgraph)
self.transform_bone_indexes = [
bone_index for bone_index in self.transform_bone_graph.edges]
self.transform_bone_names = [
self.bone_defs[index].name_jp
for index in self.transform_bone_indexes]
self.leaf_indexes = [
bone_index for bone_index in self.transform_bone_indexes if
self.transform_bone_graph.out_degree(bone_index) == 0]
# {frame_no: {bone_index: (global, local, additional)}}
self.transform_dict = dict()
self.ext_transform = None
def set_external_link(self, ext_transform, bone_name):
if ext_transform is None:
return
self.ext_transform = ext_transform
self.ext_bone_name = bone_name
self.ext_bone_index = ext_transform.bone_name_to_index[bone_name]
((rot, pos), _, _) = ext_transform.do_transform(0, self.ext_bone_index)
def make_bone_graph(self, subgraph):
if len(self.mandatory_bone_names) > 0 and subgraph is True:
bone_graph = pmxutil.make_sub_bone_link_graph(
self.bone_defs, 0, self.mandatory_bone_indexes)
else:
# all bones
bone_graph = pmxutil.make_all_bone_link_graph(self.bone_defs)
bone_indexes = [index for index in bone_graph.edges]
# remove nodes not in vmd nor mandatory
for node_index in bone_indexes:
bone_def = self.bone_defs[node_index]
name = bone_def.name_jp
# if the bone has additional transform and it's ref bone is in vmd
# keep it in graph
if (bone_def.flag &
(pmxdef.BONE_ADD_ROTATE | pmxdef.BONE_ADD_ROTATE)) > 0:
add_parent_index = bone_def.additional_transform.parent
add_parent_name = self.bone_defs[add_parent_index].name_jp
if (add_parent_name in self.motion_name_dict):
continue
if (name != 'センター' and
name not in self.motion_name_dict and
name not in self.mandatory_bone_names):
bone_graph.remove_node(node_index)
return bone_graph
def search(self, frame_no, bone_index=None):
    """Look up cached transforms for a frame.

    With bone_index None, return the whole per-frame cache dict
    {bone_index: (global, local, additional)} or None; otherwise return
    the single bone's cached tuple, or None when absent.
    """
    frame_cache = self.transform_dict.get(frame_no)
    if bone_index is None:
        return frame_cache
    if frame_cache is None:
        return None
    return frame_cache.get(bone_index)
def insert(self, frame_no, bone_index, global_transform, local_transform,
           additional_transform):
    """Cache the transform triple computed for (frame_no, bone_index)."""
    record = (global_transform, local_transform, additional_transform)
    # setdefault creates the per-frame dict on first insertion for a frame.
    self.transform_dict.setdefault(frame_no, {})[bone_index] = record
def delete(self, frame_no, bone_index=None):
    """Remove cached transforms and return what was removed (or None).

    With bone_index None, the whole frame cache is dropped; otherwise only
    that bone's entry. Also cascades the deletion into the linked external
    transform, if any.
    """
    if self.ext_transform is not None:
        self.ext_transform.delete(frame_no, self.ext_bone_index)
    if frame_no not in self.transform_dict:
        return None
    if bone_index is None:
        return self.transform_dict.pop(frame_no)
    # pop with default mirrors the original "return None when absent".
    return self.transform_dict[frame_no].pop(bone_index, None)
def delete_descendants(self, frame_no, bone_index):
    """Drop cached transforms of every bone below bone_index at frame_no."""
    for descendant in self.transform_bone_graph.get_descendants(bone_index):
        self.delete(frame_no, descendant)
def get_vmd_frame(self, frame_no, bone_name):
    """Delegate to the underlying motion: raw key frame for a bone."""
    motion = self.vmd_motion
    return motion.get_vmd_frame(frame_no, bone_name)
def get_vmd_index(self, frame_no, bone_name):
    """Delegate to the underlying motion: frame index for a bone."""
    motion = self.vmd_motion
    return motion.get_vmd_index(frame_no, bone_name)
def replace_vmd_frames(self, frames):
    """Replace existing key frames of one bone with ``frames``.

    The bone name is taken from frames[0] only, so all entries are assumed
    to belong to the same bone -- TODO confirm callers never pass a
    mixed-bone list. Frames whose frame number has no existing key frame
    are silently ignored. Rebuilds self.vmd_motion from the updated defs.
    """
    if len(frames) <= 0:
        return
    rep = self.motion_defs[:]
    bone_name = vmdutil.b_to_str(frames[0].name)
    name_frames = self.motion_index_dict.get(bone_name)
    if name_frames:
        for frame in frames:
            index = name_frames.get(frame.frame)
            # BUGFIX: was "if index:", which is False for index 0, so the
            # frame stored at motion_defs[0] could never be replaced.
            if index is not None:
                rep[index] = frame
    self.motion_defs = rep
    self.vmd_motion = VmdMotion(self.motion_defs)
    return
def get_vmd_transform(self, frame_no, bone_index):
    """Interpolated VMD (rotation, position) for a bone at frame_no.

    Components the bone's PMX flags do not allow it to animate are forced
    back to identity rotation / zero translation.
    """
    bone_def = self.bone_defs[bone_index]
    rotation, position = self.vmd_motion.get_vmd_transform(
        frame_no, bone_def.name_jp)
    can_translate = (bone_def.flag & pmxdef.BONE_CAN_TRANSLATE ==
                     pmxdef.BONE_CAN_TRANSLATE)
    can_rotate = (bone_def.flag & pmxdef.BONE_CAN_ROTATE ==
                  pmxdef.BONE_CAN_ROTATE)
    if not can_translate:
        position = [0, 0, 0]
    if not can_rotate:
        rotation = vmdutil.QUATERNION_IDENTITY
    return rotation, position
def get_additional_transform(self, frame_no, bone_index):
    """Return the rotation/position this bone inherits via the PMX
    'additional transform' (grant) flags at frame_no.

    Returns:
        (rotation_quaternion, position_vec) when the bone has the
        ADD_ROTATE and/or ADD_TRANSLATE flag; None when it has neither.
    Raises:
        Exception: local-axis addition (BONE_ADD_LOCAL) is unsupported.
    """
    additional_transform = None
    bone_def = self.bone_defs[bone_index]
    flag = bone_def.flag
    if flag & pmxdef.BONE_ADD_LOCAL == pmxdef.BONE_ADD_LOCAL:
        raise Exception('local addition is not supported.')
    if flag & (pmxdef.BONE_ADD_ROTATE | pmxdef.BONE_ADD_TRANSLATE) > 0:
        add_parent_index, add_scale = bone_def.additional_transform
        # Resolve the grant-source bone first (recursive; result cached by
        # do_transform via insert()).
        add_trans = self.do_transform(
            frame_no, add_parent_index)
        additional_rot = vmdutil.QUATERNION_IDENTITY
        additional_pos = (0, 0, 0)
        if add_trans is None:
            # Source bone is not in the transform graph: inherit identity
            # rather than failing.
            return (additional_rot, additional_pos)
        add_global, add_vmd, add_add = add_trans
        # rot: prefer the source bone's own inherited rotation when it also
        # grants (chained addition), otherwise its local (vmd) rotation.
        if flag & pmxdef.BONE_ADD_ROTATE == pmxdef.BONE_ADD_ROTATE:
            if add_add is None:
                additional_rot = add_vmd[0]
            else:
                additional_rot = add_add[0]
            if add_scale != 1.0:
                additional_rot = vmdutil.scale_q(
                    additional_rot, add_scale)
        # pos: same chaining rule as rotation, applied to translation.
        if flag & pmxdef.BONE_ADD_TRANSLATE == pmxdef.BONE_ADD_TRANSLATE:
            if add_add is None:
                additional_pos = add_vmd[1]
            else:
                additional_pos = add_add[1]
            if add_scale != 1.0:
                additional_pos = vmdutil.scale_v(
                    additional_pos, add_scale)
        additional_transform = (additional_rot, additional_pos)
    return additional_transform
def do_transform(self, frame_no, bone_index, vmd_transform=None):
    """Return and store the global/local transformation of the bone
    at frame_no.

    If vmd_transform is not None, compute with it.
    Or compute with motion in vmd file.

    Returns:
        (global_transformation, vmd_transformation(local),
        additional_transformation), or None when the bone is not part of
        the transform graph.
        Transformations consist of (rotation, position).
    """
    if bone_index not in self.transform_bone_indexes:
        # Bone was pruned from the transform graph; nothing to compute.
        return None
    if vmd_transform is None:
        # Memoized path: reuse a previously cached result for this
        # frame/bone pair before touching the vmd motion.
        transform = self.search(frame_no, bone_index)
        if transform is not None:
            return transform
        else:
            vmd_transform = self.get_vmd_transform(frame_no, bone_index)
    if (bone_index <= 0 or
            self.transform_bone_graph.in_degree(bone_index) <= 0):
        # Root bone (or a graph root with no parent edge).
        additional_transform = None
        if self.ext_transform is None:
            # Global = local rotation plus rest position offset.
            global_transform = (
                vmd_transform[0],
                vmdutil.add_v(
                    self.bone_defs[bone_index].position, vmd_transform[1]))
        else:
            # Linked to an external model: treat the external bone as the
            # parent of this root.
            ext_g, ext_v, ext_a = self.ext_transform.do_transform(
                frame_no, self.ext_bone_index)
            global_transform = get_global_transform(
                vmd_transform, self.bone_defs[bone_index],
                ext_v, self.bone_defs[bone_index],  # ext pos = this pos
                ext_g)
    else:
        # Non-root bone: compose with the (single) parent's global
        # transform; recursion resolves ancestors first.
        parent_index = next(
            iter(self.transform_bone_graph.preds[bone_index]))
        parent_global, parent_vmd, parent_add = self.do_transform(
            frame_no, parent_index)
        additional_transform = self.get_additional_transform(
            frame_no, bone_index)
        global_transform = get_global_transform(
            vmd_transform, self.bone_defs[bone_index],
            parent_vmd, self.bone_defs[parent_index],
            parent_global, additional_transform)
    # Cache for later lookups (and for grant-transform consumers).
    self.insert(
        frame_no, bone_index, global_transform, vmd_transform,
        additional_transform)
    return global_transform, vmd_transform, additional_transform
|
Hashi4/vmdgadgets
|
vmdgadgets/vmdutil/vmdmotion.py
|
Python
|
apache-2.0
| 16,940
|
[
"VMD"
] |
e4c0abf5c22f8ae9516cf321d0b8c1de914154cb904734703ac71b5fd1ee8de9
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to generate inputs/outputs exclusion lists for GradientTape.
To use this script:
bazel run tensorflow/python/eager:gradient_input_output_exclusions -- \
$PWD/tensorflow/python/eager/pywrap_gradient_exclusions.cc
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_fndefs
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
_GENERATED_FILE_HEADER = """/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Inputs/Outputs exclusion lists for GradientTape.
//
// This file is MACHINE GENERATED! Do not edit.
// Generated by: tensorflow/python/eager/gen_gradient_input_output_exclusions.py
"""
_INCLUDES = """
#include "tensorflow/python/eager/pywrap_gradient_exclusions.h"
#include "absl/types/optional.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
using tensorflow::string;
namespace {
// Keep static data in a format that's easy to init statically.
struct OpIndexInfo {
const char *op_name;
int num_indices;
std::array<int, 4> unused_indices;
};
// Helper function to initialize FlatMap<string,FlatSet> from OpIndexInfo.
template <typename T>
auto OpGradientInfoInit(const T &a) {
auto *m = new tensorflow::gtl::FlatMap<string, tensorflow::gtl::FlatSet<int>>;
for (const auto &item : a) {
m->emplace(string(item.op_name),
tensorflow::gtl::FlatSet<int>(
item.unused_indices.begin(),
item.unused_indices.begin() + item.num_indices));
}
return m;
}
} // namespace
"""
_EXCLUDED_OPS = [
# Composite ops with custom gradient functions.
"If",
"StatelessIf",
"While",
"StatelessWhile",
"Case",
# TF Lite. These ops only appear in OSS.
# TODO(srbs): Find a better way to filter these out.
"AudioMicrofrontend",
]
class _SubscriptUseTracker(transformer.Base):
  """Track uses of composite names, excluding certain names when subscripted."""

  def __init__(self, ctx, exclude_when_subscripted):
    super(_SubscriptUseTracker, self).__init__(ctx)
    # Qualified names (e.g. op.inputs) whose bare value should not be
    # visited when it only appears as the base of a subscript expression.
    self.exclude = exclude_when_subscripted
    # All qualified names read anywhere in the visited function.
    self.reads = set()
    # Reads too complex to analyze index-wise (e.g. slices like x[:3]).
    self.complex_reads = set()

  def visit_Attribute(self, node):
    """Visits attribute nodes in the AST."""
    if anno.hasanno(node, anno.Basic.QN):
      qn = anno.getanno(node, anno.Basic.QN)
      if isinstance(node.ctx, gast.Load):
        self.reads.add(qn)
    node = self.generic_visit(node)
    return node

  def visit_Subscript(self, node):
    """Visits nodes with subscript in the AST."""
    if anno.hasanno(node, anno.Basic.QN):
      qn = anno.getanno(node, anno.Basic.QN)
      if isinstance(node.ctx, gast.Load):
        self.reads.add(qn)
    elif not isinstance(node.slice, gast.Index):
      # Non-simple subscript (e.g. a slice): record a complex read on
      # whichever side carries a qualified name.
      if anno.hasanno(node, anno.Basic.QN):
        self.complex_reads.add(anno.getanno(node, anno.Basic.QN))
      elif anno.hasanno(node.value, anno.Basic.QN):
        self.complex_reads.add(anno.getanno(node.value, anno.Basic.QN))
    value_qn = anno.getanno(node.value, anno.Basic.QN, None)
    if value_qn in self.exclude:
      # Don't re-visit the excluded base name itself; its indexed uses are
      # accounted for via the subscript's own qualified name.
      node.value = self.generic_visit(node.value)
    else:
      node.value = self.visit(node.value)
    node.slice = self.visit(node.slice)
    return node
class _FunctionCallsTracker(transformer.Base):
  """Tracks any function calls made with a given first argument name."""

  def __init__(self, ctx, first_argument_name):
    super(_FunctionCallsTracker, self).__init__(ctx)
    # Qualified name of the tracked first argument (the grad fn's op arg).
    self.first_argument_name = first_argument_name
    # Resolved Python function objects called as fn(<first_argument>, ...).
    self.calls = set()

  def visit_Name(self, node):
    node = self.generic_visit(node)
    # Attach the live Python object for names resolvable in the function's
    # namespace, so attribute chains can be resolved statically below.
    if isinstance(node.ctx, gast.Load) and node.id in self.ctx.info.namespace:
      anno.setanno(node, "static_value", self.ctx.info.namespace[node.id])
    return node

  def visit_Attribute(self, node):
    node = self.generic_visit(node)
    # Propagate static_value through attribute access (module.attr, etc.).
    parent_val = anno.getanno(node.value, "static_value", default=None)
    if parent_val is not None:
      if hasattr(parent_val, node.attr):
        anno.setanno(node, "static_value", getattr(parent_val, node.attr))
    return node

  def visit_Call(self, node):
    node = self.generic_visit(node)
    # Record calls whose first positional argument is the tracked name and
    # whose callee resolved to a concrete Python object.
    if (node.args and anno.getanno(node.args[0], anno.Basic.QN,
                                   None) == self.first_argument_name):
      fn_object = anno.getanno(node.func, "static_value", None)
      if fn_object is not None:
        self.calls.add(fn_object)
    return node
# Sentinel returned by _live_tensors: "indices are used but cannot be
# determined" (distinct from the empty set, which means "none used").
_ALL = object()
def _live_tensors(f, attr_name="inputs"):
  """Returns the indices of the used inputs.

  Note: This currently only handles direct index accesses e.g. op.inputs[1].
  If the function has slicing or list comprehension on attr_name then returns
  _ALL. This ensure that this is correct even if inefficient.

  Args:
    f: A grad function, taking the op as first argument.
    attr_name: op attr to track. "inputs" or "outputs".

  Returns:
    Either one of:
      * set of integers representing individual indices of inputs used
      * the value _ALL, if indices are used but cannot be determined which
      * empty set, if no inputs are used
  """
  node, _ = parser.parse_entity(f, ())
  entity_info = transformer.EntityInfo(
      name=f.__name__,
      source_code=None,
      source_file=None,
      future_features=(),
      namespace=sys.modules[f.__module__].__dict__)
  ctx = transformer.Context(entity_info, None, None)
  # Run the standard autograph static analyses so liveness annotations are
  # attached to the AST before we inspect it.
  graphs = cfg.build(node)
  node = qual_names.resolve(node)
  node = activity.resolve(node, ctx, None)
  node = reaching_fndefs.resolve(node, ctx, graphs)
  node = liveness.resolve(node, ctx, graphs)
  op_arg_name = anno.getanno(node.args.args[0], anno.Basic.QN)
  op_inputs_outputs_name = qual_names.QN(op_arg_name, attr=attr_name)
  special_tracker = _SubscriptUseTracker(ctx, (op_inputs_outputs_name,))
  node = special_tracker.visit(node)
  live_vars_in = anno.getanno(node.body[0], anno.Static.LIVE_VARS_IN)
  inputs_outputs_used_qns = set()
  for v in special_tracker.complex_reads:
    # Complicated patterns like op.inputs[:3]. Could be smarter about them
    # if they matter much.
    if v == op_inputs_outputs_name:
      return _ALL
  for v in live_vars_in:
    if v in special_tracker.reads:
      if (v.has_subscript() and v.parent == op_inputs_outputs_name):
        inputs_outputs_used_qns.add(v)
      elif v == op_inputs_outputs_name:
        # When op.{attr_name} is used directly, assume all tensors are
        # used for now. In that case, no point digging further.
        # TODO(mdan): We can descend into tuple expansions.
        return _ALL
  # Functions called with the op as first argument may also read tensors;
  # recurse into those callees and union their usage.
  function_calls_tracker = _FunctionCallsTracker(ctx, op_arg_name)
  node = function_calls_tracker.visit(node)
  input_output_indices = set()
  for called_f in function_calls_tracker.calls:
    child_indices = _live_tensors(called_f, attr_name=attr_name)
    if child_indices is _ALL:
      return _ALL
    input_output_indices |= child_indices
  for v in inputs_outputs_used_qns:
    assert v.has_subscript()
    _, subscript = v.qn
    if not subscript.is_simple():
      # Not a number, assuming it can be anything.
      return _ALL
    subscript_val, = subscript.qn
    if (not isinstance(subscript_val, qual_names.Literal) and
        not isinstance(subscript_val.value, int)):
      # Not a number, assuming it can be anything.
      return _ALL
    input_output_indices.add(subscript_val.value)
  return input_output_indices
def _get_num_inputs_outputs(op_type):
  """Returns (num_inputs, num_outputs).

  Args:
    op_type: String. The type of the Operation. Used to lookup the op in the
      registry.

  Returns:
    (num_inputs, num_outputs). Either count is -1 when the value can't be
    statically inferred from the OpDef alone (variable-length list args)
    or when the OpDef lookup fails.
  """

  def _arg_count(arg_defs):
    # number_attr / type_list_attr args have a runtime-dependent length.
    if any(arg.number_attr or arg.type_list_attr for arg in arg_defs):
      return -1
    return len(arg_defs)

  op_def = op_def_registry.get(op_type)
  if not op_def:
    return -1, -1
  return _arg_count(op_def.input_arg), _arg_count(op_def.output_arg)
def get_entries(attr_name):
  """Returns the dict of entries.

  Each entry is of the form {op_name, {true|false, indices}}
    true: All values are unused.
    false: `indices` are the only unused indices.

  Note: ops for which all values are used are not printed.

  Args:
    attr_name: inputs or outputs.

  Returns:
    A dict from op_type to formatted entry in the dict.
  """
  assert attr_name in ["inputs", "outputs"]
  entries = {}
  for op_type in ops._gradient_registry.list():  # pylint: disable=protected-access
    if op_type in _EXCLUDED_OPS:
      continue
    num_values = _get_num_inputs_outputs(op_type)[0 if attr_name ==
                                                  "inputs" else 1]
    gradient_fn = ops._gradient_registry.lookup(op_type)  # pylint: disable=protected-access
    if gradient_fn is None:
      # NotDifferentiable
      if num_values != -1:
        # Op-name-only entry means "all indices unused".
        entries[op_type] = "{\"%s\"}," % op_type
      continue
    used_tensors = _live_tensors(gradient_fn, attr_name=attr_name)
    if used_tensors is _ALL:
      # Cannot prove anything is unused; omit the op entirely.
      continue
    elif not used_tensors:
      # Gradient reads none of the tensors: mark all unused.
      entries[op_type] = "{\"%s\"}," % op_type
    else:
      all_tensors = set(range(num_values))
      unused_tensors = all_tensors - used_tensors
      if unused_tensors:
        unused_tensor_list = sorted(list(unused_tensors))
        entries[op_type] = "{\"%s\", %d, {%s}}," % (
            op_type, len(unused_tensor_list), ", ".join(
                str(i) for i in unused_tensor_list))
  return entries
def get_function(name, entries):
  """Generates lookup function with given name and lookup table entries.

  The generated C++ function returns the FlatSet of unused indices for
  op_name, or absl::nullopt when the op is not in the table.
  """
  contents = """
absl::optional<tensorflow::gtl::FlatSet<int>> {name}(
    const tensorflow::string &op_name) {{
  static std::array<OpIndexInfo, {count}> a = {{{{
""".format(
      name=name, count=len(entries) + 1)  # +1: trailing VarHandleOp sentinel.
  contents += "      "
  contents += "\n      ".join(entries[op_type] for op_type in sorted(entries))
  contents += "\n      {\"VarHandleOp\"},"
  contents += """
  }};
  static const auto &m = *OpGradientInfoInit(a);

  auto it = m.find(op_name);
  if (it != m.end()) {
    return it->second;
  }
  return absl::nullopt;
}
"""
  return contents
def get_contents():
  """Returns contents for the generated file."""
  sections = (
      _GENERATED_FILE_HEADER,
      _INCLUDES,
      get_function("OpGradientUnusedInputIndices", get_entries("inputs")),
      get_function("OpGradientUnusedOutputIndices", get_entries("outputs")),
  )
  return "".join(sections)
def main(output_file):
  """Generates the exclusion lists and writes them to `output_file`."""
  with open(output_file, "w") as out:
    out.write(get_contents())
if __name__ == "__main__":
  # CLI: one positional argument naming the file to generate.
  cli_parser = argparse.ArgumentParser()
  cli_parser.add_argument("output", metavar="O", type=str, help="Output file.")
  main(cli_parser.parse_args().output)
|
davidzchen/tensorflow
|
tensorflow/python/eager/gradient_input_output_exclusions.py
|
Python
|
apache-2.0
| 12,833
|
[
"VisIt"
] |
68cc7a7ce4dfaf63b477e94df55ec9261a5d89b7f6533697aa1ddbbd723599f8
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Paweł Wodnicki
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the 32bitmicro nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL Paweł Wodnicki BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from eda import *
from eda.packages.package import *
from eda.components.NXP.lpc214x import *
from eda.components.JAE.USB import *
from eda.components.JAE.SD import *
from eda.components.ST.stm3210x import *
from eda.components.ST.M25PExx import *
from eda.components.ST.ST1480A import *
from eda.components.MAXIM.rs232 import *
from eda.components.FTDI.TTL_232R_3V3 import *
from eda.components.MODULE.DIP40 import *
from eda.components.MODULE.SW_DEBUG_CON import *
from eda.components.EEPROM.I2C import *
from eda.components.Abracon.ABM3B import *
from eda.components.Nichicon.F93 import *
from eda.components.Diodes.DCX100 import *
# LDO subcircuit
from eda.circuits.ldo_ld1117 import *
# Running reference-designator counters; presumably seeds for auto-numbering
# capacitor (C*) and resistor (R*) refs. NOTE(review): not referenced in the
# visible part of this file -- confirm before removing.
crefid = 1
rrefid = 1
#####################################################################
# connect to DIP40 by port
# P0 <-> PA0
# P15 <-> PA15
# X0 <-> PB0
# X15 <-> PB15
# Use 0.036" holes for the header
# Via sizes (drill/annulus in mils) -- per "Rajda" (presumably a fab/vendor
# name; confirm):
# 40 x 24 mil
# 32 x 12 mil on the board
# 55 mil round is STANDARDVIA
# 40/28, 40/24, 40/20, 32/12
# Analog VDD
# RC filter: 100 ohm + 10 uF
# Digital VDD
# 100 nF on all VDD pins
def connect_by_port():
    """Connect the DIP40 module header to the STM32 CPU, port by port.

    Mapping: module pins P0..P15 -> CPU port A bits PA0..PA15,
             module pins X0..X15 -> CPU port B bits PB0..PB15.

    The original hand-unrolled 32 identical net-creation stanzas; this
    version is table-driven and produces the same nets, names and node
    connections in the same order.

    NOTE(review): this function reads the globals ``sch``, ``MOD1`` and
    ``U1``; in this file those names are created locally inside
    gen_schematic(), so confirm the expected setup/call order.
    """
    # Full schematic pin names of the STM32 port A bits, indexed 0..15.
    port_a_pins = [
        'PA0-WKUP/USART2_CTS/ADC_IN0/TIM2_CH1_ETR',
        'PA1/USART2_RTS/ADC_IN1/TIM2_CH2',
        'PA2/USART2_TX/ADC_IN2/TIM2_CH3',
        'PA3/USART2_RX/ADC_IN3/TIM2_CH4',
        'PA4/SPI1_NSS/USART2_CK/ADC_IN4',
        'PA5/SPI1_SCK/ADC_IN5',
        'PA6/SPI1_MISO/ADC_IN6/TIM3_CH1',
        'PA7/SPI1_MOSI/ADC_IN7/TIM3_CH2',
        'PA8/USART1_CK/MCO',
        'PA9/USART1_TX',
        'PA10/USART1_RX',
        'PA11/USART1_CTS',
        'PA12/USART1_RTS',
        'PA13/JTMS/SWDIO',
        'PA14/JTCK/SWCLK',
        'PA15/JTDI',
    ]
    # Full schematic pin names of the STM32 port B bits, indexed 0..15.
    port_b_pins = [
        'PB0/ADC_IN8/TIM3_CH3',
        'PB1/ADC_IN9/TIM3_CH4',
        'PB2/BOOT1',
        'PB3/JTDO/TRACESWO',
        'PB4/JNTRST',
        'PB5/I2C1_SMBAl',
        'PB6/I2C1_SCL/TIM4_CH1',
        'PB7/I2C1_SDA/TIM4_CH2',
        'PB8/TIM4_CH3',
        'PB9/TIM4_CH4',
        'PB10/I2C2_SCL/USART3_TX',
        'PB11/I2C2_SDA/USART3_RX',
        'PB12/SPI2_NSS/I2C2_SMBAl/USART3_CK',
        'PB13/SPI2_SCK/USART3_CTS',
        'PB14/SPI2_MISO/USART3_RTS',
        'PB15/SPI2_MOSI',
    ]

    def _wire(prefix, cpu_pins):
        # One net per module pin: net "<prefix><bit>" joins the module header
        # pin of the same name to the corresponding CPU pin.
        for bit, cpu_pin in enumerate(cpu_pins):
            net_name = '%s%d' % (prefix, bit)
            net = CNet(net_name)
            sch.addNet(net)
            net.add(CNode(MOD1, net_name))
            net.add(CNode(U1, cpu_pin))

    _wire('P', port_a_pins)  # left side of the header, port A
    _wire('X', port_b_pins)  # right side of the header, port B
#####################################################################
def gen_schematic(sch, DesignName, LibName):
# Ground Net
GNDnet = CNet("GND")
sch.addNet(GNDnet)
# VDD 3.3V Net
VDD3V3net = CNet("3V3")
sch.addNet(VDD3V3net)
# Add stm3210x
U1 = STM3210X("U1","","stm3210_lqfp48",LibName,"STM3210")
sch.addDev(U1)
sch.addSymbolFromDev(U1, 4, "STM3210")
# Connect stm3210x to GND
# VSS
GNDnet.add(CNode(U1,'VSS_1'))
GNDnet.add(CNode(U1,'VSS_2'))
GNDnet.add(CNode(U1,'VSS_3'))
# analog ground
GNDnet.add(CNode(U1,'VSSA'))
# Connect stm3210x to 3V3
VDD3V3net.add(CNode(U1,'VDD_1'))
VDD3V3net.add(CNode(U1,'VDD_2'))
VDD3V3net.add(CNode(U1,'VDD_3'))
VDD3V3net.add(CNode(U1,'VDDA')) # analog
VDD3V3net.add(CNode(U1,'VBAT')) # battery
###########################################
# Add simple reset with
# Capacitor + Resistor
# 0805 size
C30 = CAPSMT("C30","100nF","CAP_RESET",LibName,"CAP")
sch.addDev(C30)
sch.addSymbolFromDev(C30, 0, "CAP")
# pin 1 tied to GND, pin 2 tied to RESET#
GNDnet.add(CNode(C30,'1'))
# 0805 size
R30 = RESSMT("R30","10k","RES_RESET",LibName,"RES")
sch.addDev(R30)
sch.addSymbolFromDev(R30, 0, "RES")
# pin 1 tied to to RESET#, pin 2 tied 3V3
VDD3V3net.add(CNode(R30,'2'))
###########################################
# Add Crystal and Capacitors
Q1 = ABM3B("Q1","12 Mhz","Q1_12MHz",LibName,"ABM3B")
sch.addDev(Q1)
sch.addSymbolFromDev(Q1, 0, "ABM3B")
# 0805 size
CQ1 = CAPSMT("CQ1","18pF","CAP_1_Q1",LibName,"CAP")
sch.addDev(CQ1)
# 0805 size
CQ2 = CAPSMT("CQ2","18pF","CAP_2_Q1",LibName,"CAP")
sch.addDev(CQ2)
OSC_OUTnet = CNet("OSC_OUT")
sch.addNet(OSC_OUTnet)
OSC_OUTnet.add(CNode(U1,'OSC_OUT'))
OSC_OUTnet.add(CNode(Q1, '3'))
GNDnet.add(CNode(Q1, 'GND_2'))
OSC_OUTnet.add(CNode(CQ1, '1'))
GNDnet.add(CNode(CQ1, '2'))
OSC_INnet = CNet("OSC_IN")
sch.addNet(OSC_INnet)
OSC_INnet.add(CNode(U1,'OSC_IN'))
OSC_INnet.add(CNode(Q1, '1'))
GNDnet.add(CNode(Q1, 'GND_4'))
OSC_INnet.add(CNode(CQ2, '2'))
GNDnet.add(CNode(CQ2, '1'))
###########################################
# ADD LED and resistor # 0805 size
# Green diode 2.1V
LED1 = LEDSMT("LED1","Green","LED",LibName,"LED")
sch.addDev(LED1)
sch.addSymbolFromDev(LED1, 0, "LED")
# 20 mA from 3.3V 3.3 - 2.1 = 1.2 V -> 60 ohm
R50 = RESSMT("R50","60","RES_LED",LibName,"RES")
sch.addDev(R50)
LEDnet = CNet("LED")
sch.addNet(LEDnet)
GNDnet.add(CNode(LED1, 'K'))
LEDnet.add(CNode(LED1, 'A'))
LEDnet.add(CNode(R50, '1'))
VDD3V3net.add(CNode(R50,'2'))
###########################################
# RS232 TxD, RxD and Reset
U4 = DCX114EU("U4","","DCX114EU_SERIAL",LibName,"DCX114EU")
sch.addDev(U4)
sch.addSymbolFromDev(U4, 2, "DCX114EU")
# Tripple diode D1 is used for SIN, D2 is used for ATN
U5 = BAS16TW("U5","","BAS16TW",LibName,"BAS16TW")
sch.addDev(U5)
sch.addSymbolFromDev(U5, 2, "BAS16TW")
##########################################################
# SIN
# U4 Q1 is pre-biased NPN with R1 and R2 it is used for RxD input SIN
# Base is conected to SIN and through diode and resistor to SOUT
# Collector is connected via resistor to VDD3V3
# Emiter is connected to GND
GNDnet.add(CNode(U4,'EQ1'))
#P2net.add(CNode(U4,'BQ1')) # 'PA2/USART2_TX/ADC_IN2/TIM2_CH3'
#SOUTnet.add(CNode(U4,'CQ1'))
# 0805 size
R41 = RESSMT("R41","10k","RES_RXD",LibName,"RES")
sch.addDev(R41)
#Connect collector with output resistor
# Q1CNET is P3
#Q1Cnet = CNet("Q1CNET")
#sch.addNet(Q1Cnet)
# Connect USART RxD
#Q1Cnet.add(CNode(U1,'PA3/USART2_RX/ADC_IN3/TIM2_CH4'))
#Q1Cnet.add(CNode(U4,'CQ1'))
#Q1Cnet.add(CNode(R41,'1'))
# Connect pull-up resistor to 3V3
VDD3V3net.add(CNode(R41,'2'))
##########################################################
# SOUT
# U4 Q2 is pre-biased PNP with R1 and R2
# it is used for TxD output SOUT
# Base is conected to TxD ouput from CPU
# Emiter is at VDD3V3
# Collector is connected to SOUT
# through resistor and diode to TxD line from RS232
# Capacitor between diode and resitor is conected to GND
VDD3V3net.add(CNode(U4, 'EQ2'))
#VDD3V3net.add(CNode(U4, 'BQ2'))
#VDD3V3net.add(CNode(U4, 'CQ2'))
##########################################################
# Diode and resistor between SIN and SOUT
# Resitsor R40 connected to SOUT
R40 = RESSMT("R40","10k","RES_BOOST",LibName,"RES")
sch.addDev(R40)
#SOUTnet.add(CNode(R40,'1'))
# Capacitor to ground
C40 = CAPSMT("C40","100nF","CAP_BOOST",LibName,"CAP")
sch.addDev(C40)
GNDnet.add(CNode(C40, '1'))
# Diode net
Dnet = CNet("DNET")
sch.addNet(Dnet)
Dnet.add(CNode(R40,'2')) # Resistor
Dnet.add(CNode(C40,'2')) # Capacitor
Dnet.add(CNode(U5,'A1')) # Diode Anode SIN
Dnet.add(CNode(U5,'A2')) # Diode Anode ATN
# Cathode is connected to SIN
#SINnet.add(CNode(U5,'C1'))
##########################################################
# Reset
U6 = DCX114EU("U6","","DCX114EU_RESET",LibName,"DCX114EU")
sch.addDev(U6)
# U6 Q1 is pre-biased NPN with R1 and R2
# it is used for Reset ATN
# Base is conected to ATN
# Collector is connected to RESET#
# Emiter is connected to GND
GNDnet.add(CNode(U6, 'EQ1'))
###########################################
# Add 40 pin module layout
MOD1=DIP40("MOD1","","DIP40",LibName,"DIP40")
sch.addDev(MOD1)
sch.addSymbolFromDev(MOD1, 2, "DIP40")
# Add Header for left side
#CON1=HEADER(20,1,"CON1","","Header 20x1")
#sch.addDev(CON1)
# Add Header for right side
#CON2=HEADER(20,1,"CON2","","Header 20x1")
#sch.addDev(CON2)
###########################################
# Power LDO
# 3.3 V LDO
# Power supply 12V
VDD12Vnet = CNet("12V")
sch.addNet(VDD12Vnet)
U2 = LD1117S33("U2","","LD1117S33",LibName,"LD1117")
sch.addDev(U2)
sch.addSymbolFromDev(U2, 2, "LD1117")
VDD12Vnet.add(CNode(U2,'IN'))
VDD3V3net.add(CNode(U2,'OUT'))
GNDnet.add(CNode(U2,'GND'))
GNDnet.add(CNode(U2,'GND2'))
# LDO ld1117 sub-circuit
#ldo = ldo_ld1117()
#ldo.add(sch,U2,VDD12Vnet,VDD3V3net,GNDnet,10,10)
# Input caps
C11 = F93_B("C11","","CAP_IN_TANT",LibName,"CAPTANT")
sch.addDev(C11)
sch.addSymbolFromDev(C11, 2, "CAPTANT")
VDD12Vnet.add(CNode(C11,'+'))
GNDnet.add(CNode(C11,'-'))
# This cap is not needed on input
#C12 = CAPSMT("C12","","cap ceramic IN")
#sch.addDev(C12)
#VDD12Vnet.add(CNode(C12,'1'))
#GNDnet.add(CNode(C12,'2'))
# Output caps
C21 = F93_B("C21","","CAP_OUT_TANT",LibName,"CAPTANT")
sch.addDev(C21)
VDD3V3net.add(CNode(C21,'+'))
GNDnet.add(CNode(C21,'-'))
C22 = CAPSMT("C22","","CAP_OUT_CER",LibName,"CAP")
sch.addDev(C22)
VDD3V3net.add(CNode(C22,'1'))
GNDnet.add(CNode(C22,'2'))
###########################################
# Add 24LCxx in SO8
# I2C memory
#U3 = EE24LCXX("U3","","EE24LCXX")
###########################################
# SPI memory
# Add M25PEXX in SO8
U3 = M25PEXX("U3","","M25PEXX",LibName,"M25PEXX")
sch.addDev(U3)
sch.addSymbolFromDev(U3, 2, "M25PEXX")
# 0805 size
# tied to 3V3 and S#
R60 = RESSMT("R60","10k","RES_3V3",LibName,"RES")
sch.addDev(R60)
# tied to PA4/SPI1_NSS/USART2_CK/ADC_IN4 and S#
R61 = RESSMT("R61","63","RES_S#",LibName,"RES")
sch.addDev(R61)
# tied to PA6/SPI1_MISO/ADC_IN6/TIM3_CH1 and Q
R62 = RESSMT("R62","62","RES_Q",LibName,"RES")
sch.addDev(R62)
# tie pins to ground and 3V3
GNDnet.add(CNode(U3, 'VSS'))
VDD3V3net.add(CNode(U3,'VCC'))
VDD3V3net.add(CNode(U3,'TSL#_W#'))
VDD3V3net.add(CNode(U3,'RESET#'))
VDD3V3net.add(CNode(R60,'2'))
# P4 net
# Connect to SPI1 PA4 through R61
#'PA4/SPI1_NSS/USART2_CK/ADC_IN4'
#SPI1_NSSnet = CNet("SPI1_NSS")
#sch.addNet(SPI1_NSSnet)
#SPI1_NSSnet.add(CNode(U1, 'PA4/SPI1_NSS/USART2_CK/ADC_IN4'))
#SPI1_NSSnet.add(CNode(R61, '1'))
SPI_Snet = CNet("SPI_S#")
sch.addNet(SPI_Snet)
SPI_Snet.add(CNode(R61, '2'))
SPI_Snet.add(CNode(U3, 'S#'))
SPI_Snet.add(CNode(R60, '1')) # pull-up to 3V3
# P5 net
# Connect to SPI1 PA5
#"PA5/SPI1_SCK/ADC_IN5"
#SPI1_SCKnet = CNet("SPI1_SCK")
#sch.addNet(SPI1_SCKnet)
#SPI1_SCKnet.add(CNode(U1, 'PA5/SPI1_SCK/ADC_IN5'))
#SPI1_SCKnet.add(CNode(U3, 'C'))
# P6 Net
# Connect to SPI1 PA6 through R62
#"PA6/SPI1_MISO/ADC_IN6/TIM3_CH1"
#SPI1_MISOnet = CNet("SPI1_MISO")
#sch.addNet(SPI1_MISOnet)
#SPI1_MISOnet.add(CNode(U1, 'PA6/SPI1_MISO/ADC_IN6/TIM3_CH1'))
#SPI1_MISOnet.add(CNode(R62, '1'))
SPI_Qnet = CNet("SPI_Q")
sch.addNet(SPI_Qnet)
SPI_Qnet.add(CNode(R62, '2'))
SPI_Qnet.add(CNode(U3, 'Q'))
# P&
#"PA7/SPI1_MOSI/ADC_IN7/TIM3_CH2"
#SPI1_MOSInet = CNet("SPI1_MOSI")
#sch.addNet(SPI1_MOSInet)
#SPI1_MOSInet.add(CNode(U1, 'PA7/SPI1_MOSI/ADC_IN7/TIM3_CH2'))
#SPI1_MOSInet.add(CNode(U3, 'D'))
###########################################
# Add FTDI connector
CON3=TTL_232R_3V3("CON3","","TTL_232R_3V3",LibName,"TTL_232R_3V3")
sch.addDev(CON3)
sch.addSymbolFromDev(CON3,1, "TTL_232R_3V3")
GNDnet.add(CNode(CON3, 'GND'))
# Jumper for 3V3 from the FTDI connector
J1=HEADER(2,1,"J1","","J1",LibName,"J1", "J1")
sch.addDev(J1)
sch.addSymbolFromDev(J1,1, "J1")
VDD3V3net.add(CNode(J1,'1'))
# Connect J1 to 3V3 from serial connector
VCC3V3net = CNet("VCC3V3")
sch.addNet(VCC3V3net)
VCC3V3net.add(CNode(J1,'2'))
VCC3V3net.add(CNode(CON3, 'VCC'))
# No connection to VDD3V3, should have jumper so it can power the chip from USB
# VDD3V3net.add(CNode(CON3, 'VCC'))
# Add connections to USART1
#"PA9/USART1_TX"
TXDnet = CNet("TXD")
sch.addNet(TXDnet)
# CPU
TXDnet.add(CNode(U1,'PA9/USART1_TX'))
# Serial header
TXDnet.add(CNode(CON3,'TXD'))
#"PA10/USART1_RX"
RXDnet = CNet("RXD")
sch.addNet(RXDnet)
# CPU
RXDnet.add(CNode(U1,'PA10/USART1_RX'))
# Serial header
RXDnet.add(CNode(CON3,'RXD'))
#"PA11/USART1_CTS"
CTSnet = CNet("CTS")
sch.addNet(CTSnet)
# CPU
CTSnet.add(CNode(U1,'PA11/USART1_CTS'))
# Serial header
CTSnet.add(CNode(CON3,'CTS#'))
#"PA12/USART1_RTS"
RTSnet = CNet("RTS")
sch.addNet(RTSnet)
# CPU
RTSnet.add(CNode(U1,'PA12/USART1_RTS'))
# Serial header
RTSnet.add(CNode(CON3,'RTS#'))
##################################################################################################
# Connection to the DIP40 pin
# Left side
# Pin 1 SOUT
# Serial Out
SOUTnet = CNet("SOUT")
sch.addNet(SOUTnet)
SOUTnet.add(CNode(MOD1,'SOUT'))
# CPU - can not connect directly
# PA2/USART2_TX/ADC_IN2/TIM2_CH3
# Serial header
# Serial Interface - Resistor R40
SOUTnet.add(CNode(R40,'1'))
SOUTnet.add(CNode(U4,'CQ2'))
# Pin 2 SIN
# Serial IN
SINnet = CNet("SIN")
sch.addNet(SINnet)
SINnet.add(CNode(MOD1,'SIN'))
# CPU - can not connect directly
# PA3/USART2_RX/ADC_IN3/TIM2_CH4
# Serial Interface - Base Q1
SINnet.add(CNode(U4,'BQ1'))
# Cathode U5 D1
SINnet.add(CNode(U5,'C1'))
# Pin 3 ATN
ATNnet = CNet("ATN")
sch.addNet(ATNnet)
ATNnet.add(CNode(MOD1,'ATN'))
# Serial header
###ATNnet.add(CNode(CON3,'RTS#'))
# Reset Interface - Base Q1
ATNnet.add(CNode(U6,'BQ1'))
# Cathode U5 D2
ATNnet.add(CNode(U5,'C2'))
# Pin 4 VSS_1
# Ground GND
GNDnet.add(CNode(MOD1,'VSS_1'))
#################################################################
# Connect by closest pin
# Pin 5 P0 - 10 RB0 GPIO; comparator output;MIWU/Interrupt input
# IO AN0 - Analog input 0
P0net = CNet("P0")
sch.addNet(P0net)
P0net.add(CNode(MOD1,'P0'))
# CPU
P0net.add(CNode(U1,'PA0-WKUP/USART2_CTS/ADC_IN0/TIM2_CH1_ETR'))
# Pin 6 P1 - 11 RB1 GPIO; comparator negative input;MIWU/Interrupt input
# IO AN1 - Analog input 1
P1net = CNet("P1")
sch.addNet(P1net)
P1net.add(CNode(MOD1,'P1'))
# CPU
P1net.add(CNode(U1,'PA1/USART2_RTS/ADC_IN1/TIM2_CH2'))
# Pin 7 P2 - 12 RB2 GPIO; comparator positive input;MIWU/Interrupt input
# IO AN2 - Analog input 2
P2net = CNet("P2")
sch.addNet(P2net)
P2net.add(CNode(MOD1,'P2'))
# CPU
P2net.add(CNode(U1,'PA2/USART2_TX/ADC_IN2/TIM2_CH3'))
# Serial interface
P2net.add(CNode(U4,'BQ2')) # 'PA2/USART2_TX/ADC_IN2/TIM2_CH3'
# Pin 8 P3 - 13 RB3 GPIO; MIWU/Interrupt input
# IO AN3 - Analog input 3
P3net = CNet("P3")
sch.addNet(P3net)
P3net.add(CNode(MOD1,'P3'))
# CPU
P3net.add(CNode(U1,'PA3/USART2_RX/ADC_IN3/TIM2_CH4'))
# Serial interface
P3net.add(CNode(U4,'CQ1'))
P3net.add(CNode(R41,'1'))
# Pin 9 P4 - 14 RB4 GPIO; MIWU/Interrupt input; Timer T1 Capture Input 1
P4net = CNet("P4")
sch.addNet(P4net)
P4net.add(CNode(MOD1,'P4'))
# CPU
P4net.add(CNode(U1,'PA4/SPI1_NSS/USART2_CK/ADC_IN4'))
# SPI NSS
P4net.add(CNode(R61, '1'))
# Pin 10 P5 - 15 RB5 GPIO; MIWU/Interrupt input; Timer T1 Capture Input 2
P5net = CNet("P5")
sch.addNet(P5net)
P5net.add(CNode(MOD1,'P5'))
# CPU
P5net.add(CNode(U1,'PA5/SPI1_SCK/ADC_IN5'))
# SPI SCK
P5net.add(CNode(U3, 'C'))
# Pin 11 P6 - 16 RB6 GPIO; MIWU/Interrupt input; Timer T1 PWM/Compare Output
P6net = CNet("P6")
sch.addNet(P6net)
P6net.add(CNode(MOD1,'P6'))
# CPU
P6net.add(CNode(U1,'PA6/SPI1_MISO/ADC_IN6/TIM3_CH1'))
# SPI MISO
P6net.add(CNode(R62, '1'))
# Pin 12 P7 - RB7 GPIO; MIWU/Interrupt input; Timer T1 External Event Counter Input
P7net = CNet("P7")
sch.addNet(P7net)
P7net.add(CNode(MOD1,'P7'))
# CPU
P7net.add(CNode(U1,'PA7/SPI1_MOSI/ADC_IN7/TIM3_CH2'))
# SPI
P7net.add(CNode(U3, 'D'))
# Pin 13 P8 - 20 RC0 GPIO;Timer T2 Capture Input 1
P8net = CNet("P8")
sch.addNet(P8net)
P8net.add(CNode(MOD1,'P8'))
# CPU
P8net.add(CNode(U1,'PB0/ADC_IN8/TIM3_CH3'))
# Pin 14 P9 - 21 RC1 GPIO;Timer T2 Capture Input 2
P9net = CNet("P9")
sch.addNet(P9net)
P9net.add(CNode(MOD1,'P9'))
# CPU
P9net.add(CNode(U1,'PB1/ADC_IN9/TIM3_CH4'))
# Pin 15 P10 - 22 RC2 GPIO;Timer T2 PWM compare output
P10net = CNet("P10")
sch.addNet(P10net)
P10net.add(CNode(MOD1,'P10'))
# CPU
P10net.add(CNode(U1,'PB2/BOOT1'))
# Pin 16 P11 - 23 RC3 GPIO;Timer T2 External Event Counter Input
P11net = CNet("P11")
sch.addNet(P11net)
P11net.add(CNode(MOD1,'P11'))
# CPU
P11net.add(CNode(U1,'PB10/I2C2_SCL/USART3_TX'))
# Pin 17 P12 - 24 RC4 GPIO
P12net = CNet("P12")
sch.addNet(P12net)
P12net.add(CNode(MOD1,'P12'))
# CPU
P12net.add(CNode(U1,'PB11/I2C2_SDA/USART3_RX'))
# Pin 18 P13 - 25 RC5 GPIO
P13net = CNet("P13")
sch.addNet(P13net)
P13net.add(CNode(MOD1,'P13'))
# CPU
P13net.add(CNode(U1,'PB12/SPI2_NSS/I2C2_SMBAl/USART3_CK'))
# Pin 19 P14 - 26 RC6 GPIO
P14net = CNet("P14")
sch.addNet(P14net)
P14net.add(CNode(MOD1,'P14'))
# CPU
P14net.add(CNode(U1,'PB13/SPI2_SCK/USART3_CTS'))
# Pin 20 P15 - 27 RC7 GPIO
P15net = CNet("P15")
sch.addNet(P15net)
P15net.add(CNode(MOD1,'P15'))
# CPU
P15net.add(CNode(U1,'PB14/SPI2_MISO/USART3_RTS'))
# Right Side
# Pin 21 X0 - 28 RD0 GPIO
X0net = CNet("X0")
sch.addNet(X0net)
X0net.add(CNode(MOD1,'X0'))
# CPU
X0net.add(CNode(U1,'PB15/SPI2_MOSI'))
# Pin 22 X1 - 29 RD1 GPIO
X1net = CNet("X1")
sch.addNet(X1net)
X1net.add(CNode(MOD1,'X1'))
# CPU
X1net.add(CNode(U1,'PA8/USART1_CK/MCO'))
# Pin 23 X2 - 30 RD2 GPIO
X2net = CNet("X2")
sch.addNet(X2net)
X2net.add(CNode(MOD1,'X2'))
# CPU
X2net.add(CNode(U1,'PA13/JTMS/SWDIO'))
# Pin 24 X3 - 31 RD3 GPIO
X3net = CNet("X3")
sch.addNet(X3net)
X3net.add(CNode(MOD1,'X3'))
# CPU
X3net.add(CNode(U1,'PA14/JTCK/SWCLK'))
# Pin 25 X4 - 34 RD4 GPIO
X4net = CNet("X4")
sch.addNet(X4net)
X4net.add(CNode(MOD1,'X4'))
# CPU
X4net.add(CNode(U1,'PA15/JTDI'))
# Pin 26 X5 - 35 RD5 GPIO
X5net = CNet("X5")
sch.addNet(X5net)
X5net.add(CNode(MOD1,'X5'))
# CPU
X5net.add(CNode(U1,'PB3/JTDO/TRACESWO'))
# Pin 27 X6 - 36 RD6 GPIO
X6net = CNet("X6")
sch.addNet(X6net)
X6net.add(CNode(MOD1,'X6'))
# CPU
#X6net.add(CNode(U1,'PA13/JTMS/SWDIO'))
X6net.add(CNode(U1,'PB4/JNTRST'))
# Pin 28 X7 - 37 RD7 GPIO
X7net = CNet("X7")
sch.addNet(X7net)
X7net.add(CNode(MOD1,'X7'))
# CPU
#X7net.add(CNode(U1,'PA14/JTCK/SWCLK'))
X7net.add(CNode(U1,'PB5/I2C1_SMBAl'))
# Pin 29 X8 - 38 RE0 GPIO
X8net = CNet("X8")
sch.addNet(X8net)
X8net.add(CNode(MOD1,'X8'))
# CPU
#X8net.add(CNode(U1,'PA15/JTDI'))
X8net.add(CNode(U1,'PB6/I2C1_SCL/TIM4_CH1'))
# Pin 30 X9 - 39 RE1 GPIO
X9net = CNet("X9")
sch.addNet(X9net)
X9net.add(CNode(MOD1,'X9'))
# CPU
#X9net.add(CNode(U1,'PB3/JTDO/TRACESWO'))
X9net.add(CNode(U1,'PB7/I2C1_SDA/TIM4_CH2'))
# Pin 31 X10 - 40 RE2 GPIO
X10net = CNet("X10")
sch.addNet(X10net)
X10net.add(CNode(MOD1,'X10'))
# CPU
#X10net.add(CNode(U1,'PB4/JNTRST'))
# CPU boot pin control, 0 - User Flash 1 - System Memory as long as BOOT2 is tied low
X10net.add(CNode(U1,'BOOT0'))
# Pin 32 X11 - 41 RE3 GPIO
X11net = CNet("X11")
sch.addNet(X11net)
X11net.add(CNode(MOD1,'X11'))
# CPU
#X11net.add(CNode(U1,'PB5/I2C1_SMBAl'))
X11net.add(CNode(U1,'PB8/TIM4_CH3'))
# Pin 33 X12 - 42 RE4 GPIO
X12net = CNet("X12")
sch.addNet(X12net)
X12net.add(CNode(MOD1,'X12'))
# CPU
#X12net.add(CNode(U1,'PB6/I2C1_SCL/TIM4_CH1'))
X12net.add(CNode(U1,'PB9/TIM4_CH4'))
# Pin 34 X13 - 43 RE5 GPIO
X13net = CNet("X13")
sch.addNet(X13net)
X13net.add(CNode(MOD1,'X13'))
# CPU
#X13net.add(CNode(U1,'PB7/I2C1_SDA/TIM4_CH2'))
X13net.add(CNode(U1,'PC13-ANTI_TAMP'))
# Pin 35 X14 - 44 RE6 GPIO
X14net = CNet("X14")
sch.addNet(X14net)
X14net.add(CNode(MOD1,'X14'))
# CPU
#X14net.add(CNode(U1,'PB8/TIM4_CH3'))
X14net.add(CNode(U1,'PC14-OSC32_IN'))
# Pin 36 X15 - 45 RE7 GPIO
X15net = CNet("X15")
sch.addNet(X15net)
X15net.add(CNode(MOD1,'X15'))
# CPU
#X15net.add(CNode(U1,'PB9/TIM4_CH4'))
X15net.add(CNode(U1,'PC15-OSC32_OUT'))
# Pin 37 VDD
# VDD3V3net
VDD3V3net.add(CNode(MOD1,'VDD'))
# Pin 38 RES#
# Reset
RESETnet = CNet("RESET#")
sch.addNet(RESETnet)
RESETnet.add(CNode(MOD1,'RESET#'))
# CPU
RESETnet.add(CNode(U1,'NRST'))
# Reset circuit C30 and R30
RESETnet.add(CNode(C30,'2'))
RESETnet.add(CNode(R30,'1'))
# Reset Interface - Collector Q2
RESETnet.add(CNode(U6, 'CQ1'))
# Pin 39 VSS_2
# Ground GND
GNDnet.add(CNode(MOD1,'VSS_2'))
# Pin 40 VIN
# VDD12Vnet
VDD12Vnet.add(CNode(MOD1,'VIN'))
# LDO
VDD12Vnet.add(CNode(U2,'IN'))
# SWD Connector
# 1 - GND
# 2 - VCC
# 3 - PA14 - "PA14/JTCK/SWCLK"
# 4 - # RESET
# 5 - PA13 - "PA13/JTMS/SWDIO" pull-up 100k on the board recommended by ARM
# 6 - PB3 - "PB3/JTDO/TRACESWO"
SWDCON=SW_DEBUG_CON("SWDCON","","SW_DEBUG_CON",LibName,"SWDCON")
sch.addDev(SWDCON)
sch.addSymbolFromDev(SWDCON, 2, "SWDCON")
# 1 - GND
GNDnet.add(CNode(SWDCON, 'GND'))
# 2 - VCC
VDD3V3net.add(CNode(SWDCON, 'VCC'))
# 3- PA14 - "PA14/JTCK/SWCLK"
X3net.add(CNode(SWDCON,'SWCLK'))
# 4 - RESET#
RESETnet.add(CNode(SWDCON,'RESET#'))
# 5 - PA13 - "PA13/JTMS/SWDIO"
X2net.add(CNode(SWDCON,'SWDIO'))
# 6 - PB3 - "PB3/JTDO/TRACESWO"
X5net.add(CNode(SWDCON, 'TRACESWO'))
def make(dname="stm32dip40", lname="stm32dip40"):
    """Build and return the stm32dip40 schematic.

    dname -- design name forwarded to gen_schematic
    lname -- library name forwarded to gen_schematic
    """
    schematic = CSchematic()
    # Board outline: a plain 7.0" x 6.0" rectangle, traced corner to corner.
    width = inch2sch(7.0)
    height = inch2sch(6.0)
    for corner in (Point(0, 0), Point(width, 0), Point(width, height), Point(0, height)):
        schematic.outline.append(corner)
    # NOTE(review): bbox should eventually be derived from the outline automatically.
    schematic.bbox = Rectangle(0, 0, width, height)
    gen_schematic(schematic, dname, lname)
    return schematic
if __name__ == "__main__":
    # Build the default stm32dip40 design when invoked as a script.
    sch = make('stm32dip40', 'stm32dip40')
|
32bitmicro/EDA
|
python/eda/designs/stm32dip40/stm32dip40_schematic.py
|
Python
|
bsd-3-clause
| 28,020
|
[
"CRYSTAL"
] |
3811378b6cf40cd57a99304a568c72559d545929d29a42d06be31d7dfee6a238
|
####
# This sample is published as part of the blog article at www.toptal.com/blog
# Visit www.toptal.com/blog and subscribe to our newsletter to read great posts
####
import logging
import os
from queue import Queue
from threading import Thread
from time import time
from download import setup_download_dir, get_links, download_link
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class DownloadWorker(Thread):
    """Worker thread that consumes (directory, link) jobs from a queue
    and downloads each link until the process exits."""

    def __init__(self, queue):
        super().__init__()
        # Shared work queue of (directory, link) tuples.
        self.queue = queue

    def run(self):
        while True:
            directory, link = self.queue.get()
            try:
                download_link(directory, link)
            finally:
                # Always mark the job done so queue.join() can return,
                # even if the download raised.
                self.queue.task_done()
def main():
    """Download all Imgur links using a pool of 8 daemon worker threads."""
    ts = time()
    client_id = os.getenv('IMGUR_CLIENT_ID')
    if not client_id:
        raise Exception("Couldn't find IMGUR_CLIENT_ID environment variable!")
    download_dir = setup_download_dir()
    links = get_links(client_id)
    # Create a queue to communicate with the worker threads
    queue = Queue()
    # Create 8 worker threads
    for _ in range(8):
        worker = DownloadWorker(queue)
        # Setting daemon to True will let the main thread exit even though the workers are blocking
        worker.daemon = True
        worker.start()
    # Put the tasks into the queue as a tuple
    for link in links:
        # Lazy %-style args avoid formatting cost when the level is disabled.
        logger.info('Queueing %s', link)
        queue.put((download_dir, link))
    # Causes the main thread to wait for the queue to finish processing all the tasks
    queue.join()
    # BUG FIX: was logging.info (root logger); use the module-level logger
    # for consistency with the rest of this script.
    logger.info('Took %s', time() - ts)
if __name__ == '__main__':
    # Run the downloader when invoked as a script.
    main()
|
volker48/python-concurrency
|
threading_imgur.py
|
Python
|
mit
| 1,787
|
[
"VisIt"
] |
dd6e194cd99a6774dd134e04376c22f486836b9b55276d3f89b3fcfd52374ff3
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from typing import Union, List
try:
from dataclasses import dataclass
except ImportError:
from pydantic.dataclasses import dataclass
import numpy as np
from psi4 import core
from psi4.driver import constants
from psi4.driver.p4util import solvers
from psi4.driver.p4util.exceptions import *
from psi4.driver.procrouting.response.scf_products import (TDRSCFEngine, TDUSCFEngine)
# Metadata describing each supported static-response property: a display
# name, labels for the printed matrix axes, and the MintsHelper method that
# yields the corresponding AO-basis perturbation integrals.
dipole = {
    'name': 'Dipole polarizabilities',
    'printout_labels': ['X', 'Y', 'Z'],
    'mints_function': core.MintsHelper.ao_dipole,
    'vector names': ['AO Mux', 'AO Muy', 'AO Muz']
}

quadrupole = {
    'name': 'Quadrupole polarizabilities',
    'printout_labels': ['XX', 'XY', 'XZ', 'YY', 'YZ', 'ZZ'],
    'mints_function': core.MintsHelper.ao_quadrupole,
}
# Vector names are derived from the Cartesian component labels above.
quadrupole['vector names'] = ["AO Quadrupole " + x for x in quadrupole["printout_labels"]]

traceless_quadrupole = {
    'name': 'Traceless quadrupole polarizabilities',
    'printout_labels': ['XX', 'XY', 'XZ', 'YY', 'YZ', 'ZZ'],
    'mints_function': core.MintsHelper.ao_traceless_quadrupole,
}
traceless_quadrupole['vector names'] = [
    "AO Traceless Quadrupole " + x for x in traceless_quadrupole["printout_labels"]
]

# Maps the user-facing keyword to its property descriptor.
property_dicts = {
    'DIPOLE_POLARIZABILITIES': dipole,
    'QUADRUPOLE_POLARIZABILITIES': quadrupole,
    'TRACELESS_QUADRUPOLE_POLARIZABILITIES': traceless_quadrupole
}
def cpscf_linear_response(wfn, *args, **kwargs):
    """
    Compute the static properties from a reference wavefunction. The currently implemented properties are
      - dipole polarizability
      - quadrupole polarizability

    Parameters
    ----------
    wfn : psi4 wavefunction
        The reference wavefunction.
    args : list
        The list of arguments. For each argument, such as ``dipole polarizability``, will return the corresponding
        response. The user may also choose to pass a list or tuple of custom vectors.
    kwargs : dict
        Options that control how the response is computed. The following options are supported (with default values):
          - ``conv_tol``: 1e-5
          - ``max_iter``: 10
          - ``print_lvl``: 2

    Returns
    -------
    responses : list
        The list of responses. Keyword-requested properties come back as
        square ``numpy`` arrays; user-supplied vectors come back mirroring
        the structure they were passed in.

    Raises
    ------
    ValidationError
        If a keyword is not recognized, no perturbation vectors were
        collected, or a user-supplied vector has an incompatible shape.
    """
    mints = core.MintsHelper(wfn.basisset())

    # list of dictionaries to control response calculations, count how many user-supplied vectors we have
    complete_dict = []
    n_user = 0

    for arg in args:
        # for each string keyword, append the appropriate dictionary (vide supra) to our list
        if isinstance(arg, str):
            ret = property_dicts.get(arg)
            if ret:
                complete_dict.append(ret)
            else:
                raise ValidationError('Do not understand {}. Abort.'.format(arg))

        # the user passed a list of vectors. absorb them into a dictionary
        elif isinstance(arg, tuple) or isinstance(arg, list):
            complete_dict.append({
                'name': 'User Vectors',
                'length': len(arg),
                'vectors': arg,
                'vector names': ['User Vector {}_{}'.format(n_user, i) for i in range(len(arg))]
            })
            n_user += len(arg)

        # single vector passed. stored in a dictionary as a list of length 1 (can be handled as the case above that way)
        # note: the length is set to '0' to designate that it was not really passed as a list
        else:
            complete_dict.append({
                'name': 'User Vector',
                'length': 0,
                'vectors': [arg],
                'vector names': ['User Vector {}'.format(n_user)]
            })
            n_user += 1

    # vectors will be passed to the cphf solver, vector_names stores the corresponding names
    vectors = []
    vector_names = []

    # construct the list of vectors. for the keywords, fetch the appropriate tensors from MintsHelper
    for prop in complete_dict:
        if 'User' in prop['name']:
            for name, vec in zip(prop['vector names'], prop['vectors']):
                vectors.append(vec)
                vector_names.append(name)
        else:
            tmp_vectors = prop['mints_function'](mints)
            for tmp in tmp_vectors:
                tmp.scale(-2.0)  # RHF only
                vectors.append(tmp)
                vector_names.append(tmp.name)

    # do we have any vectors to work with?
    if len(vectors) == 0:
        raise ValidationError('I have no vectors to work with. Aborting.')

    # print information on module, vectors that will be used
    _print_header(complete_dict, n_user)

    # fetch wavefunction information
    nmo = wfn.nmo()
    ndocc = wfn.nalpha()
    nvirt = nmo - ndocc

    c_occ = wfn.Ca_subset("AO", "OCC")
    c_vir = wfn.Ca_subset("AO", "VIR")
    nbf = c_occ.shape[0]

    # the vectors need to be in the MO basis. if they have the shape nbf x nbf, transform.
    for i in range(len(vectors)):
        shape = vectors[i].shape

        if shape == (nbf, nbf):
            # AO -> occupied x virtual MO block: C_occ^T V C_vir
            vectors[i] = core.triplet(c_occ, vectors[i], c_vir, True, False, False)

        # verify that this vector already has the correct shape
        elif shape != (ndocc, nvirt):
            raise ValidationError('ERROR: "{}" has an unrecognized shape ({}, {}). Must be either ({}, {}) or ({}, {})'.format(
                vector_names[i], shape[0], shape[1], nbf, nbf, ndocc, nvirt))

    # compute response vectors for each input vector
    params = [kwargs.pop("conv_tol", 1.e-5), kwargs.pop("max_iter", 10), kwargs.pop("print_lvl", 2)]

    responses = wfn.cphf_solve(vectors, *params)

    # zip vectors, responses for easy access
    vectors = {k: v for k, v in zip(vector_names, vectors)}
    responses = {k: v for k, v in zip(vector_names, responses)}

    # compute response values, format output
    output = []
    for prop in complete_dict:
        # try to replicate the data structure of the input
        if 'User' in prop['name']:
            if prop['length'] == 0:
                output.append(responses[prop['vector names'][0]])
            else:
                buf = []
                for name in prop['vector names']:
                    buf.append(responses[name])
                output.append(buf)
        else:
            names = prop['vector names']
            dim = len(names)

            buf = np.zeros((dim, dim))
            for i, i_name in enumerate(names):
                for j, j_name in enumerate(names):
                    # response matrix element: -<V_i | X_j>
                    buf[i, j] = -1.0 * vectors[i_name].vector_dot(responses[j_name])
            output.append(buf)

    _print_output(complete_dict, output)

    return output
def _print_header(complete_dict, n_user):
    """Print the CPSCF solver banner and the list of requested responses."""
    core.print_out('\n\n ---------------------------------------------------------\n'
                   ' {:^57}\n'.format('CPSCF Linear Response Solver') +
                   ' {:^57}\n'.format('by Marvin Lechner and Daniel G. A. Smith') +
                   ' ---------------------------------------------------------\n')

    core.print_out('\n ==> Requested Responses <==\n\n')
    for prop in complete_dict:
        # User vectors are summarized in aggregate below, not per entry.
        if 'User' not in prop['name']:
            core.print_out(' {}\n'.format(prop['name']))
    if n_user != 0:
        core.print_out(' {} user-supplied vector(s)\n'.format(n_user))
def _print_matrix(descriptors, content, title):
    """Pretty-print a square response matrix and register each element as a
    Psi4 scalar variable named ``<title> <row label><col label>``.

    descriptors : axis labels, e.g. ['X', 'Y', 'Z']
    content     : square array with len(descriptors) rows and columns
    title       : prefix used when naming the Psi4 variables
    """
    length = len(descriptors)
    matrix_header = ' ' + ' {:^10}' * length + '\n'
    core.print_out(matrix_header.format(*descriptors))
    core.print_out(' -----' + ' ----------' * length + '\n')

    for i, desc in enumerate(descriptors):
        core.print_out(' {:^5}'.format(desc))
        for j in range(length):
            core.print_out(' {:>10.5f}'.format(content[i, j]))
            # Set the name
            var_name = title + " " + descriptors[i] + descriptors[j]
            core.set_variable(var_name, content[i, j])
        core.print_out('\n')
def _print_output(complete_dict, output):
    """Print the computed response properties.

    Keyword-requested properties are printed as labelled matrices (and
    registered as Psi4 variables by ``_print_matrix``); user-supplied
    vectors have no standard printout and are skipped.
    """
    core.print_out('\n ==> Response Properties <==\n')
    for i, prop in enumerate(complete_dict):
        # IDIOM FIX: was `if not 'User' in prop['name']` (flake8 E713).
        if 'User' not in prop['name']:
            core.print_out('\n => {} <=\n\n'.format(prop['name']))
            directions = prop['printout_labels']
            # e.g. 'Dipole polarizabilities' -> 'DIPOLE POLARIZABILITY'
            var_name = prop['name'].upper().replace("IES", "Y")
            _print_matrix(directions, output[i], var_name)
def _print_tdscf_header(*, r_convergence: float, guess_type: str, restricted: bool, ptype: str):
    """Print the TDSCF banner and a summary of the solver options."""
    core.print_out("\n\n ---------------------------------------------------------\n"
                   f" {'TDSCF excitation energies':^57}\n" +
                   f" {'by Andrew M. James and Daniel G. A. Smith':^57}\n" +
                   " ---------------------------------------------------------\n")

    core.print_out("\n ==> Options <==\n\n")
    core.print_out(f" {'Residual threshold':<20s}: {r_convergence:.4e}\n")
    core.print_out(f" {'Initial guess':20s}: {guess_type.lower()}\n")
    reference = 'RHF' if restricted else 'UHF'
    core.print_out(f" {'Reference':20s}: {reference}\n")
    # RPA solves the full non-Hermitian problem (Hamiltonian solver);
    # everything else falls back to the Davidson solver.
    solver_type = 'Hamiltonian' if ptype == "RPA" else "Davidson"
    core.print_out(f" {'Solver type':20s}: {ptype} ({solver_type})\n")
    core.print_out("\n")
@dataclass
class _TDSCFResults:
    """Observables for a single converged TDSCF excited state."""
    E_ex_au: float  # excitation energy, atomic units
    irrep_GS: str  # ground-state irrep label
    irrep_ES: str  # excited-state irrep label
    irrep_trans: str  # irrep of the transition (GS x ES)
    edtm_length: np.ndarray  # electric dipole transition moment, length gauge
    f_length: float  # oscillator strength, length gauge
    edtm_velocity: np.ndarray  # electric dipole transition moment, velocity gauge
    f_velocity: float  # oscillator strength, velocity gauge
    mdtm: np.ndarray  # magnetic dipole transition moment
    R_length: float  # rotatory strength, length gauge
    R_velocity: float  # rotatory strength, velocity gauge
    spin_mult: str  # "singlet" or "triplet"
    R_eigvec: Union[core.Matrix, List[core.Matrix]]  # right eigenvector(s)
    L_eigvec: Union[core.Matrix, List[core.Matrix]]  # left eigenvector(s)
def _solve_loop(wfn,
                ptype,
                solve_function,
                states_per_irrep: List[int],
                maxiter: int,
                restricted: bool = True,
                spin_mult: str = "singlet") -> List[_TDSCFResults]:
    """Drive the TDSCF solver irrep by irrep and post-process each root.

    For every irrep with a nonzero state count: generate guess vectors, run
    ``solve_function`` (Davidson for TDA, Hamiltonian solver for RPA), and
    compute transition moments, oscillator strengths, and rotatory
    strengths for each converged root.

    Raises
    ------
    TDSCFConvergenceError
        If the solver fails to converge all roots within ``maxiter``.

    References
    ----------
    For the expression of the transition moments in length and velocity gauges:
    - T. B. Pedersen, A. E. Hansen, "Ab Initio Calculation and Display of the
    Rotary Strength Tensor in the Random Phase Approximation. Method and Model
    Studies." Chem. Phys. Lett., 246, 1 (1995)
    - P. J. Lestrange, F. Egidi, X. Li, "The Consequences of Improperly
    Describing Oscillator Strengths beyond the Electric Dipole Approximation."
    J. Chem. Phys., 143, 234103 (2015)
    """
    core.print_out("\n ==> Requested Excitations <==\n\n")
    for nstate, state_sym in zip(states_per_irrep, wfn.molecule().irrep_labels()):
        core.print_out(f" {nstate} {spin_mult} states with {state_sym} symmetry\n")

    # construct the engine
    if restricted:
        if spin_mult == "triplet":
            engine = TDRSCFEngine(wfn, ptype=ptype.lower(), triplet=True)
        else:
            engine = TDRSCFEngine(wfn, ptype=ptype.lower(), triplet=False)
    else:
        engine = TDUSCFEngine(wfn, ptype=ptype.lower())

    # collect results and compute some spectroscopic observables
    mints = core.MintsHelper(wfn.basisset())
    results = []
    irrep_GS = wfn.molecule().irrep_labels()[engine.G_gs]
    for state_sym, nstates in enumerate(states_per_irrep):
        if nstates == 0:
            continue
        irrep_ES = wfn.molecule().irrep_labels()[state_sym]
        core.print_out(f"\n\n ==> Seeking the lowest {nstates} {spin_mult} states with {irrep_ES} symmetry")
        engine.reset_for_state_symm(state_sym)
        guess_ = engine.generate_guess(nstates * 4)

        # ret = {"eigvals": ee, "eigvecs": (rvecs, rvecs), "stats": stats} (TDA)
        # ret = {"eigvals": ee, "eigvecs": (rvecs, lvecs), "stats": stats} (RPA)
        ret = solve_function(engine, nstates, guess_, maxiter)

        # check whether all roots converged
        if not ret["stats"][-1]["done"]:
            # BUG FIX: the message previously said "singlet excitations"
            # regardless of the spin multiplicity being solved for.
            raise TDSCFConvergenceError(maxiter, wfn, f"{spin_mult} excitations in irrep {irrep_ES}",
                                        ret["stats"][-1])

        # flatten dictionary: helps with sorting by energy
        # also append state symmetry to return value
        for e, (R, L) in zip(ret["eigvals"], ret["eigvecs"]):
            irrep_trans = wfn.molecule().irrep_labels()[engine.G_gs ^ state_sym]

            # length-gauge electric dipole transition moment
            edtm_length = engine.residue(R, mints.so_dipole())
            # length-gauge oscillator strength
            f_length = ((2 * e) / 3) * np.sum(edtm_length**2)
            # velocity-gauge electric dipole transition moment
            edtm_velocity = engine.residue(L, mints.so_nabla())
            # velocity-gauge oscillator strength
            f_velocity = (2 / (3 * e)) * np.sum(edtm_velocity**2)
            # length gauge magnetic dipole transition moment
            # 1/2 is the Bohr magneton in atomic units
            mdtm = 0.5 * engine.residue(L, mints.so_angular_momentum())
            # NOTE The signs for rotatory strengths are opposite WRT the cited paper.
            # This is because Psi4 defines the length-gauge dipole integral to include the electron charge (-1.0)
            # length gauge rotatory strength
            R_length = np.einsum("i,i", edtm_length, mdtm)
            # velocity gauge rotatory strength
            R_velocity = -np.einsum("i,i", edtm_velocity, mdtm) / e

            results.append(
                _TDSCFResults(e, irrep_GS, irrep_ES, irrep_trans, edtm_length, f_length, edtm_velocity, f_velocity,
                              mdtm, R_length, R_velocity, spin_mult, R, L))

    return results
def _states_per_irrep(states, nirrep):
"""Distributes states into nirrep"""
spi = [states // nirrep] * nirrep
for i in range(states % nirrep):
spi[i] += 1
return spi
def _validate_tdscf(*, wfn, states, triplets, guess) -> None:
    """Validate user input for a TDSCF calculation.

    Raises
    ------
    ValidationError
        On any unsupported combination of reference, functional, spin
        multiplicity, state counts, or guess type.
    """
    # validate states
    if not isinstance(states, (int, list)):
        raise ValidationError("TDSCF: Number of states must be either an integer or a list of integers")

    # list of states per irrep given, validate it
    if isinstance(states, list):
        if len(states) != wfn.nirrep():
            raise ValidationError(f"TDSCF: States requested ({states}) do not match number of irreps ({wfn.nirrep()})")

    # do triplets?
    if triplets not in ["NONE", "ALSO", "ONLY"]:
        raise ValidationError(
            f"TDSCF: Triplet option ({triplets}) unrecognized. Must be one of 'NONE', 'ALSO' or 'ONLY'")

    restricted = wfn.same_a_b_orbs()
    # IDIOM FIX: was `False if triplets == "NONE" else True`.
    do_triplets = triplets != "NONE"
    if (not restricted) and do_triplets:
        raise ValidationError("TDSCF: Cannot compute triplets with an unrestricted reference")

    # validate calculation
    if restricted and wfn.functional().needs_xc() and do_triplets:
        raise ValidationError("TDSCF: Restricted Vx kernel only spin-adapted for singlets")

    not_lda = wfn.functional().is_gga() or wfn.functional().is_meta()
    if (not restricted) and not_lda:
        raise ValidationError("TDSCF: Unrestricted Kohn-Sham Vx kernel currently limited to SVWN functional")

    if guess != "DENOMINATORS":
        raise ValidationError(f"TDSCF: Guess type {guess} is not valid")
def tdscf_excitations(wfn,
*,
states: Union[int, List[int]],
triplets: str = "NONE",
tda: bool = False,
r_convergence: float = 1.0e-4,
maxiter: int = 60,
guess: str = "DENOMINATORS",
verbose: int = 1):
"""Compute excitations from a SCF(HF/KS) wavefunction
Parameters
-----------
wfn : :py:class:`psi4.core.Wavefunction`
The reference wavefunction
states : Union[int, List[int]]
How many roots (excited states) should the solver seek to converge?
This function accepts either an integer or a list of integers:
- The list has :math:`n_{\mathrm{irrep}}` elements and is only
acceptable if the system has symmetry. It tells the solver how many
states per irrep to calculate.
- If an integer is given _and_ the system has symmetry, the states
will be distributed among irreps.
For example, ``states = 10`` for a D2h system will compute 10 states
distributed as ``[2, 2, 1, 1, 1, 1, 1, 1]`` among irreps.
triplets : {"NONE", "ONLY", "ALSO"}
Should the solver seek to converge states of triplet symmetry?
Default is `none`: do not seek to converge triplets.
Valid options are:
- `NONE`. Do not seek to converge triplets.
- `ONLY`. Only seek to converge triplets.
- `ALSO`. Seek to converge both triplets and singlets. This choice is
only valid for restricted reference wavefunction.
The number of states given will be apportioned roughly 50-50 between
singlet and triplet states, preferring the former. For example:
given ``state = 5, triplets = "ALSO"``, the solver will seek to
converge 3 states of singlet spin symmetry and 2 of triplet spin
symmetry. When asking for ``states = [3, 3, 3, 3], triplets =
"ALSO"`` states (C2v symmetry), ``[2, 2, 2, 2]`` will be of singlet
spin symmetry and ``[1, 1, 1, 1]``` will be of triplet spin
symmetry.
tda : bool, optional.
Should the solver use the Tamm-Dancoff approximation (TDA) or the
random-phase approximation (RPA)?
Default is ``False``: use RPA.
Note that TDA is equivalent to CIS for HF references.
r_convergence : float, optional.
The convergence threshold for the norm of the residual vector.
Default: 1.0e-4
Using a tighter convergence threshold here requires tighter SCF ground
state convergence threshold. As a rule of thumb, with the SCF ground
state density converged to :math:`10^{-N}` (``D_CONVERGENGE = 1.0e-N``),
you can afford converging a corresponding TDSCF calculation to
:math:`10^{-(N-2)}`.
The default value is consistent with the default value for
``D_CONVERGENCE``.
maxiter : int, optional
Maximum number of iterations.
Default: 60
guess : str, optional.
How should the starting trial vectors be generated?
Default: `DENOMINATORS`, i.e. use orbital energy differences to generate
guess vectors.
verbose : int, optional.
How verbose should the solver be?
Default: 1
Notes
-----
The algorithm employed to solve the non-Hermitian eigenvalue problem (``tda = False``)
will fail when the SCF wavefunction has a triplet instability.
This function can be used for:
- restricted singlets: RPA or TDA, any functional
- restricted triplets: RPA or TDA, Hartree-Fock only
- unresctricted: RPA or TDA, Hartre-Fock and LDA only
Tighter convergence thresholds will require a larger iterative subspace.
The maximum size of the iterative subspace is calculated based on `r_convergence`:
max_vecs_per_root = -np.log10(r_convergence) * 50
for the default converegence threshold this gives 200 trial vectors per root and a maximum subspace size
of:
max_ss_size = max_vecs_per_root * n
where `n` are the number of roots to seek in the given irrep.
For each irrep, the algorithm will store up to `max_ss_size` trial vectors
before collapsing (restarting) the iterations from the `n` best
approximations.
"""
# validate input parameters
triplets = triplets.upper()
guess = guess.upper()
_validate_tdscf(wfn=wfn, states=states, triplets=triplets, guess=guess)
restricted = wfn.same_a_b_orbs()
# determine how many states per irrep to seek and apportion them between singlets/triplets and irreps.
singlets_per_irrep = []
triplets_per_irrep = []
if isinstance(states, list):
if triplets == "ONLY":
triplets_per_irrep = states
elif triplets == "ALSO":
singlets_per_irrep = [(s // 2) + (s % 2) for s in states]
triplets_per_irrep = [(s // 2) for s in states]
else:
singlets_per_irrep = states
else:
# total number of states given
# first distribute them among singlets and triplets, preferring the
# former then distribute them among irreps
if triplets == "ONLY":
triplets_per_irrep = _states_per_irrep(states, wfn.nirrep())
elif triplets == "ALSO":
spi = (states // 2) + (states % 2)
singlets_per_irrep = _states_per_irrep(spi, wfn.nirrep())
tpi = states - spi
triplets_per_irrep = _states_per_irrep(tpi, wfn.nirrep())
else:
singlets_per_irrep = _states_per_irrep(states, wfn.nirrep())
# tie maximum number of vectors per root to requested residual tolerance
# This gives 200 vectors per root with default tolerance
max_vecs_per_root = int(-np.log10(r_convergence) * 50)
def rpa_solver(e, n, g, m):
return solvers.hamiltonian_solver(engine=e,
nroot=n,
guess=g,
r_convergence=r_convergence,
max_ss_size=max_vecs_per_root * n,
verbose=verbose)
    def tda_solver(e, n, g, m):
        """Run the Davidson solver (TDA eigenproblem) for ``n`` roots.

        e: engine object, n: number of roots, g: guess vectors.
        m: unused here; presumably a maxiter slot kept so both solver
        callbacks share one positional signature -- TODO confirm at call site.
        Captures r_convergence, max_vecs_per_root and verbose from the
        enclosing scope.
        """
        return solvers.davidson_solver(engine=e,
                                       nroot=n,
                                       guess=g,
                                       r_convergence=r_convergence,
                                       max_ss_size=max_vecs_per_root * n,
                                       verbose=verbose)
# determine which solver function to use: Davidson for TDA or Hamiltonian for RPA?
if tda:
ptype = "TDA"
solve_function = tda_solver
else:
ptype = "RPA"
solve_function = rpa_solver
_print_tdscf_header(r_convergence=r_convergence, guess_type=guess, restricted=restricted, ptype=ptype)
# collect solver results into a list
_results = []
# singlets solve loop
if triplets == "NONE" or triplets == "ALSO":
res_1 = _solve_loop(wfn, ptype, solve_function, singlets_per_irrep, maxiter, restricted, "singlet")
_results.extend(res_1)
# triplets solve loop
if triplets == "ALSO" or triplets == "ONLY":
res_3 = _solve_loop(wfn, ptype, solve_function, triplets_per_irrep, maxiter, restricted, "triplet")
_results.extend(res_3)
# sort by energy
_results = sorted(_results, key=lambda x: x.E_ex_au)
core.print_out("\n{}\n".format("*"*90) +
"{}{:^70}{}\n".format("*"*10, "WARNING", "*"*10) +
"{}{:^70}{}\n".format("*"*10, "Length-gauge rotatory strengths are **NOT** gauge-origin invariant", "*"*10) +
"{}\n\n".format("*"*90)) #yapf: disable
# print results
core.print_out(" " + (" " * 20) + " " + "Excitation Energy".center(31) + f" {'Total Energy':^15}" +
"Oscillator Strength".center(31) + "Rotatory Strength".center(31) + "\n")
core.print_out(
f" {'#':^4} {'Sym: GS->ES (Trans)':^20} {'au':^15} {'eV':^15} {'au':^15} {'au (length)':^15} {'au (velocity)':^15} {'au (length)':^15} {'au (velocity)':^15}\n"
)
core.print_out(
f" {'-':->4} {'-':->20} {'-':->15} {'-':->15} {'-':->15} {'-':->15} {'-':->15} {'-':->15} {'-':->15}\n")
# collect results
solver_results = []
for i, x in enumerate(_results):
sym_descr = f"{x.irrep_GS}->{x.irrep_ES} ({1 if x.spin_mult== 'singlet' else 3} {x.irrep_trans})"
E_ex_ev = constants.conversion_factor('hartree', 'eV') * x.E_ex_au
E_tot_au = wfn.energy() + x.E_ex_au
# prepare return dictionary for this root
solver_results.append({
"EXCITATION ENERGY": x.E_ex_au,
"ELECTRIC DIPOLE TRANSITION MOMENT (LEN)": x.edtm_length,
"OSCILLATOR STRENGTH (LEN)": x.f_length,
"ELECTRIC DIPOLE TRANSITION MOMENT (VEL)": x.edtm_velocity,
"OSCILLATOR STRENGTH (VEL)": x.f_velocity,
"MAGNETIC DIPOLE TRANSITION MOMENT": x.mdtm,
"ROTATORY STRENGTH (LEN)": x.R_length,
"ROTATORY STRENGTH (VEL)": x.R_velocity,
"SYMMETRY": x.irrep_trans,
"SPIN": x.spin_mult,
"RIGHT EIGENVECTOR ALPHA": x.R_eigvec if restricted else x.R_eigvec[0],
"LEFT EIGENVECTOR ALPHA": x.L_eigvec if restricted else x.L_eigvec[0],
"RIGHT EIGENVECTOR BETA": x.R_eigvec if restricted else x.R_eigvec[1],
"LEFT EIGENVECTOR BETA": x.L_eigvec if restricted else x.L_eigvec[1],
})
# stash in psivars/wfnvars
ssuper_name = wfn.functional().name()
wfn.set_variable(f"TD-{ssuper_name} ROOT {i+1} TOTAL ENERGY - {x.irrep_ES} SYMMETRY", E_tot_au)
wfn.set_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} EXCITATION ENERGY - {x.irrep_ES} SYMMETRY", x.E_ex_au)
wfn.set_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} OSCILLATOR STRENGTH (LEN) - {x.irrep_ES} SYMMETRY",
x.f_length)
wfn.set_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} OSCILLATOR STRENGTH (VEL) - {x.irrep_ES} SYMMETRY",
x.f_velocity)
wfn.set_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} ROTATORY STRENGTH (LEN) - {x.irrep_ES} SYMMETRY",
x.R_length)
wfn.set_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} ROTATORY STRENGTH (VEL) - {x.irrep_ES} SYMMETRY",
x.R_velocity)
wfn.set_array_variable(
f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} ELECTRIC TRANSITION DIPOLE MOMENT (LEN) - {x.irrep_ES} SYMMETRY",
core.Matrix.from_array(x.edtm_length.reshape((1, 3))))
wfn.set_array_variable(
f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} ELECTRIC TRANSITION DIPOLE MOMENT (VEL) - {x.irrep_ES} SYMMETRY",
core.Matrix.from_array(x.edtm_velocity.reshape((1, 3))))
wfn.set_array_variable(
f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} MAGNETIC TRANSITION DIPOLE MOMENT - {x.irrep_ES} SYMMETRY",
core.Matrix.from_array(x.mdtm.reshape((1, 3))))
wfn.set_array_variable(
f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} RIGHT EIGENVECTOR ALPHA - {x.irrep_ES} SYMMETRY",
x.R_eigvec if restricted else x.R_eigvec[0])
wfn.set_array_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} LEFT EIGENVECTOR ALPHA - {x.irrep_ES} SYMMETRY",
x.L_eigvec if restricted else x.L_eigvec[0])
wfn.set_array_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} RIGHT EIGENVECTOR BETA - {x.irrep_ES} SYMMETRY",
x.R_eigvec if restricted else x.R_eigvec[1])
wfn.set_array_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} LEFT EIGENVECTOR ALPHA - {x.irrep_ES} SYMMETRY",
x.L_eigvec if restricted else x.L_eigvec[1])
core.print_out(
f" {i+1:^4} {sym_descr:^20} {x.E_ex_au:< 15.5f} {E_ex_ev:< 15.5f} {E_tot_au:< 15.5f} {x.f_length:< 15.4f} {x.f_velocity:< 15.4f} {x.R_length:< 15.4f} {x.R_velocity:< 15.4f}\n"
)
core.print_out("\n")
return solver_results
|
dgasmith/psi4
|
psi4/driver/procrouting/response/scf_response.py
|
Python
|
lgpl-3.0
| 28,511
|
[
"Psi4"
] |
a09bf8b59f255aad801a97884fa71e4ea8a50ca6ee75dad59553ac9bf6998163
|
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2008-2014
# Christian Kohlöffel
# Vinzenz Schulz
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
import Core.Globals as g
from DxfImport.SplineConvert import Spline2Arcs
from Core.Point import Point
from DxfImport.Classes import PointsClass, ContourClass
import logging
logger = logging.getLogger("DxfImport.GeoentSpline")
class GeoentSpline:
    """DXF SPLINE entity.

    Reads the NURBS control data (degree, knots, weights, control points)
    from the DXF line-pair stream and converts the spline into a list of
    arc/line geometry elements via Spline2Arcs.
    """

    def __init__(self, Nr=0, caller=None):
        self.Typ = 'Spline'
        self.Nr = Nr
        # Initialise the entity attributes before reading.
        self.Layer_Nr = 0
        self.Spline_flag = []
        self.degree = 1
        self.Knots = []
        self.Weights = []
        self.CPoints = []
        self.geo = []
        self.length = 0.0
        # Read the geometry from the DXF stream.
        self.Read(caller)
        # Fitting tolerance and check flag come from the global config.
        tol = g.config.fitting_tolerance
        check = g.config.vars.Import_Parameters['spline_check']
        # Convert the NURBS data to an arc/line approximation.
        Spline2ArcsClass = Spline2Arcs(degree=self.degree, Knots=self.Knots,
                                       Weights=self.Weights, CPoints=self.CPoints,
                                       tol=tol, check=check)
        self.geo = Spline2ArcsClass.Curve
        for geo in self.geo:
            self.length += geo.length

    def __str__(self):
        """Human-readable dump of the spline entity."""
        s = ('\nTyp: Spline') + \
            ('\nNr: %i' % self.Nr) + \
            ('\nLayer Nr: %i' % self.Layer_Nr) + \
            ('\nSpline flag: %i' % self.Spline_flag) + \
            ('\ndegree: %i' % self.degree) + \
            ('\nlength: %0.3f' % self.length) + \
            ('\nGeo elements: %i' % len(self.geo)) + \
            ('\nKnots: %s' % self.Knots) + \
            ('\nWeights: %s' % self.Weights) + \
            ('\nCPoints: ')
        # Loop variable renamed from "Point": the original shadowed the
        # imported Point class.
        for cpoint in self.CPoints:
            s = s + "\n" + str(cpoint)
        s += ('\ngeo: ')
        return s

    def reverse(self):
        """Reverse the direction of the generated geometry in place."""
        self.geo.reverse()
        for geo in self.geo:
            geo.reverse()

    def App_Cont_or_Calc_IntPts(self, cont, points, i, tol, warning):
        """Append a closed spline as its own contour, or record its end
        points for the later contour-joining stage.

        Returns the (unchanged) warning flag.
        """
        if self.CPoints[0].isintol(self.CPoints[-1], tol):
            # Closed spline: normalise orientation/start and emit a contour.
            self.analyse_and_opt()
            cont.append(ContourClass(len(cont), 1, [[i, 0]], self.length))
        else:
            # Open spline: record begin/end so it can be chained later.
            points.append(PointsClass(point_nr=len(points), geo_nr=i,
                                      Layer_Nr=self.Layer_Nr,
                                      be=self.geo[0].Pa,
                                      en=self.geo[-1].Pe,
                                      be_cp=[], en_cp=[]))
        return warning

    def analyse_and_opt(self):
        """Normalise a closed spline: force CCW orientation and rotate the
        geometry so it starts at the element closest to the reference point."""
        summe = 0.0
        # Reference direction for the preferred start point (bottom left).
        Popt = Point(x=-1e3, y=-1e6)
        # Signed area (Gauss/shoelace formula) of the closed polygon:
        # positive value means CW, negative means CCW.
        for geo in self.geo:
            summe += (geo.Pa.x * geo.Pe.y - geo.Pe.x * geo.Pa.y) / 2
        if summe > 0.0:
            self.reverse()
        # Find the element whose start point is closest to Popt.
        min_distance = self.geo[0].Pa.distance(Popt)
        min_geo_nr = 0
        for geo_nr in range(1, len(self.geo)):
            dist = self.geo[geo_nr].Pa.distance(Popt)
            if dist < min_distance:
                min_distance = dist
                min_geo_nr = geo_nr
        # Rotate the contour so that element becomes the start.
        self.geo = self.geo[min_geo_nr:len(self.geo)] + self.geo[0:min_geo_nr]

    def Read(self, caller):
        """Parse the SPLINE entity from caller.line_pairs and advance
        caller.start past it."""
        # Short name for the line-pair container.
        lp = caller.line_pairs
        # End of this entity (next group code 0).
        e = lp.index_code(0, caller.start + 1)
        # Layer (group code 8).
        s = lp.index_code(8, caller.start + 1)
        self.Layer_Nr = caller.Get_Layer_Nr(lp.line_pair[s].value)
        # Spline flag (group code 70).
        s = lp.index_code(70, s + 1)
        self.Spline_flag = int(lp.line_pair[s].value)
        # Spline degree (group code 71).
        s = lp.index_code(71, s + 1)
        self.degree = int(lp.line_pair[s].value)
        # Number of control points (group code 73).
        st = lp.index_code(73, s + 1)
        # BUG FIX: was lp.line_pair[s] (the degree line); must read st.
        # Currently unused beyond debugging, but now reads the right value.
        nCPts = int(lp.line_pair[st].value)
        s = st
        # Read the knot values (group code 40).
        while True:
            sk = lp.index_code(40, s + 1, e)
            if sk is None:
                break
            self.Knots.append(float(lp.line_pair[sk].value))
            s = sk
        # Read the weights (group code 41), restarting after the 73 line.
        s = st
        while True:
            sg = lp.index_code(41, s + 1, e)
            if sg is None:
                break
            self.Weights.append(float(lp.line_pair[sg].value))
            s = sg
        # Read the control points (group codes 10/20), restarting again.
        s = st
        while True:
            s = lp.index_code(10, s + 1, e)
            # Stop when no further point is found.
            if s is None:
                break
            x = float(lp.line_pair[s].value)
            s = lp.index_code(20, s + 1, e)
            y = float(lp.line_pair[s].value)
            self.CPoints.append(Point(x, y))
        # DXF may omit weights entirely; default every weight to 1.
        if len(self.Weights) == 0:
            self.Weights = [1] * len(self.CPoints)
        caller.start = e

    def get_start_end_points(self, direction=0):
        """Return (point, angle) of the spline start (direction falsy) or
        end (direction truthy)."""
        if not direction:
            punkt, angle = self.geo[0].get_start_end_points(direction)
        else:
            punkt, angle = self.geo[-1].get_start_end_points(direction)
        return punkt, angle
|
oryxr/dxf2gcode
|
DxfImport/GeoentSpline.py
|
Python
|
gpl-3.0
| 7,416
|
[
"Gaussian"
] |
22525c4062f5b4bec328f9c1bc06b1cb6d0624c842bebe0c5771f0c551c2466b
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import base64
import json
import logging
import os
import jinja2
from google.appengine.ext import webapp, db, deferred
from google.appengine.ext.webapp import template
from mcfw.properties import azzert
from rogerthat.bizz.job.update_friends import schedule_update_a_friend_of_a_service_identity_user
from rogerthat.bizz.service import create_send_user_data_requests
from rogerthat.bizz.user import calculate_secure_url_digest
from rogerthat.dal.app import get_app_by_user, get_app_name_by_id
from rogerthat.dal.mobile import get_user_active_mobiles, get_mobile_key_by_account
from rogerthat.models import ActivationLog, App, ServiceIdentity, UserProfile, FriendServiceIdentityConnection, UserData
from rogerthat.rpc import users
from rogerthat.rpc.models import Mobile
from rogerthat.templates import get_languages_from_request
from rogerthat.templates.jinja_extensions import TranslateExtension
from rogerthat.translations import localize
from rogerthat.utils import now, xml_escape
from rogerthat.utils.app import get_human_user_from_app_user, get_app_id_from_app_user, get_app_user_tuple
from rogerthat.utils.crypto import decrypt
from rogerthat.utils.service import add_slash_default
from rogerthat.utils.transactions import run_in_xg_transaction
class UnsubscribeReminderHandler(webapp.RequestHandler):
    """Serves the 'unsubscribe from reminders' landing page.

    The link in the e-mail carries an encrypted, signed payload in the
    ``data`` query parameter; parse_data decrypts it and verifies the
    digest before anything else happens.
    """

    _BASE_DIR = None
    _JINJA_ENVIRONMENT = None

    @staticmethod
    def get_base_dir():
        # Lazily cache the directory containing this module (template root).
        if not UnsubscribeReminderHandler._BASE_DIR:
            UnsubscribeReminderHandler._BASE_DIR = os.path.dirname(__file__)
        return UnsubscribeReminderHandler._BASE_DIR

    @staticmethod
    def get_jinja_environment():
        # Lazily build a jinja2 environment rooted at this module's directory.
        if not UnsubscribeReminderHandler._JINJA_ENVIRONMENT:
            UnsubscribeReminderHandler._JINJA_ENVIRONMENT = \
                jinja2.Environment(loader=jinja2.FileSystemLoader([UnsubscribeReminderHandler.get_base_dir()]),
                                   extensions=[TranslateExtension])
        return UnsubscribeReminderHandler._JINJA_ENVIRONMENT

    def return_error(self, reason="Invalid url received."):
        """Render the generic error page and return a (None, None) sentinel."""
        path = os.path.join(self.get_base_dir(), 'error.html')
        self.response.out.write(template.render(path, {"reason": reason, "hide_header": True}))
        return None, None

    def parse_data(self, email, data):
        """Decrypt *data* with *email*'s key and verify its secure digest.

        Raises on tampered or undecryptable payloads; callers treat any
        exception as an invalid URL.
        """
        user = users.User(email)
        data = base64.decodestring(data)
        data = decrypt(user, data)
        data = json.loads(data)
        azzert(data["d"] == calculate_secure_url_digest(data))
        return data, user

    def get_user_info(self):
        """Extract and validate (data_dict, app_user) from request params.

        Returns (None, None) after rendering an error page when the request
        is missing parameters or carries an invalid payload.
        """
        email = self.request.get("email", None)
        data = self.request.get("data", None)
        if not email or not data:
            return self.return_error()

        try:
            data_dict, _ = self.parse_data(email, data)
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt. logging.warn is a deprecated alias.
            logging.warning("Could not decipher url!", exc_info=True)
            return self.return_error()

        app_user = users.User(email)
        return data_dict, app_user

    def get(self):
        """Render the unsubscribe-reminder landing page."""
        data_dict, app_user = self.get_user_info()
        if not data_dict or not app_user:
            # get_user_info already rendered an error page.
            return

        app, user_profile = db.get([App.create_key(get_app_id_from_app_user(app_user)),
                                    UserProfile.createKey(app_user)])
        if not user_profile:
            self.redirect("/")
            return

        # Pick the page variant matching the user's first active device.
        mobiles = list(get_user_active_mobiles(app_user))
        if mobiles:
            mobile = mobiles[0]
            if mobile.type in Mobile.ANDROID_TYPES:
                page_type = "android"
            elif mobile.type in Mobile.IOS_TYPES:
                page_type = "ios"
            else:
                return self.return_error()
        else:
            mobile = None
            page_type = "web"

        # Explicit query parameters override the detected values.
        page_type = self.request.get("page_type", page_type)
        language = self.request.get("language", user_profile.language)

        ActivationLog(timestamp=now(), email=app_user.email(), mobile=mobile,
                      description="Visit unsubscribe page %s %s" % (page_type, user_profile.language)).put()

        jinja_template = self.get_jinja_environment().get_template('unsubscribe_reminder_service.html')

        params = {
            'name': data_dict['n'],
            'app_name': get_app_by_user(app_user).name,
            'hide_header': True,
            'data': self.request.get("data"),
            'app_email': app_user.email(),
            'email': get_human_user_from_app_user(app_user).email(),
            'action': data_dict['a'],
            'page_type': page_type,
            'language': language,
            'is_city_app': app.type == App.APP_TYPE_CITY_APP
        }

        self.response.out.write(jinja_template.render(params))
class UnsubscribeBroadcastHandler(UnsubscribeReminderHandler):
    """Handles the 'unsubscribe from broadcast type' link sent in e-mails.

    Inherits the payload decryption/validation helpers from
    UnsubscribeReminderHandler.
    """

    def _un_subscribe(self, app_user, si_user, broadcast_type):
        # Disable *broadcast_type* on the user's connection (fsic) to the
        # service identity; intended to run inside an XG transaction (see
        # get()).  Returns (updated, user_profile, service_identity, fsic).
        # NOTE(review): the FriendServiceIdentityConnection key uses the raw
        # si_user while the ServiceIdentity key applies add_slash_default --
        # presumably createKey normalises internally; verify.
        user_profile, si, fsic = db.get([UserProfile.createKey(app_user),
                                         ServiceIdentity.keyFromUser(add_slash_default(si_user)),
                                         FriendServiceIdentityConnection.createKey(app_user, si_user)])
        logging.info('%s is unsubscribing from notifications of "%s" with type "%s".',
                     user_profile.name if user_profile else app_user.email(),
                     si.name if si else si_user.email(),
                     broadcast_type)
        updated = False
        if fsic:
            # Move the broadcast type from the enabled to the disabled list.
            if broadcast_type in fsic.enabled_broadcast_types:
                fsic.enabled_broadcast_types.remove(broadcast_type)
                updated = True
            if broadcast_type not in fsic.disabled_broadcast_types:
                fsic.disabled_broadcast_types.append(broadcast_type)
                updated = True
            if updated:
                fsic.put()
                # Push the changed user data to every device of the user and
                # refresh the friend entry (clearing the broadcast settings
                # cache) so clients see the new subscription state.
                models = db.get([UserData.createKey(fsic.friend, fsic.service_identity_user)] +
                                [get_mobile_key_by_account(mobile.account) for mobile in user_profile.mobiles])
                user_data_model, mobiles = models[0], models[1:]
                create_send_user_data_requests(mobiles, user_data_model, fsic, fsic.friend, fsic.service_identity_user)
                schedule_update_a_friend_of_a_service_identity_user(fsic.service_identity_user, fsic.friend, force=True,
                                                                    clear_broadcast_settings_cache=True)
            else:
                logging.info('%s was already unsubscribed from notifications of "%s" with type "%s".',
                             user_profile.name if user_profile else app_user.email(),
                             si.name if si else si_user.email(),
                             broadcast_type)
        return updated, user_profile, si, fsic

    def get(self):
        # Validate the encrypted payload; an error page was already rendered
        # if this returns (None, None).
        data_dict, app_user = self.get_user_info()
        if not data_dict or not app_user:
            return
        azzert(data_dict['a'] == "unsubscribe broadcast")
        broadcast_type = data_dict['bt']
        si_user = users.User(data_dict['e'])
        _, user_profile, si, fsic = run_in_xg_transaction(self._un_subscribe, app_user, si_user, broadcast_type)
        if fsic or not si:
            # Connection existed (or the service is gone): confirm in the
            # user's own language.
            message = '%s,<br><br>%s' % (xml_escape(localize(user_profile.language, u'dear_name',
                                                             name=user_profile.name)),
                                         xml_escape(localize(user_profile.language,
                                                             u'successfully_unsubscribed_broadcast_type',
                                                             notification_type=broadcast_type,
                                                             service=si.name if si else data_dict['n'])))
        else:
            # No connection: fall back to the request's language.
            language = get_languages_from_request(self.request)[0]
            if not user_profile:
                # User already deactivated his account
                human_user, app_id = get_app_user_tuple(app_user)
                message = localize(language, u'account_already_deactivated',
                                   account=human_user.email(), app_name=get_app_name_by_id(app_id))
            else:
                # User is not connected anymore to this service identity
                message = localize(language, u'account_already_disconnected_from_service',
                                   service_name=si.name)
        jinja_template = self.get_jinja_environment().get_template('unsubscribe_broadcast_type.html')
        self.response.out.write(jinja_template.render(dict(message=message)))
class UnsubscribeDeactivateHandler(UnsubscribeReminderHandler):
    """Landing page for the 'deactivate account' unsubscribe flow."""

    def get(self):
        data_dict, app_user = self.get_user_info()
        if not data_dict or not app_user:
            # get_user_info already rendered an error page.
            return

        azzert(data_dict['a'] == "unsubscribe deactivate")

        app, user_profile = db.get([App.create_key(get_app_id_from_app_user(app_user)),
                                    UserProfile.createKey(app_user)])
        if not user_profile:
            self.redirect("/")
            return

        # Detect the page variant from the user's first active device.
        active_mobiles = list(get_user_active_mobiles(app_user))
        current_mobile = active_mobiles[0] if active_mobiles else None
        if current_mobile is None:
            detected_page_type = "web"
        elif current_mobile.type in Mobile.ANDROID_TYPES:
            detected_page_type = "android"
        elif current_mobile.type in Mobile.IOS_TYPES:
            detected_page_type = "ios"
        else:
            return self.return_error()

        # Explicit query parameters override the detected values.
        page_type = self.request.get("page_type", detected_page_type)
        language = self.request.get("language", user_profile.language)

        ActivationLog(timestamp=now(), email=app_user.email(), mobile=current_mobile,
                      description="Visit unsubscribe page %s %s" % (page_type, user_profile.language)).put()

        jinja_template = self.get_jinja_environment().get_template('unsubscribe_deactivate.html')
        self.response.out.write(jinja_template.render({
            'name': data_dict['n'],
            'app_name': get_app_by_user(app_user).name,
            'hide_header': True,
            'data': self.request.get("data"),
            'app_email': app_user.email(),
            'email': get_human_user_from_app_user(app_user).email(),
            'action': data_dict['a'],
            'page_type': page_type,
            'language': language,
            'is_city_app': app.type == App.APP_TYPE_CITY_APP,
        }))
|
rogerthat-platform/rogerthat-backend
|
src/rogerthat/pages/unsubscribe_reminder_service.py
|
Python
|
apache-2.0
| 11,107
|
[
"VisIt"
] |
e042052277e8404abffc7b11c263d2e654c0a42a64a5a2060fff7e6a68ff533a
|
#!/usr/bin/env python
"""
voice_nav.py allows controlling a mobile base using simple speech commands.
Based on the voice_cmd_vel.py script by Michael Ferguson in the pocketsphinx ROS package.
"""
import roslib; #roslib.load_manifest('pi_speech_tutorial')
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import String
from math import copysign
from sound_play.libsoundplay import SoundClient
class talkative:
def __init__(self):
self.rate = rospy.get_param("~rate", 5)
r = rospy.Rate(self.rate)
self.paused = False
self.voice = rospy.get_param("~voice", "voice_cmu_us_bdl_arctic_clunits")
self.wavepath = rospy.get_param("~wavepath", "")
# Create the sound client object
self.soundhandle = SoundClient()
rospy.sleep(1)
self.soundhandle.stopAll()
self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")
rospy.sleep(1)
# Subscribe to the /recognizer/output topic to receive voice commands.
rospy.Subscriber('/recognizer/output', String, self.speechCb)
# A mapping from keywords to commands.
self.keywords_to_command = {'stop': ['stop', 'halt', 'abort', 'kill', 'panic', 'off', 'freeze', 'shut down', 'turn off'],
'bye': ['bye', 'cheers', 'goodbye', 'see you', 'bye'],
'cafe' : ['cafe', 'campus', 'tea', 'coffee', 'eat'],
'hello': ['hi', 'hey', 'hello'],
'help' : ['help me', 'can help', 'help'],
'name' : ['your name', 'name'],
'wash' : ['washroom', 'toilet'],
'library' : ['library', 'book', 'borrow'],
'labs' : ['labs'],
'talk': ['talk to me?', 'really talk?', 'you talk', 'you really talk?', 'talk'],
'amazing' : ['amazing', 'wonderful'],
'psychology' : ['psychology'],
'teaching' : ['teaching', 'music'],
'engineering' : ['engineering'],
'biology' : ['biology', 'english', 'chemistry'],
'maths' : ['computing', 'mathematics'],
'geo' : ['geology', 'geography'],
'marine' : ['marine'],
'art' : ['art'],
'roland' : ['reception', 'architecture'],
'business' : ['business'],
'staff' : ['staff'],
'sports' : ['sports'],
'robots' : ['robotics', 'robots'],
'visit' : ['visit', 'to do'],
'supermarket' : ['shop', 'supermarket'],
'cashpoint' : ['cash points', 'ATM', 'cash machines'],
'day' : ['day', 'today'],
'weather' : ['weather'],
'pause': ['pause speech'],
'continue': ['continue speech']}
rospy.loginfo("Ready to receive voice commands")
# We have to keep publishing the cmd_vel message if we want the robot to keep moving.
while not rospy.is_shutdown():
r.sleep()
def get_command(self, data):
for (command, keywords) in self.keywords_to_command.iteritems():
for word in keywords:
if data.find(word) > -1:
return command
def speechCb(self, msg):
command = self.get_command(msg.data)
rospy.loginfo("Command: " + str(command))
if command == 'pause':
self.paused = True
elif command == 'continue':
self.paused = False
if self.paused:
return
if command == 'hello':
self.soundhandle.say("Greetings! from your very own Charlie. How may I assist you?.", self.voice)
rospy.sleep(3)
elif command == 'bye':
self.soundhandle.say("Ok! See you soon. Have a Nice Day!. ", self.voice)
rospy.sleep(2)
if command == 'help':
self.soundhandle.say("I am a receptionist robot. I can answer your questions about the faculty. or guide you through the faculty building!.", self.voice)
if command == 'talk':
self.soundhandle.say("Oh! I am talking.. Have you seen my brother Wall E?.", self.voice)
if command == 'weather':
self.soundhandle.say("Why don't you look out of the window?!. It is probably raining now!", self.voice)
if command == 'supermarket':
self.soundhandle.say("The nearest one is the TESCO, over the road, by the Reynold's building!. ", self.voice)
if command == 'day':
self.soundhandle.say("Today is tuesday!. Oh my God! it's still 3 days to the weekend", self.voice)
if command == 'psychology':
self.soundhandle.say("The psychology department is in the link building! You can get there! from the second floor of this building.!", self.voice)
if command == 'teaching':
self.soundhandle.say("The department is in the rolle building! You can find it on the north-west of the campus.!", self.voice)
if command == 'engineering':
self.soundhandle.say("That's right here! In the Smeaton's building! There are also brunel labs behind the library.!", self.voice)
if command == 'biology':
self.soundhandle.say("The department is in the Davy building! You can find it! across from the library over the plaza.!", self.voice)
if command == 'maths':
self.soundhandle.say("The department is in the babbage building! Leave via the western exit and take right across the road.!", self.voice)
if command == 'geo':
self.soundhandle.say("The department is in the Fitzroy building! You can find it north of the library beyond the trees!", self.voice)
if command == 'marine':
self.soundhandle.say("The marine department is in the reynolds building! And also the marine building where you can find the test tank, and the bridge simulators.!. It's across the road to the west of this building", self.voice)
if command == 'art':
self.soundhandle.say("The art faculty is in the scott building! You can find it directly south of the Smeaton's building.!", self.voice)
if command == 'roland':
self.soundhandle.say("That is in the roland levinsky building! You can find it directly south of the Smeaton's building.!", self.voice)
if command == 'business':
self.soundhandle.say("I believe that's in the cookworthy building! I have no memory of this place!", self.voice)
if command == 'staff':
self.soundhandle.say("You can find various members of staff in the Portland Square building!", self.voice)
if command == 'sports':
self.soundhandle.say("The sports facility is in the Nancy Astor building! You can find it on the northern edge of campus past Portland Square.!", self.voice)
if command == 'robots':
self.soundhandle.say("You can learn about robots here in Smeaton's building or in Portland Square. !", self.voice)
if command == 'cashpoint':
self.soundhandle.say("There are cashpoints on the eastern exit of this building.! Don't forget your credit card!", self.voice)
if command == 'visit':
self.soundhandle.say("You can visit gin distillery or aquarium or go on a tour of The Sound.!", self.voice)
if command == 'name':
self.soundhandle.say("My name is CHARLIE.! CHArismatic Robot with Linguistic Interface . ", self.voice)
if command == 'amazing':
self.soundhandle.say("Not as much as you are. But thank you so much.", self.voice)
if command == 'cafe':
self.soundhandle.say("There is one S U shop right opposite to the library. you can eat! or have a coffee there!", self.voice)
if command == 'wash':
self.soundhandle.say("The Gents toilet can be found on the second floor. and a ladies toilet on the third floor.", self.voice)
if command == 'library':
self.soundhandle.say("As you will exit, out of eastern end of this building!. Take a left!. The building! in front of you! is the library!. ", self.voice)
if command == 'labs':
self.soundhandle.say("The labs are on third floor of the Smeaton's building. Infact, we might be standing in one of those. ", self.voice)
def cleanup(self):
# When shutting down
rospy.loginfo("Shutting Down...")
if __name__ == "__main__":
    rospy.init_node('talkative')
    try:
        talkative()
    except rospy.ROSInterruptException:
        # FIX: was a bare `except: pass`, which silently hid every error
        # (bad params, import problems, bugs).  Only the expected
        # shutdown-during-spin exception is suppressed now.
        pass
|
jdekerautem/TurtleBot-Receptionist
|
pocketsphinx_files/talkative.py
|
Python
|
mit
| 8,719
|
[
"VisIt"
] |
5fefe048e7d2edf7a695ed2b2aa990efa11cc99d83e964f971296aad35ebb23e
|
"""
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes, __version__ as sphinx_ver
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.writers.html import SmartyPantsHTMLTranslator
from sphinx.util.console import bold
from sphinx.util.compat import Directive
from sphinx.util.nodes import set_source_info
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
    """Sphinx extension entry point.

    Registers Django's custom cross-reference roles, the django-admin
    description units, the version directives, the snippet directive/node
    and the JSON-emitting HTML builder on *app*.
    """
    # Cross-reference types provide ``:setting:`X``` style roles plus
    # paired index entries.
    app.add_crossref_type(
        directivename="setting",
        rolename="setting",
        indextemplate="pair: %s; setting",
    )
    app.add_crossref_type(
        directivename="templatetag",
        rolename="ttag",
        indextemplate="pair: %s; template tag"
    )
    app.add_crossref_type(
        directivename="templatefilter",
        rolename="tfilter",
        indextemplate="pair: %s; template filter"
    )
    app.add_crossref_type(
        directivename="fieldlookup",
        rolename="lookup",
        indextemplate="pair: %s; field lookup type",
    )
    app.add_description_unit(
        directivename="django-admin",
        rolename="djadmin",
        indextemplate="pair: %s; django-admin command",
        parse_node=parse_django_admin_node,
    )
    app.add_description_unit(
        directivename="django-admin-option",
        rolename="djadminopt",
        indextemplate="pair: %s; django-admin command-line option",
        parse_node=parse_django_adminopt_node,
    )
    app.add_config_value('django_next_version', '0.0', True)
    # Both version directives share one implementation; the directive name
    # (self.name) distinguishes them at run time.
    app.add_directive('versionadded', VersionDirective)
    app.add_directive('versionchanged', VersionDirective)
    app.add_builder(DjangoStandaloneHTMLBuilder)

    # register the snippet directive
    app.add_directive('snippet', SnippetWithFilename)
    # register a node for snippet directive so that the xml parser
    # knows how to handle the enter/exit parsing event
    app.add_node(snippet_with_filename,
                 html=(visit_snippet, depart_snippet_literal),
                 latex=(visit_snippet_latex, depart_snippet_latex),
                 man=(visit_snippet_literal, depart_snippet_literal),
                 text=(visit_snippet_literal, depart_snippet_literal),
                 texinfo=(visit_snippet_literal, depart_snippet_literal))
class snippet_with_filename(nodes.literal_block):
    """Literal-block node subclass used by the ``snippet`` directive.

    Having a distinct node class lets each writer dispatch to the
    snippet-specific visit/depart handlers registered in setup().
    """
def visit_snippet_literal(self, node):
    """Fallback visit handler (man/text/texinfo): treat the snippet as a
    plain literal block."""
    self.visit_literal_block(node)
def depart_snippet_literal(self, node):
    """Fallback depart handler matching visit_snippet_literal."""
    self.depart_literal_block(node)
def visit_snippet(self, node):
    """HTML visit handler: emit a filename banner plus the highlighted
    snippet, then skip docutils' default literal-block processing."""
    lang = self.highlightlang
    # Show line numbers when the snippet is long enough (builder threshold).
    linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
    fname = node['filename']
    highlight_args = node.get('highlight_args', {})
    if 'language' in node:
        # code-block directives
        lang = node['language']
        highlight_args['force'] = True
    if 'linenos' in node:
        linenos = node['linenos']

    def warner(msg):
        # Attribute highlighting warnings to this document/line.
        self.builder.warn(msg, (self.builder.current_docname, node.line))

    highlighted = self.highlighter.highlight_block(node.rawsource, lang,
                                                   warn=warner,
                                                   linenos=linenos,
                                                   **highlight_args)
    starttag = self.starttag(node, 'div', suffix='',
                             CLASS='highlight-%s' % lang)
    self.body.append(starttag)
    # NOTE(review): the trailing '' after \n is an adjacent empty string
    # literal (stray quotes) -- harmless, concatenates to the same bytes.
    self.body.append('<div class="snippet-filename">%s</div>\n''' % (fname,))
    self.body.append(highlighted)
    self.body.append('</div>\n')
    # Tell docutils not to also visit the node's children.
    raise nodes.SkipNode
def visit_snippet_latex(self, node):
    """LaTeX visit handler: start capturing the snippet body; the real
    rendering happens in depart_snippet_latex."""
    self.verbatim = ''
def depart_snippet_latex(self, node):
    """LaTeX depart handler: highlight the captured code and emit it with
    a grey filename banner above the listing."""
    code = self.verbatim.rstrip('\n')
    lang = self.hlsettingstack[-1][0]
    linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
    fname = node['filename']
    highlight_args = node.get('highlight_args', {})
    if 'language' in node:
        # code-block directives
        lang = node['language']
        highlight_args['force'] = True
    if 'linenos' in node:
        linenos = node['linenos']

    def warner(msg):
        # Attribute highlighting warnings to the current file/line.
        self.builder.warn(msg, (self.curfilestack[-1], node.line))

    hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
                                              linenos=linenos,
                                              **highlight_args)

    # Grey filename box above the listing.
    self.body.append('\n{\\colorbox[rgb]{0.9,0.9,0.9}'
                     '{\\makebox[\\textwidth][l]'
                     '{\\small\\texttt{%s}}}}\n' % (fname,))

    if self.table:
        # Inside tables plain Verbatim breaks; switch to OriginalVerbatim.
        hlcode = hlcode.replace('\\begin{Verbatim}',
                                '\\begin{OriginalVerbatim}')
        self.table.has_problematic = True
        self.table.has_verbatim = True

    hlcode = hlcode.rstrip()[:-14]  # strip \end{Verbatim}
    hlcode = hlcode.rstrip() + '\n'
    self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
                     (self.table and 'Original' or ''))
    self.verbatim = None
class SnippetWithFilename(Directive):
    """The ``snippet`` directive: a code block that also carries the name
    of the file it comes from (modeled after Sphinx's CodeBlock)."""
    has_content = True
    optional_arguments = 1
    option_spec = {'filename': directives.unchanged_required}

    def run(self):
        source = u'\n'.join(self.content)
        node = snippet_with_filename(source, source)
        # The optional positional argument is the highlight language.
        if self.arguments:
            node['language'] = self.arguments[0]
        node['filename'] = self.options['filename']
        set_source_info(self, node)
        return [node]
class VersionDirective(Directive):
    # Shared implementation for version-annotation directives
    # (versionadded / versionchanged / deprecated). Renders
    # "Development version" when the argument equals the configured
    # django_next_version; otherwise shows the given version string.
    has_content = True
    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {}
    def run(self):
        # The old two-argument form (version + inline comment) is rejected;
        # explanatory text must be supplied as directive content instead.
        if len(self.arguments) > 1:
            msg = """Only one argument accepted for directive '{directive_name}::'.
            Comments should be provided as content,
            not as an extra argument.""".format(directive_name=self.name)
            raise self.error(msg)
        env = self.state.document.settings.env
        ret = []
        node = addnodes.versionmodified()
        ret.append(node)
        if self.arguments[0] == env.config.django_next_version:
            node['version'] = "Development version"
        else:
            node['version'] = self.arguments[0]
        # node['type'] distinguishes versionadded/versionchanged/deprecated.
        node['type'] = self.name
        if self.content:
            self.state.nested_parse(self.content, self.content_offset, node)
        # Register the change so Sphinx can build its changes index.
        env.note_versionchange(node['type'], node['version'], node, self.lineno)
        return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
    """
    Django-specific reST to HTML tweaks.
    """
    # Don't use border=1, which docutils does by default.
    def visit_table(self, node):
        self.context.append(self.compact_p)
        self.compact_p = True
        self._table_row_index = 0 # Needed by Sphinx
        self.body.append(self.starttag(node, 'table', CLASS='docutils'))
    def depart_table(self, node):
        self.compact_p = self.context.pop()
        self.body.append('</table>\n')
    # <big>? Really?
    def visit_desc_parameterlist(self, node):
        self.body.append('(')
        self.first_param = 1
        self.param_separator = node.child_text_separator
    def depart_desc_parameterlist(self, node):
        self.body.append(')')
    # Only patch literal-block handling on old Sphinx versions;
    # sphinx_ver is a module-level string compared lexically here.
    if sphinx_ver < '1.0.8':
        #
        # Don't apply smartypants to literal blocks
        #
        def visit_literal_block(self, node):
            self.no_smarty += 1
            SmartyPantsHTMLTranslator.visit_literal_block(self, node)
        def depart_literal_block(self, node):
            SmartyPantsHTMLTranslator.depart_literal_block(self, node)
            self.no_smarty -= 1
    #
    # Turn the "new in version" stuff (versionadded/versionchanged) into a
    # better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious that I'd like.
    #
    # FIXME: these messages are all hardcoded in English. We need to change
    # that to accommodate other language docs, but I can't work out how to make
    # that work.
    #
    version_text = {
        'deprecated': 'Deprecated in Django %s',
        'versionchanged': 'Changed in Django %s',
        'versionadded': 'New in Django %s',
    }
    def visit_versionmodified(self, node):
        self.body.append(
            self.starttag(node, 'div', CLASS=node['type'])
        )
        # Trailing ':' when the node has child content, '.' otherwise.
        title = "%s%s" % (
            self.version_text[node['type']] % node['version'],
            ":" if len(node) else "."
        )
        self.body.append('<span class="title">%s</span> ' % title)
    def depart_versionmodified(self, node):
        self.body.append("</div>\n")
    # Give each section a unique ID -- nice for custom CSS hooks
    def visit_section(self, node):
        # Temporarily prefix every id with 's-' (keeping the originals as
        # aliases) while the section opens, then restore the original list.
        old_ids = node.get('ids', [])
        node['ids'] = ['s-' + i for i in old_ids]
        node['ids'].extend(old_ids)
        SmartyPantsHTMLTranslator.visit_section(self, node)
        node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
    """Parse a django-admin directive signature.

    Records the command name (first whitespace-separated token of *sig*)
    on the build environment as ``_django_curr_admin_command`` and renders
    the signature as ``django-admin.py <sig>``. Returns *sig* unchanged.
    """
    env._django_curr_admin_command = sig.partition(' ')[0]
    title = "django-admin.py %s" % sig
    signode += addnodes.desc_name(title, title)
    return sig
def parse_django_adminopt_node(env, sig, signode):
    """A copy of sphinx.directives.CmdoptionDesc.parse_signature()

    Renders each option in *sig* as desc_name/desc_addname nodes and
    returns the first option's name (used as the cross-reference target).
    Raises ValueError if nothing in *sig* looks like an option.
    """
    from sphinx.domains.std import option_desc_re
    count = 0
    firstname = ''
    for m in option_desc_re.finditer(sig):
        optname, args = m.groups()
        if count:
            signode += addnodes.desc_addname(', ', ', ')
        signode += addnodes.desc_name(optname, optname)
        signode += addnodes.desc_addname(args, args)
        if not count:
            firstname = optname
        count += 1
    if not count:
        # Fall back to simple_option_desc_re (a looser pattern defined
        # earlier in this file, outside this excerpt) when Sphinx's own
        # regex matched nothing.
        for m in simple_option_desc_re.finditer(sig):
            optname, args = m.groups()
            if count:
                signode += addnodes.desc_addname(', ', ', ')
            signode += addnodes.desc_name(optname, optname)
            signode += addnodes.desc_addname(args, args)
            if not count:
                firstname = optname
            count += 1
    if not firstname:
        raise ValueError
    return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
    """
    Subclass to add some extra things we need.
    """
    name = 'djangohtml'
    def finish(self):
        # After the standard HTML build completes, dump every template tag
        # and template filter documented in ref/templates/builtins into
        # templatebuiltins.js (presumably consumed by the docs' JavaScript
        # - confirm against the site templates).
        super(DjangoStandaloneHTMLBuilder, self).finish()
        self.info(bold("writing templatebuiltins.js..."))
        # std-domain objects are keyed by (type, name) -> (docname, anchor).
        xrefs = self.env.domaindata["std"]["objects"]
        templatebuiltins = {
            "ttags": [n for ((t, n), (l, a)) in xrefs.items()
                      if t == "templatetag" and l == "ref/templates/builtins"],
            "tfilters": [n for ((t, n), (l, a)) in xrefs.items()
                         if t == "templatefilter" and l == "ref/templates/builtins"],
        }
        outfilename = os.path.join(self.outdir, "templatebuiltins.js")
        with open(outfilename, 'w') as fp:
            fp.write('var django_template_builtins = ')
            json.dump(templatebuiltins, fp)
            fp.write(';\n')
|
rogerhu/django
|
docs/_ext/djangodocs.py
|
Python
|
bsd-3-clause
| 11,809
|
[
"VisIt"
] |
c0551fd5925f81303db0cbb869b3b8a7d24dda74b8f7c5d31cf1287fc4c003c7
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
from functools import wraps
import types
import warnings
import cf_units
import numpy as np
import iris
import iris.coords as coords
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
import iris.quickplot as qplt
import iris.symbols
def simple_cube():
    """Return a simple 2d slice of the realistic 4d stock cube.

    The first and last dimensions of the (t, z, y, x) stock cube are kept,
    and bounds are guessed for 'time' so that bounds-based plot routines
    (e.g. pcolor) can be exercised.
    """
    cube = iris.tests.stock.realistic_4d()[:, 0, 0, :]
    cube.coord('time').guess_bounds()
    return cube
@tests.skip_plot
class TestSimple(tests.GraphicsTest):
    # Basic quickplot checks on the simple 2d slice.
    def test_points(self):
        # contourf draws against coordinate points.
        cube = simple_cube()
        qplt.contourf(cube)
        self.check_graphic()
    def test_bounds(self):
        # pcolor draws against coordinate bounds.
        cube = simple_cube()
        qplt.pcolor(cube)
        self.check_graphic()
@tests.skip_plot
class TestMissingCoord(tests.GraphicsTest):
    # 2d plotting should still work when one or both of the cube's
    # dimension coordinates has been removed.
    def _check(self, cube):
        # Exercise one points-based and one bounds-based routine.
        qplt.contourf(cube)
        self.check_graphic()
        qplt.pcolor(cube)
        self.check_graphic()
    def test_no_u(self):
        cube = simple_cube()
        cube.remove_coord('grid_longitude')
        self._check(cube)
    def test_no_v(self):
        cube = simple_cube()
        cube.remove_coord('time')
        self._check(cube)
    def test_none(self):
        cube = simple_cube()
        cube.remove_coord('grid_longitude')
        cube.remove_coord('time')
        self._check(cube)
@tests.skip_data
@tests.skip_plot
class TestMissingCS(tests.GraphicsTest):
    # NOTE(review): @tests.skip_data appears on both the class and the
    # method - the duplication is harmless but redundant.
    @tests.skip_data
    def test_missing_cs(self):
        # A cube whose lat/lon coords have no coord_system should still
        # plot, and coastlines should still be drawable.
        cube = tests.stock.simple_pp()
        cube.coord("latitude").coord_system = None
        cube.coord("longitude").coord_system = None
        qplt.contourf(cube)
        qplt.plt.gca().coastlines()
        self.check_graphic()
@tests.skip_plot
class TestHybridHeight(tests.GraphicsTest):
    # Plotting checks for a cube on a hybrid-height vertical coordinate.
    def setUp(self):
        # 2d (model level, grid_longitude) slice of the realistic 4d cube.
        self.cube = iris.tests.stock.realistic_4d()[0, :15, 0, :]
    def _check(self, plt_method, test_altitude=True):
        # Default coords, both explicit orderings of the 2d coords, and
        # (optionally) plots against the 'altitude' coordinate.
        plt_method(self.cube)
        self.check_graphic()
        plt_method(self.cube, coords=['level_height', 'grid_longitude'])
        self.check_graphic()
        plt_method(self.cube, coords=['grid_longitude', 'level_height'])
        self.check_graphic()
        if test_altitude:
            plt_method(self.cube, coords=['grid_longitude', 'altitude'])
            self.check_graphic()
            plt_method(self.cube, coords=['altitude', 'grid_longitude'])
            self.check_graphic()
    def test_points(self):
        self._check(qplt.contourf)
    def test_bounds(self):
        # Altitude bounds are not yet supported (see test_orography below).
        self._check(qplt.pcolor, test_altitude=False)
    def test_orography(self):
        qplt.contourf(self.cube)
        iplt.orography_at_points(self.cube)
        iplt.points(self.cube)
        self.check_graphic()
        coords = ['altitude', 'grid_longitude']
        qplt.contourf(self.cube, coords=coords)
        iplt.orography_at_points(self.cube, coords=coords)
        iplt.points(self.cube, coords=coords)
        self.check_graphic()
        # TODO: Test bounds once they are supported.
        with self.assertRaises(NotImplementedError):
            qplt.pcolor(self.cube)
            iplt.orography_at_bounds(self.cube)
            iplt.outline(self.cube)
            self.check_graphic()
@tests.skip_plot
class Test1dPlotMultiArgs(tests.GraphicsTest):
    # tests for iris.plot using multi-argument calling convention
    def setUp(self):
        # 1d vertical profile from the 4d stock cube.
        self.cube1d = _load_4d_testcube()[0, :, 0, 0]
        self.draw_method = iplt.plot
    def test_cube(self):
        # just plot a cube against its dim coord
        self.draw_method(self.cube1d) # altitude vs temp
        self.check_graphic()
    def test_coord(self):
        # plot the altitude coordinate
        self.draw_method(self.cube1d.coord('altitude'))
        self.check_graphic()
    def test_coord_cube(self):
        # plot temperature against sigma
        self.draw_method(self.cube1d.coord('sigma'), self.cube1d)
        self.check_graphic()
    def test_cube_coord(self):
        # plot a vertical profile of temperature
        self.draw_method(self.cube1d, self.cube1d.coord('altitude'))
        self.check_graphic()
    def test_coord_coord(self):
        # plot two coordinates that are not mappable
        self.draw_method(self.cube1d.coord('sigma'),
                         self.cube1d.coord('altitude'))
        self.check_graphic()
    def test_coord_coord_map(self):
        # plot lat-lon aux coordinates of a trajectory, which draws a map
        # NOTE(review): longitude is given units='degrees_north' - this
        # looks like a copy-paste slip ('degrees_east' seems intended);
        # confirm before changing, as the graphic checksum would move.
        lon = iris.coords.AuxCoord([0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
                                   standard_name='longitude',
                                   units='degrees_north')
        lat = iris.coords.AuxCoord([45, 55, 50, 60, 55, 65, 60, 70, 65, 75],
                                   standard_name='latitude',
                                   units='degrees_north')
        self.draw_method(lon, lat)
        plt.gca().coastlines()
        self.check_graphic()
    def test_cube_cube(self):
        # plot two phenomena against each other, in this case just dummy data
        cube1 = self.cube1d.copy()
        cube2 = self.cube1d.copy()
        cube1.rename('some phenomenon')
        cube2.rename('some other phenomenon')
        cube1.units = cf_units.Unit('no_unit')
        cube2.units = cf_units.Unit('no_unit')
        cube1.data[:] = np.linspace(0, 1, 7)
        cube2.data[:] = np.exp(cube1.data)
        self.draw_method(cube1, cube2)
        self.check_graphic()
    def test_incompatible_objects(self):
        # incompatible objects (not the same length) should raise an error
        with self.assertRaises(ValueError):
            self.draw_method(self.cube1d.coord('time'), (self.cube1d))
    def test_multimidmensional(self):
        # multidimensional cubes are not allowed
        cube = _load_4d_testcube()[0, :, :, 0]
        with self.assertRaises(ValueError):
            self.draw_method(cube)
    def test_not_cube_or_coord(self):
        # inputs must be cubes or coordinates, otherwise an error should be
        # raised
        xdim = np.arange(self.cube1d.shape[0])
        with self.assertRaises(TypeError):
            self.draw_method(xdim, self.cube1d)
    def test_plot_old_coords_kwarg(self):
        # Coords used to be a valid kwarg to plot, but it was deprecated and
        # we are maintaining a reasonable exception, check that it is raised
        # here.
        with self.assertRaises(TypeError):
            self.draw_method(self.cube1d, coords=None)
@tests.skip_plot
class Test1dQuickplotPlotMultiArgs(Test1dPlotMultiArgs):
    # tests for iris.plot using multi-argument calling convention
    # Re-runs every Test1dPlotMultiArgs case via the quickplot wrapper.
    def setUp(self):
        self.cube1d = _load_4d_testcube()[0, :, 0, 0]
        self.draw_method = qplt.plot
@tests.skip_data
@tests.skip_plot
class Test1dScatter(tests.GraphicsTest):
    # Scatter plots of 1d cubes/coordinates loaded from a NAME III
    # trajectory file, with colour taken from data or an aux coordinate.
    def setUp(self):
        self.cube = iris.load_cube(
            tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
            'Temperature')
        self.draw_method = iplt.scatter
    def test_coord_coord(self):
        x = self.cube.coord('longitude')
        y = self.cube.coord('altitude')
        c = self.cube.data
        self.draw_method(x, y, c=c, edgecolor='none')
        self.check_graphic()
    def test_coord_coord_map(self):
        # lat vs lon scatter draws a map with coastlines.
        x = self.cube.coord('longitude')
        y = self.cube.coord('latitude')
        c = self.cube.data
        self.draw_method(x, y, c=c, edgecolor='none')
        plt.gca().coastlines()
        self.check_graphic()
    def test_coord_cube(self):
        x = self.cube.coord('latitude')
        y = self.cube
        c = self.cube.coord('Travel Time').points
        self.draw_method(x, y, c=c, edgecolor='none')
        self.check_graphic()
    def test_cube_coord(self):
        x = self.cube
        y = self.cube.coord('altitude')
        c = self.cube.coord('Travel Time').points
        self.draw_method(x, y, c=c, edgecolor='none')
        self.check_graphic()
    def test_cube_cube(self):
        x = iris.load_cube(
            tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
            'Rel Humidity')
        y = self.cube
        c = self.cube.coord('Travel Time').points
        self.draw_method(x, y, c=c, edgecolor='none')
        self.check_graphic()
    def test_incompatible_objects(self):
        # cubes/coordinates of different sizes cannot be plotted
        x = self.cube
        y = self.cube.coord('altitude')[:-1]
        with self.assertRaises(ValueError):
            self.draw_method(x, y)
    def test_multidimensional(self):
        # multidimensional cubes/coordinates are not allowed
        x = _load_4d_testcube()[0, :, :, 0]
        y = x.coord('model_level_number')
        with self.assertRaises(ValueError):
            self.draw_method(x, y)
    def test_not_cube_or_coord(self):
        # inputs must be cubes or coordinates
        x = np.arange(self.cube.shape[0])
        y = self.cube
        with self.assertRaises(TypeError):
            self.draw_method(x, y)
@tests.skip_data
@tests.skip_plot
class Test1dQuickplotScatter(Test1dScatter):
    # Re-runs every Test1dScatter case via the quickplot wrapper.
    def setUp(self):
        self.cube = iris.load_cube(
            tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
            'Temperature')
        self.draw_method = qplt.scatter
@tests.skip_data
@tests.skip_plot
class TestAttributePositive(tests.GraphicsTest):
    # Axis orientation checks - the test names suggest these exercise the
    # vertical coordinate's 'positive' attribute (up vs down); the
    # attribute itself comes from the loaded NetCDF files.
    def test_1d_positive_up(self):
        path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
        cube = iris.load_cube(path)
        qplt.plot(cube.coord('depth'), cube[0, :, 60, 80])
        self.check_graphic()
    def test_1d_positive_down(self):
        path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
        cube = iris.load_cube(path)
        qplt.plot(cube[0, :, 60, 80], cube.coord('depth'))
        self.check_graphic()
    def test_2d_positive_up(self):
        path = tests.get_data_path(('NetCDF', 'testing',
                                    'small_theta_colpex.nc'))
        cube = iris.load_cube(path)[0, :, 42, :]
        qplt.pcolormesh(cube)
        self.check_graphic()
    def test_2d_positive_down(self):
        path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
        cube = iris.load_cube(path)[0, :, 42, :]
        qplt.pcolormesh(cube)
        self.check_graphic()
# Caches _load_4d_testcube so subsequent calls are faster
def cache(fn, cache={}):
    """Memoise *fn* by its ``__name__`` in a module-wide shared dict.

    The mutable default argument is intentional: it is the cache, shared
    by every function decorated with this helper. Call arguments are NOT
    part of the key, so this is only suitable for zero-argument loaders.
    """
    def inner(*args, **kwargs):
        key = fn.__name__
        try:
            return cache[key]
        except KeyError:
            cache[key] = fn(*args, **kwargs)
            return cache[key]
    return inner
@cache
def _load_4d_testcube():
    """Build (once) the standard 4d test cube used throughout this module.

    Starts from the realistic 4d stock cube, replaces 'forecast_period'
    with a multi-valued bounded DimCoord, and subsamples each of the last
    three dimensions by a factor of 10 for speed.
    """
    # Load example 4d data (TZYX).
    test_cube = iris.tests.stock.realistic_4d()
    # Replace forecast_period coord with a multi-valued version.
    time_coord = test_cube.coord('time')
    n_times = len(time_coord.points)
    forecast_dims = test_cube.coord_dims(time_coord)
    test_cube.remove_coord('forecast_period')
    # Make up values (including bounds), to roughly match older testdata.
    point_values = np.linspace((1 + 1.0 / 6), 2.0, n_times)
    point_uppers = point_values + (point_values[1] - point_values[0])
    bound_values = np.column_stack([point_values, point_uppers])
    # NOTE: this must be a DimCoord
    #  - an equivalent AuxCoord produces different plots.
    new_forecast_coord = iris.coords.DimCoord(
        points=point_values,
        bounds=bound_values,
        standard_name='forecast_period',
        units=cf_units.Unit('hours')
    )
    test_cube.add_aux_coord(new_forecast_coord, forecast_dims)
    # Heavily reduce dimensions for faster testing.
    # NOTE: this makes ZYX non-contiguous. Doesn't seem to matter for now.
    test_cube = test_cube[:, ::10, ::10, ::10]
    return test_cube
@cache
def _load_wind_no_bounds():
    """Load (once) a COLPEX x_wind cube with all coordinate bounds removed.

    Used by the *NoBounds tests to check the bounds-guessing warning paths.
    """
    # Load the COLPEX data => TZYX
    path = tests.get_data_path(('PP', 'COLPEX', 'small_eastward_wind.pp'))
    wind = iris.load_cube(path, 'x_wind')
    # Remove bounds from all coords that have them.
    wind.coord('grid_latitude').bounds = None
    wind.coord('grid_longitude').bounds = None
    wind.coord('level_height').bounds = None
    wind.coord('sigma').bounds = None
    # Trim the horizontal extent for speed.
    return wind[:, :, :50, :50]
def _time_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the time coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord('time')
return cube
def _date_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the forecast_period coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord('forecast_period')
return cube
@tests.skip_plot
class SliceMixin(object):
    """Mixin class providing tests for each 2-dimensional permutation of axes.
    Requires self.draw_method to be the relevant plotting function,
    and self.results to be a dictionary containing the desired test results.

    NOTE(review): none of these tests read self.results any more - that
    part of the docstring looks stale; confirm and prune.
    """
    # Axis letters: t=time, z=model level, y=grid_latitude, x=grid_longitude.
    def test_yx(self):
        cube = self.wind[0, 0, :, :]
        self.draw_method(cube)
        self.check_graphic()
    def test_zx(self):
        cube = self.wind[0, :, 0, :]
        self.draw_method(cube)
        self.check_graphic()
    def test_tx(self):
        cube = _time_series(self.wind[:, 0, 0, :])
        self.draw_method(cube)
        self.check_graphic()
    def test_zy(self):
        cube = self.wind[0, :, :, 0]
        self.draw_method(cube)
        self.check_graphic()
    def test_ty(self):
        cube = _time_series(self.wind[:, 0, :, 0])
        self.draw_method(cube)
        self.check_graphic()
    def test_tz(self):
        cube = _time_series(self.wind[:, :, 0, 0])
        self.draw_method(cube)
        self.check_graphic()
@tests.skip_data
class TestContour(tests.GraphicsTest, SliceMixin):
    """Test the iris.plot.contour routine."""
    # All slice permutations come from SliceMixin; only the data and the
    # routine under test are configured here.
    def setUp(self):
        self.wind = _load_4d_testcube()
        self.draw_method = iplt.contour
@tests.skip_data
class TestContourf(tests.GraphicsTest, SliceMixin):
    """Test the iris.plot.contourf routine."""
    # All slice permutations come from SliceMixin.
    def setUp(self):
        self.wind = _load_4d_testcube()
        self.draw_method = iplt.contourf
@tests.skip_data
class TestPcolor(tests.GraphicsTest, SliceMixin):
    """Test the iris.plot.pcolor routine."""
    # All slice permutations come from SliceMixin.
    def setUp(self):
        self.wind = _load_4d_testcube()
        self.draw_method = iplt.pcolor
@tests.skip_data
class TestPcolormesh(tests.GraphicsTest, SliceMixin):
    """Test the iris.plot.pcolormesh routine."""
    # All slice permutations come from SliceMixin.
    def setUp(self):
        self.wind = _load_4d_testcube()
        self.draw_method = iplt.pcolormesh
def check_warnings(method):
    """
    Decorator that adds a catch_warnings and filter to assert
    the method being decorated issues a UserWarning.
    """
    @wraps(method)
    def decorated_method(self, *args, **kwargs):
        # Force reset of iris.coords warnings registry to avoid suppression of
        # repeated warnings. warnings.resetwarnings() does not do this.
        if hasattr(coords, '__warningregistry__'):
            coords.__warningregistry__.clear()
        # Check that method raises warning.
        with warnings.catch_warnings():
            # With the "error" filter any warning becomes an exception, so
            # the method must actually warn for assertRaises to succeed.
            warnings.simplefilter("error")
            with self.assertRaises(UserWarning):
                return method(self, *args, **kwargs)
    return decorated_method
def ignore_warnings(method):
    """Decorate *method* so any warnings it issues are suppressed.

    Counterpart to check_warnings: used to re-run the base functionality
    with the warning noise silenced.
    """
    @wraps(method)
    def silenced(self, *args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return method(self, *args, **kwargs)
    return silenced
class CheckForWarningsMetaclass(type):
"""
Metaclass that adds a further test for each base class test
that checks that each test raises a UserWarning. Each base
class test is then overriden to ignore warnings in order to
check the underlying functionality.
"""
def __new__(cls, name, bases, local):
def add_decorated_methods(attr_dict, target_dict, decorator):
for key, value in attr_dict.items():
if (isinstance(value, types.FunctionType) and
key.startswith('test')):
new_key = '_'.join((key, decorator.__name__))
if new_key not in target_dict:
wrapped = decorator(value)
wrapped.__name__ = new_key
target_dict[new_key] = wrapped
else:
raise RuntimeError('A attribute called {!r} '
'already exists.'.format(new_key))
def override_with_decorated_methods(attr_dict, target_dict,
decorator):
for key, value in attr_dict.items():
if (isinstance(value, types.FunctionType) and
key.startswith('test')):
target_dict[key] = decorator(value)
# Add decorated versions of base methods
# to check for warnings.
for base in bases:
add_decorated_methods(base.__dict__, local, check_warnings)
# Override base methods to ignore warnings.
for base in bases:
override_with_decorated_methods(base.__dict__, local,
ignore_warnings)
return type.__new__(cls, name, bases, local)
@tests.skip_data
class TestPcolorNoBounds(six.with_metaclass(CheckForWarningsMetaclass,
                                            tests.GraphicsTest, SliceMixin)):
    """
    Test the iris.plot.pcolor routine on a cube with coordinates
    that have no bounds.

    The metaclass duplicates each SliceMixin test to assert a UserWarning
    is raised, and silences warnings in the originals.
    """
    def setUp(self):
        self.wind = _load_wind_no_bounds()
        self.draw_method = iplt.pcolor
@tests.skip_data
class TestPcolormeshNoBounds(six.with_metaclass(CheckForWarningsMetaclass,
                                                tests.GraphicsTest,
                                                SliceMixin)):
    """
    Test the iris.plot.pcolormesh routine on a cube with coordinates
    that have no bounds.

    The metaclass duplicates each SliceMixin test to assert a UserWarning
    is raised, and silences warnings in the originals.
    """
    def setUp(self):
        self.wind = _load_wind_no_bounds()
        self.draw_method = iplt.pcolormesh
@tests.skip_plot
class Slice1dMixin(object):
    """Mixin class providing tests for each 1-dimensional permutation of axes.
    Requires self.draw_method to be the relevant plotting function,
    and self.results to be a dictionary containing the desired test results.

    NOTE(review): none of these tests read self.results any more - that
    part of the docstring looks stale; confirm and prune.
    """
    def test_x(self):
        cube = self.wind[0, 0, 0, :]
        self.draw_method(cube)
        self.check_graphic()
    def test_y(self):
        cube = self.wind[0, 0, :, 0]
        self.draw_method(cube)
        self.check_graphic()
    def test_z(self):
        cube = self.wind[0, :, 0, 0]
        self.draw_method(cube)
        self.check_graphic()
    def test_t(self):
        cube = _time_series(self.wind[:, 0, 0, 0])
        self.draw_method(cube)
        self.check_graphic()
    def test_t_dates(self):
        # Plot against forecast-free dates, with rotated tick labels.
        cube = _date_series(self.wind[:, 0, 0, 0])
        self.draw_method(cube)
        plt.gcf().autofmt_xdate()
        plt.xlabel('Phenomenon time')
        self.check_graphic()
@tests.skip_data
class TestPlot(tests.GraphicsTest, Slice1dMixin):
    """Test the iris.plot.plot routine."""
    # All 1d slice permutations come from Slice1dMixin.
    def setUp(self):
        self.wind = _load_4d_testcube()
        self.draw_method = iplt.plot
@tests.skip_data
class TestQuickplotPlot(tests.GraphicsTest, Slice1dMixin):
    """Test the iris.quickplot.plot routine."""
    # All 1d slice permutations come from Slice1dMixin.
    def setUp(self):
        self.wind = _load_4d_testcube()
        self.draw_method = qplt.plot
# Module-level memo for load_cube_once, keyed by (filename, constraint).
_load_cube_once_cache = {}
def load_cube_once(filename, constraint):
    """As iris.load_cube, but each (filename, constraint) pair is loaded
    only once and memoised in a module-level dictionary.
    """
    key = (filename, str(constraint))
    if key not in _load_cube_once_cache:
        _load_cube_once_cache[key] = iris.load_cube(filename, constraint)
    return _load_cube_once_cache[key]
class LambdaStr(object):
    """A callable wrapper whose repr() is a caller-supplied string.

    Useful when parameterising tests with lambdas: calls pass straight
    through to the wrapped function, while failure messages show a
    readable name instead of '<lambda>'.
    """
    def __init__(self, repr, lambda_fn):
        # NOTE: `repr` deliberately shadows the builtin inside __init__.
        self.repr = repr
        self.lambda_fn = lambda_fn

    def __repr__(self):
        return self.repr

    def __call__(self, *args, **kwargs):
        return self.lambda_fn(*args, **kwargs)
@tests.skip_data
@tests.skip_plot
class TestPlotCoordinatesGiven(tests.GraphicsTest):
    # Drives several iris.plot routines through explicit `coords=` choices,
    # using LambdaStr wrappers so failures report which routine was in use.
    def setUp(self):
        filename = tests.get_data_path(('PP', 'COLPEX',
                                        'theta_and_orog_subset.pp'))
        self.cube = load_cube_once(filename, 'air_potential_temperature')
        self.draw_module = iris.plot
        self.contourf = LambdaStr('iris.plot.contourf',
                                  lambda cube, *args, **kwargs:
                                  iris.plot.contourf(cube, *args, **kwargs))
        self.contour = LambdaStr('iris.plot.contour',
                                 lambda cube, *args, **kwargs:
                                 iris.plot.contour(cube, *args, **kwargs))
        self.points = LambdaStr('iris.plot.points',
                                lambda cube, *args, **kwargs:
                                iris.plot.points(cube, c=cube.data,
                                                 *args, **kwargs))
        self.plot = LambdaStr('iris.plot.plot',
                              lambda cube, *args, **kwargs:
                              iris.plot.plot(cube, *args, **kwargs))
        # Map each slice kind to the (routine, coords) combinations to run;
        # both coordinate orderings are exercised for the 2d cases.
        self.results = {'yx': ([self.contourf, ['grid_latitude',
                                                'grid_longitude']],
                               [self.contourf, ['grid_longitude',
                                                'grid_latitude']],
                               [self.contour, ['grid_latitude',
                                               'grid_longitude']],
                               [self.contour, ['grid_longitude',
                                               'grid_latitude']],
                               [self.points, ['grid_latitude',
                                              'grid_longitude']],
                               [self.points, ['grid_longitude',
                                              'grid_latitude']],),
                        'zx': ([self.contourf, ['model_level_number',
                                                'grid_longitude']],
                               [self.contourf, ['grid_longitude',
                                                'model_level_number']],
                               [self.contour, ['model_level_number',
                                               'grid_longitude']],
                               [self.contour, ['grid_longitude',
                                               'model_level_number']],
                               [self.points, ['model_level_number',
                                              'grid_longitude']],
                               [self.points, ['grid_longitude',
                                              'model_level_number']],),
                        'tx': ([self.contourf, ['time', 'grid_longitude']],
                               [self.contourf, ['grid_longitude', 'time']],
                               [self.contour, ['time', 'grid_longitude']],
                               [self.contour, ['grid_longitude', 'time']],
                               [self.points, ['time', 'grid_longitude']],
                               [self.points, ['grid_longitude', 'time']],),
                        'x': ([self.plot, ['grid_longitude']],),
                        'y': ([self.plot, ['grid_latitude']],)
                        }
    def draw(self, draw_method, *args, **kwargs):
        draw_fn = getattr(self.draw_module, draw_method)
        draw_fn(*args, **kwargs)
        self.check_graphic()
    def run_tests(self, cube, results):
        # Run each (routine, coords) pair against the graphic checksum,
        # re-raising failures with the offending combination named.
        for draw_method, coords in results:
            draw_method(cube, coords=coords)
            try:
                self.check_graphic()
            except AssertionError as err:
                self.fail('Draw method %r failed with coords: %r. '
                          'Assertion message: %s' % (draw_method, coords, err))
    def run_tests_1d(self, cube, results):
        # there is a different calling convention for 1d plots
        for draw_method, coords in results:
            draw_method(cube.coord(coords[0]), cube)
            try:
                self.check_graphic()
            except AssertionError as err:
                msg = 'Draw method {!r} failed with coords: {!r}. ' \
                      'Assertion message: {!s}'
                self.fail(msg.format(draw_method, coords, err))
    def test_yx(self):
        test_cube = self.cube[0, 0, :, :]
        self.run_tests(test_cube, self.results['yx'])
    def test_zx(self):
        test_cube = self.cube[0, :15, 0, :]
        self.run_tests(test_cube, self.results['zx'])
    def test_tx(self):
        test_cube = self.cube[:, 0, 0, :]
        self.run_tests(test_cube, self.results['tx'])
    def test_x(self):
        test_cube = self.cube[0, 0, 0, :]
        self.run_tests_1d(test_cube, self.results['x'])
    def test_y(self):
        test_cube = self.cube[0, 0, :, 0]
        self.run_tests_1d(test_cube, self.results['y'])
    def test_badcoords(self):
        # Duplicate, surplus, unknown and empty coords= values must raise.
        cube = self.cube[0, 0, :, :]
        draw_fn = getattr(self.draw_module, 'contourf')
        self.assertRaises(ValueError, draw_fn, cube,
                          coords=['grid_longitude', 'grid_longitude'])
        self.assertRaises(ValueError, draw_fn, cube,
                          coords=['grid_longitude', 'grid_longitude',
                                  'grid_latitude'])
        self.assertRaises(iris.exceptions.CoordinateNotFoundError, draw_fn,
                          cube, coords=['grid_longitude', 'wibble'])
        self.assertRaises(ValueError, draw_fn, cube, coords=[])
        self.assertRaises(ValueError, draw_fn, cube,
                          coords=[cube.coord('grid_longitude'),
                                  cube.coord('grid_longitude')])
        self.assertRaises(ValueError, draw_fn, cube,
                          coords=[cube.coord('grid_longitude'),
                                  cube.coord('grid_longitude'),
                                  cube.coord('grid_longitude')])
    def test_non_cube_coordinate(self):
        # coords= may include a coordinate object not attached to the cube.
        cube = self.cube[0, :, :, 0]
        pts = -100 + np.arange(cube.shape[1]) * 13
        x = coords.DimCoord(pts, standard_name='model_level_number',
                            attributes={'positive': 'up'})
        self.draw('contourf', cube, coords=['grid_latitude', x])
@tests.skip_data
@tests.skip_plot
class TestPlotDimAndAuxCoordsKwarg(tests.GraphicsTest):
    # coords= should accept dimension coords, auxiliary coords, and either
    # kind by name, on a rotated-pole dataset.
    def setUp(self):
        filename = tests.get_data_path(('NetCDF', 'rotated', 'xy',
                                        'rotPole_landAreaFraction.nc'))
        self.cube = iris.load_cube(filename)
    def test_default(self):
        iplt.contourf(self.cube)
        plt.gca().coastlines()
        self.check_graphic()
    def test_coords(self):
        # Pass in dimension coords.
        rlat = self.cube.coord('grid_latitude')
        rlon = self.cube.coord('grid_longitude')
        iplt.contourf(self.cube, coords=[rlon, rlat])
        plt.gca().coastlines()
        self.check_graphic()
        # Pass in auxiliary coords.
        lat = self.cube.coord('latitude')
        lon = self.cube.coord('longitude')
        iplt.contourf(self.cube, coords=[lon, lat])
        plt.gca().coastlines()
        self.check_graphic()
    def test_coord_names(self):
        # Pass in names of dimension coords.
        iplt.contourf(self.cube, coords=['grid_longitude', 'grid_latitude'])
        plt.gca().coastlines()
        self.check_graphic()
        # Pass in names of auxiliary coords.
        iplt.contourf(self.cube, coords=['longitude', 'latitude'])
        plt.gca().coastlines()
        self.check_graphic()
    def test_yx_order(self):
        # Do not attempt to draw coastlines as it is not a map.
        iplt.contourf(self.cube, coords=['grid_latitude', 'grid_longitude'])
        self.check_graphic()
        iplt.contourf(self.cube, coords=['latitude', 'longitude'])
        self.check_graphic()
@tests.skip_plot
class TestSymbols(tests.GraphicsTest):
    # Rendering check for the iris cloud-cover symbol glyphs.
    def test_cloud_cover(self):
        # Draw all ten CLOUD_COVER glyphs in a row, axes hidden.
        iplt.symbols(list(range(10)),
                     [0] * 10,
                     [iris.symbols.CLOUD_COVER[i] for i in range(10)],
                     0.375)
        iplt.plt.axis('off')
        self.check_graphic()
@tests.skip_plot
class TestPlottingExceptions(tests.IrisTest):
    # Error-path checks for bounds-mode plotting (no graphics comparison).
    def setUp(self):
        self.bounded_cube = tests.stock.lat_lon_cube()
        self.bounded_cube.coord("latitude").guess_bounds()
        self.bounded_cube.coord("longitude").guess_bounds()
    def test_boundmode_multidim(self):
        # Test exception translation.
        # We can't get contiguous bounded grids from multi-d coords.
        cube = self.bounded_cube
        cube.remove_coord("latitude")
        cube.add_aux_coord(coords.AuxCoord(points=cube.data,
                                           standard_name='latitude',
                                           units='degrees'), [0, 1])
        with self.assertRaises(ValueError):
            iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
    def test_boundmode_4bounds(self):
        # Test exception translation.
        # We can only get contiguous bounded grids with 2 bounds per point.
        cube = self.bounded_cube
        lat = coords.AuxCoord.from_coord(cube.coord("latitude"))
        lat.bounds = np.array([lat.points, lat.points + 1,
                               lat.points + 2, lat.points + 3]).transpose()
        cube.remove_coord("latitude")
        cube.add_aux_coord(lat, 0)
        with self.assertRaises(ValueError):
            iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
    def test_different_coord_systems(self):
        # Mismatched coord_systems on the two horizontal coords must raise.
        cube = self.bounded_cube
        lat = cube.coord('latitude')
        lon = cube.coord('longitude')
        lat.coord_system = iris.coord_systems.GeogCS(7000000)
        lon.coord_system = iris.coord_systems.GeogCS(7000001)
        with self.assertRaises(ValueError):
            iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
@tests.skip_data
@tests.skip_plot
class TestPlotOtherCoordSystems(tests.GraphicsTest):
    # Plot a cube on a transverse-Mercator grid with coastlines.
    def test_plot_tmerc(self):
        filename = tests.get_data_path(('NetCDF', 'transverse_mercator',
                                        'tmean_1910_1910.nc'))
        self.cube = iris.load_cube(filename)
        iplt.pcolormesh(self.cube[0])
        plt.gca().coastlines()
        self.check_graphic()
# Standard iris test entry point - allows running this module directly.
if __name__ == "__main__":
    tests.main()
|
mo-g/iris
|
lib/iris/tests/test_plot.py
|
Python
|
gpl-3.0
| 32,082
|
[
"NetCDF"
] |
7b7b3e62fc84eaa2e6b4ca603e651a0ecf7595a85d2eb04acb508fc04fb8a2bd
|
# Hidden Markov Model Implementation
import math
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
import matplotlib.cm as cm
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray
from m3skin_ros.msg import TaxelArray as TaxelArray_Meka
from hrl_msgs.msg import FloatArrayBare
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Exploration')
from data_fixed_length_force_sample import Fmat_original
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
    """Split *fvec* row-wise into 10 equal chunks and return per-chunk stats.

    Parameters
    ----------
    fvec : 2-d array-like, shape (m, n)
        Feature vectors, one sample per row. Chunk size is m // 10, so any
        trailing m % 10 rows are ignored (matching the original behaviour).

    Returns
    -------
    mu, sigma : np.matrix, shape (10, 1)
        Mean and population standard deviation (ddof=0) of each chunk,
        taken over all elements of the chunk.
    """
    m, n = np.shape(fvec)
    mu = np.matrix(np.zeros((10, 1)))
    sigma = np.matrix(np.zeros((10, 1)))
    # Floor division keeps the slice bounds integral on Python 3 too
    # (the original `m / 10` yields a float there, breaking the slicing).
    divs = m // 10
    for index in range(10):
        chunk = fvec[index * divs:(index + 1) * divs, 0:]
        # np.mean/np.std replace the deprecated (and since removed)
        # scipy.mean/scipy.std aliases; the results are identical.
        mu[index] = np.mean(chunk)
        sigma[index] = np.std(chunk)
    return mu, sigma
def callback(data):
    # ROS subscriber callback for TaxelArray messages: accumulates per-taxel
    # force magnitudes, records the per-frame peak force, then runs the HMM
    # classifier (test_data) on everything gathered so far.
    rospy.loginfo('Getting data and Saving pics!')
    force_vectors = np.row_stack([data.values_x, data.values_y, data.values_z])
    # Euclidean magnitude of each taxel's 3d force vector.
    fmags_instant = ut.norm(force_vectors)
    threshold = 0.01
    # Subtract the noise threshold and clamp negatives to zero.
    fmags_tuned = fmags_instant - threshold
    fmags_tuned[np.where(fmags_tuned<0)]=0
    fmags_instant_tuned = fmags_tuned
    global fmags
    fmags = np.row_stack([fmags,fmags_instant_tuned])
    # Calculating no. of contact regions with hand-tuned force threshold
    contact_regions = fmags_instant > threshold
    lb,ls = ni.label(contact_regions)
    # NOTE(review): ni.sum(lb) sums the *label values*, not the region
    # count - `ls` already holds the number of regions. Confirm intent.
    total_contact = ni.sum(lb)
    # Calculating force data for contact with hand-tuned force threshold
    #total_forces = ni.sum(fmags_instant,lb)
    #mean_forces = ni.mean(fmags_instant,lb)
    temp = fmags_instant*lb
    # NOTE(review): max of magnitude*label weights higher-numbered regions,
    # so this is not necessarily the true maximum force - confirm.
    max_force = np.max(temp)
    # NOTE(review): max_force is a *local* here, while test_data() reads a
    # *global* max_force - confirm that a module-level max_force is set
    # elsewhere (outside this excerpt), otherwise test_data sees stale data.
    global time_varying_data
    time_varying_data.append(max_force)
    test_data()
def test_data():
    """Classify the accumulated max-force time series as Trunk vs Foliage.

    Runs the Viterbi algorithm of both trained HMMs (model_ff = foliage,
    model_tf = trunk) on the global ``time_varying_data`` sequence, sets the
    global classification flags, and resets the sequence buffer.
    Only classifies when the global ``max_force`` is positive.
    """
    global FLAG_Trunk
    global FLAG_Trunk_List
    global FLAG_Leaf
    global FLAG_Leaf_List
    # For Testing
    global time_varying_data
    global max_force
    if (max_force > 0):
        ts_obj = time_varying_data
        final_ts_obj = ghmm.EmissionSequence(F,ts_obj)
        # Find Viterbi Path
        global model_ff
        global model_tf
        # viterbi() returns (state_path, log_likelihood); index [1] is the
        # log-likelihood used for model comparison below.
        path_ff_obj = model_ff.viterbi(final_ts_obj)
        path_tf_obj = model_tf.viterbi(final_ts_obj)
        #print path_ff_obj[1], path_tf_obj[1]
        diff_ff = abs(path_ff_obj[1]-path_tf_obj[1])
        diff_tf = abs(path_tf_obj[1]-path_ff_obj[1])
        obj = max(path_ff_obj[1],path_tf_obj[1])
        # 1800 is a hand-tuned confidence threshold on |log-likelihood|.
        obj_min = min(abs(path_ff_obj[1]),abs(path_tf_obj[1]))
        #if ((obj == path_ff_obj[1]) and (diff_ff > 500)):
        if ((obj == path_ff_obj[1]) and (obj_min > 1800)):
            print 'Foliage :'
            FLAG_Trunk = False
            FLAG_Leaf = True
        elif ((obj == path_tf_obj[1]) and (obj_min > 1800)):
            print 'Trunk :'
            FLAG_Trunk = True
            FLAG_Leaf = False
        else:
            print 'Unknown'
            FLAG_Trunk = False
            FLAG_Leaf = False
        # NOTE(review): flags are appended only in this branch; getpics()
        # indexes these lists per frame -- verify lengths stay in sync.
        FLAG_Trunk_List.append(FLAG_Trunk)
        FLAG_Leaf_List.append(FLAG_Leaf)
    else:
        print 'Unknown'
        FLAG_Trunk = False
        FLAG_Leaf = False
    time_varying_data = [0]
def getdata():
    """Initialize the ROS node and block, feeding taxel messages to callback()."""
    rospy.init_node('getdata', anonymous=True)
    rospy.Subscriber("/skin_patch_forearm_right/taxels/forces", TaxelArray_Meka, callback)
    # Blocks until the node is shut down; getpics() runs afterwards.
    rospy.spin()
def getpics():
    """Render each accumulated force frame as a color-coded taxel image.

    Unrolls every row of the global ``fmags`` history into a 16x24 taxel grid
    (rotated by ``index`` columns) and saves one PNG per frame, colored by
    the classification recorded in FLAG_Trunk_List / FLAG_Leaf_List:
    black = unknown, red = trunk, green = leaf/foliage.
    """
    global FLAG_Trunk_List
    global FLAG_Leaf_List
    j = 0
    #index = 0 # For Fixed Pillow
    index = 16 #For Fixed Styrofoam, Fixed Foliage, Movable Styrofoam, and Movable Pillow
    for i in fmags:
        force_arr_raw = fmags[j,:]
        force_arr_raw_reshaped = force_arr_raw.reshape((16,24))
        # Rotate columns so the array starts at taxel column `index`.
        force_arr = np.column_stack((force_arr_raw_reshaped[:,index:],force_arr_raw_reshaped[:,:index]))
        global frame
        frame = frame + 1
        if ((not FLAG_Trunk_List[j]) and (not FLAG_Leaf_List[j])):
            #Keep it Black
            pp.imshow(force_arr, interpolation='nearest', cmap=cm.binary,
                      origin='upper', vmin=0, vmax=1)
        elif ((FLAG_Trunk_List[j]) and (not FLAG_Leaf_List[j])):
            #Make it Red
            pp.imshow(force_arr, interpolation='nearest', cmap=cm.Reds,
                      origin='upper', vmin=0, vmax=1)
        elif ((not FLAG_Trunk_List[j]) and (FLAG_Leaf_List[j])):
            #Make it Green
            pp.imshow(force_arr, interpolation='nearest', cmap=cm.Greens,
                      origin='upper', vmin=0, vmax=1)
        pp.title('Unrolled Taxel Array')
        pp.xlabel('Along the circumference')
        pp.ylabel('Along the forearm')
        pp.xlim(-0.5,23.5)
        pp.ylim(15.5, -0.5)
        # NOTE(review): `pyl` is not imported in the visible portion of this
        # file -- presumably `import pylab as pyl` earlier; verify.
        pyl.savefig('%05d.%s'% (frame, format))
        j = j+1
if __name__ == '__main__':
    # Training data: rows are time samples (force features), columns are
    # trials -- first 5 columns foliage, next 5 columns trunk.
    Fmat = Fmat_original
    Foliage_Trials = 5
    Trunk_Trials = 5
    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    #print " "
    #print 'Total_Matrix_Shape:',m_tot,n_tot
    # Per-state Gaussian emission parameters for each class.
    mu_ff,sigma_ff = feature_to_mu_sigma(Fmat[0:121,0:Foliage_Trials])
    mu_tf,sigma_tf = feature_to_mu_sigma(Fmat[0:121,Foliage_Trials:(Foliage_Trials + Trunk_Trials)])
    #print [mu_ff, sigma_ff]
    # HMM - Implementation:
    # 10 Hidden States
    # Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
    # Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
    # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
    # For new objects, it is classified according to which model it represenst the closest..
    F = ghmm.Float() # emission domain of this model
    # A - Transition Matrix (upper-triangular left-to-right HMM prior)
    A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_ff = np.zeros((10,2))
    B_tf = np.zeros((10,2))
    for num_states in range(10):
        B_ff[num_states,0] = mu_ff[num_states]
        B_ff[num_states,1] = sigma_ff[num_states]
        B_tf[num_states,0] = mu_tf[num_states]
        B_tf[num_states,1] = sigma_tf[num_states]
    B_ff = B_ff.tolist()
    B_tf = B_tf.tolist()
    # pi - initial probabilities per state
    pi = [0.1] * 10
    # generate RF, RM, SF, SM models from parameters
    model_ff = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_ff, pi) # Will be Trained
    model_tf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_tf, pi) # Will be Trained
    total_seq = Fmat[0:121,:]
    # For Training
    # NOTE(review): these slices select the first 5 / next 5 ROWS of
    # total_seq, whereas mu/sigma above were computed from trial COLUMNS --
    # verify this row-vs-column indexing is intentional.
    total_seq_ff = total_seq[0:Foliage_Trials]
    total_seq_tf = total_seq[Foliage_Trials:(Foliage_Trials + Trunk_Trials)]
    train_seq_ff = (np.array(total_seq_ff).T).tolist()
    train_seq_tf = (np.array(total_seq_tf).T).tolist()
    final_ts_ff = ghmm.SequenceSet(F,train_seq_ff)
    final_ts_tf = ghmm.SequenceSet(F,train_seq_tf)
    model_ff.baumWelch(final_ts_ff)
    model_tf.baumWelch(final_ts_tf)
    # Gather Data from Robot Online
    # Globals shared by callback()/test_data()/getpics().
    index = 0
    frame = 0
    max_force = 0
    format = 'png'
    fmags = np.zeros(384)  # 16x24 taxel array unrolled
    time_varying_data = [0]
    FLAG_Trunk = False
    FLAG_Leaf = False
    FLAG_Trunk_List = [False]
    FLAG_Leaf_List = [False]
    getdata()
    getpics()
|
tapomayukh/projects_in_python
|
rapid_categorization/fixed_length_training_data/hmm_force_10_states.py
|
Python
|
mit
| 8,710
|
[
"Gaussian",
"Mayavi"
] |
23649bdc5e49bcff44faf9d94b11b3574aeb1632c0d2188ccc8649dbead6e388
|
# kchannel.py ---
#
# Filename: kchannel.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Jun 4 12:30:31 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import numpy as np
import moose
from settings import *
def rates_kv(v, Ra, Rb, tha, qa):
    """Forward/backward rate helper for the Kv channel (HH-style kinetics).

    v   : membrane potential(s) in mV (scalar or array).
    Ra  : max activation rate (/ms); Rb : max deactivation rate (/ms).
    tha : half-activation voltage (mV); qa : slope (mV).

    Returns (ntau, ninf). The 1e-3 factor converts the ms-based time constant
    to seconds; ``tadj`` is the module-level temperature adjustment imported
    from settings. NOTE(review): make_km() omits the 1e-3 here -- verify the
    intended unit convention is consistent between the two channels.
    """
    x = np.asarray(v, dtype=float) - tha
    # The textbook expressions are 0/0 at v == tha; substitute the analytic
    # limit x/(1 - exp(-x/qa)) -> qa as x -> 0 instead of emitting NaN.
    with np.errstate(divide='ignore', invalid='ignore'):
        a = Ra * x / (1 - np.exp(-x / qa))
        b = -Rb * x / (1 - np.exp(x / qa))
    a = np.where(x == 0, Ra * qa, a)
    b = np.where(x == 0, Rb * qa, b)
    ntau = 1e-3 / tadj / (a + b)
    ninf = a / (a + b)
    return ntau, ninf
def make_kv():
    """Translated to pymoose from original model by Zach Mainen. - Subhasis Ray 2014-06-04
    Original comment:
    COMMENT
    26 Ago 2002 Modification of original channel to allow variable time step and to correct an initialization error.
    Done by Michael Hines(michael.hines@yale.e) and Ruggero Scorcioni(rscorcio@gmu.edu) at EU Advance Course in Computational Neuroscience. Obidos, Portugal
    kv.mod
    Potassium channel, Hodgkin-Huxley style kinetics
    Kinetic rates based roughly on Sah et al. and Hamill et al. (1991)
    Author: Zach Mainen, Salk Institute, 1995, zach@salk.edu
    ENDCOMMENT
    """
    # NOTE(review): gbar is computed but never assigned to the channel
    # (Gbar is set to 0.0 below) -- presumably set later by the caller; verify.
    gbar = 5 * tadj  # (pS/um2) : 0.03 mho/cm2
    tha = 25  # (mV) : v 1/2 for inf
    qa = 9  # (mV) : inf slope
    Ra = 0.02  # (/ms) : max act rate
    Rb = 0.002  # (/ms) : max deact rate
    vmin = -120  # (mV)
    vmax = 100  # (mV)
    v = np.linspace(vmin, vmax, 3000)
    ntau, ninf = rates_kv(v, Ra, Rb, tha, qa)
    channel = moose.HHChannel('/library/Kv')
    channel.Xpower = 1
    # Gate tables: tableA = ninf/ntau, tableB = 1/ntau, both scaled by 1e3.
    channel.gateX[0].tableA = 1e3 * ninf / ntau
    channel.gateX[0].tableB = 1e3 / ntau
    # Interpolation range in volts (mV -> V).
    channel.gateX[0].min = vmin * 1e-3
    channel.gateX[0].max = vmax * 1e-3
    channel.Gbar = 0.0
    channel.Ek = -90e-3  # K+ reversal potential (V)
    return channel
def make_km():
    """Ported to pymoose. - Subhasis Ray, Thu Jun  5 15:07:58 IST 2014
    Original comment:
    COMMENT
    26 Ago 2002 Modification of original channel to allow variable time step and to correct an initialization error.
    Done by Michael Hines(michael.hines@yale.e) and Ruggero Scorcioni(rscorcio@gmu.edu) at EU Advance Course in Computational Neuroscience. Obidos, Portugal
    km.mod
    Potassium channel, Hodgkin-Huxley style kinetics
    Based on I-M (muscarinic K channel)
    Slow, noninactivating
    Author: Zach Mainen, Salk Institute, 1995, zach@salk.edu
    ENDCOMMENT
    """
    gbar = 10  # (pS/um2) = S/m2 : 0.03 mho/cm2
    tha = -30  # (mV) : v 1/2 for inf
    qa = 9  # (mV) : inf slope
    Ra = 0.001  # (/ms) : max act rate  (slow)
    Rb = 0.001  # (/ms) : max deact rate  (slow)
    vmin = -120  # (mV)
    vmax = 100  # (mV)
    v = np.linspace(vmin, vmax, 3000)
    # Same rate expressions as rates_kv(), inlined with Km parameters.
    a = Ra * (v - tha) / (1 - np.exp(-(v - tha)/qa))
    b = -Rb * (v - tha) / (1 - np.exp((v - tha)/qa))
    # NOTE(review): unlike rates_kv(), no 1e-3 ms->s factor here, so the
    # 1e3-scaled tables below differ from Kv's by a factor of 1000 --
    # confirm which unit convention is correct.
    ntau = 1 / tadj /(a + b)
    ninf = a/(a+b)
    channel = moose.HHChannel('/library/Km')
    channel.Gbar = 0.0  # tadj * gbar # not really used
    channel.Xpower = 1
    channel.gateX[0].min = vmin * 1e-3
    channel.gateX[0].max = vmax * 1e-3
    channel.gateX[0].tableA = 1e3 * ninf / ntau
    channel.gateX[0].tableB = 1e3 / ntau
    channel.Ek = -90e-3  # K+ reversal potential (V)
    return channel
def make_kca():
    """Ported from NEURON code. - Subhasis Ray, Thu Jun  5 16:18:14 IST 2014
    original file comment:
    COMMENT
    26 Ago 2002 Modification of original channel to allow variable time step and to correct an initialization error.
    Done by Michael Hines(michael.hines@yale.e) and Ruggero Scorcioni(rscorcio@gmu.edu) at EU Advance Course in Computational Neuroscience. Obidos, Portugal
    kca.mod
    Calcium-dependent potassium channel
    Based on
    Pennefather (1990) -- sympathetic ganglion cells
    taken from
    Reuveni et al (1993) -- neocortical cells
    Author: Zach Mainen, Salk Institute, 1995, zach@salk.edu
    ENDCOMMENT
    """
    gbar = 10  # (pS/um2) : 0.03 mho/cm2
    caix = 1  # exponent on [Ca2+] in the activation rate
    Ra = 0.01  # (/ms) : max act rate
    Rb = 0.02  # (/ms) : max deact rate
    vmin = -120  # (mV) -- unused here; gate depends on [Ca2+], not voltage
    vmax = 100  # (mV)
    # In MOOSE we set up lookup tables beforehand
    camin = 0.0
    camax = 500.0
    # np.linspace default gives 50 sample points over [camin, camax].
    cai = np.linspace(camin, camax)
    a = Ra * cai**caix
    b = Rb
    ntau = 1 / tadj / ( a + b)
    ninf = a / (a + b)
    channel = moose.HHChannel('/library/KCa')
    # Z gate driven by concentration rather than voltage.
    channel.Zpower = 1
    channel.useConcentration = 1
    channel.gateZ[0].min = camin
    channel.gateZ[0].max = camax
    channel.gateZ[0].tableA = 1e3 * ninf / ntau
    channel.gateZ[0].tableB = 1e3 / ntau
    channel.Gbar = 0.0
    channel.Ek = -90e-3
    # addmsg1 = moose.Mstring('%s/addmsg1' % (channel.path))
    # addmsg1.value = '../Ca_conc	concOut	. concen'
    # Feed a constant [Ca2+] into the channel via a buffered pool.
    capool = moose.BufPool('%s/CaPool' % (channel.path)) # send a constant [Ca2+] to this channel
    capool.n = 5e-5 # n is actually number of molecules (in
                    # floating point though, but I am using it for
                    # conc, because only nOut src msg is available
    moose.connect(capool, 'nOut', channel, 'concen')
#
# kchannel.py ends here
|
subhacom/moose-core
|
tests/python/mus/kchannel.py
|
Python
|
gpl-3.0
| 5,916
|
[
"MOOSE",
"NEURON"
] |
08e03cf85a446a700052bb36c513fdaefc5be2c2a7de41cfcf01f4c11c08d01a
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from ..engine.topology import ZooKerasLayer
if sys.version >= '3':
long = int
unicode = str
class GaussianNoise(ZooKerasLayer):
    """
    Apply additive zero-centered Gaussian noise.
    This is useful to mitigate overfitting (you could see it as a form of random data augmentation).
    Gaussian Noise is a natural choice as corruption process for real valued inputs.
    As it is a regularization layer, it is only active at training time.
    When you use this layer as the first layer of a model, you need to provide the argument
    input_shape (a shape tuple, does not include the batch dimension).
    # Arguments
    sigma: Float, standard deviation of the noise distribution.
    input_shape: A shape tuple, not including batch.
    name: String to set the name of the layer.
          If not specified, its name will by default to be a generated string.
    >>> gaussiannoise = GaussianNoise(0.45, input_shape=(3, 4, 5), name="gaussiannoise1")
    creating: createZooKerasGaussianNoise
    """
    def __init__(self, sigma, input_shape=None, **kwargs):
        # First positional arg is the backend layer handle placeholder (None
        # lets the backend create the layer -- see the doctest's
        # `creating: createZooKerasGaussianNoise`). sigma is coerced to float
        # and input_shape tuple -> list for serialization.
        # NOTE(review): `if input_shape` treats an empty tuple like None --
        # presumably intended; verify.
        super(GaussianNoise, self).__init__(None,
                                            float(sigma),
                                            list(input_shape) if input_shape else None,
                                            **kwargs)
class GaussianDropout(ZooKerasLayer):
    """
    Apply multiplicative 1-centered Gaussian noise.
    As it is a regularization layer, it is only active at training time.
    When you use this layer as the first layer of a model, you need to provide the argument
    input_shape (a shape tuple, does not include the batch dimension).
    # Arguments
    p: Drop probability. Float between 0 and 1.
       The multiplicative noise will have standard deviation 'sqrt(p/(1-p))'.
    input_shape: A shape tuple, not including batch.
    name: String to set the name of the layer.
          If not specified, its name will by default to be a generated string.
    >>> gaussiandropout = GaussianDropout(0.45, input_shape=(4, 8))
    creating: createZooKerasGaussianDropout
    """
    def __init__(self, p, input_shape=None, **kwargs):
        # Mirrors GaussianNoise.__init__: None is the backend layer handle
        # placeholder; p is coerced to float; input_shape tuple -> list.
        super(GaussianDropout, self).__init__(None,
                                              float(p),
                                              list(input_shape) if input_shape else None,
                                              **kwargs)
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/pipeline/api/keras/layers/noise.py
|
Python
|
apache-2.0
| 3,021
|
[
"Gaussian"
] |
d13779a97d36836fd34635d22ebe9fdb6713ab41024cca8dc6fbbcc8f8a173fe
|
import ctypes
from ..codeprinter import CodePrinter
from .. import expression as e
from .. import functions as f
import expresso.visitor as visitor
from mpmath import mp
class c_complex(ctypes.Structure):
    """ctypes mirror of the generated C-side ``c_complex`` struct
    (two C doubles: real and imaginary part)."""
    _fields_ = [('real', ctypes.c_double), ('imag', ctypes.c_double)]

    def __str__(self):
        # BUGFIX: the old concatenation produced e.g. '1.0+-2.0j' for a
        # negative imaginary part; choose the sign explicitly instead.
        sign = '+' if self.imag >= 0 else '-'
        return '%s%s%sj' % (self.real, sign, abs(self.imag))

    def __repr__(self):
        return '(' + str(self.real) + ',' + str(self.imag) + ')'

    def __complex__(self):
        """Convert to a native Python complex number."""
        return complex(self.real, self.imag)

    def is_complex(self):
        # Duck-typing hook used by the expression printers.
        return True

    @staticmethod
    def np_type():
        """NumPy dtype matching this struct's memory layout."""
        import numpy
        return numpy.complex128
class CCodePrinter(CodePrinter):
    """Translate expresso expressions into compilable C++11 source.

    Visitor-based printer: each @visitor-decorated ``visit`` overload handles
    one expression node type. ``print_file`` assembles includes, auxiliary
    preamble code (complex shim, parallel_for, ndarray mapping) and scalar +
    vectorized ``extern "C"`` wrappers for each function definition.
    """
    def __init__(self):
        super(CCodePrinter,self).__init__()
        # Headers/namespaces every generated translation unit needs.
        self.includes = {'cmath','complex','thread','future','vector'}
        self.namespaces = {'std'}
        # expresso type -> C++ typename; None is the fallback (complex).
        self.typenames = {
            f.Types.Boolean:'bool',
            f.Types.Natural:'unsigned',
            f.Types.Integer:'int',
            f.Types.Rational:'double',
            f.Types.Real:'double',
            f.Types.Complex:'c_complex',
            None:'c_complex'
        }
        # C++ typename -> ctypes type for binding the compiled library.
        self.ctype_map = {
            'bool':ctypes.c_bool,
            'unsigned':ctypes.c_uint,
            'int':ctypes.c_int,
            'double':ctypes.c_double,
            'c_complex':c_complex
        }
        self.type_converters = {}
        self.need_conversion = {}
        self.preamble = set()
        self.globals = set()
        # ABI-stable complex struct plus converters to/from std::complex.
        c_complex_type = '''
struct c_complex{ double real; double imag; };
inline std::complex<double> to_cpp_complex(c_complex z){ return std::complex<double>(z.real,z.imag); }
inline c_complex to_c_complex(std::complex<double> z){ return c_complex{z.real(),z.imag()}; }
'''
        self.preamble.add(c_complex_type)
        self.type_converters['c_complex'] = (lambda x:'to_cpp_complex(%s)' % x,lambda x:'to_c_complex(%s)' % x)
        # Mixed double/complex arithmetic operators (instantiated for + - * /).
        complex_operators = '''
inline complex<double> operator{0}(complex<double> lhs, const double & rhs){{
  return lhs {0} complex<double>(rhs);
}}
inline complex<double> operator{0}(const double & lhs,complex<double> rhs){{
  return complex<double>(lhs) {0} rhs;
}}
'''
        self.preamble.update(set([complex_operators.format(op) for op in ['+', '-', '*', '/']]))
        # Thread-pool helper used when a definition is marked parallel.
        parallel_for = '''
inline unsigned hardware_thread_count(){ return std::thread::hardware_concurrency(); }
template<typename C1,typename C2,typename F> void parallel_for(C1 start,C2 end,F f,uintptr_t thread_count = hardware_thread_count()){
  if(end-start < thread_count) thread_count = end-start;
  std::vector<std::future<void>> handles(thread_count);
  C2 block_size = (end - start)/thread_count;
  for(uintptr_t i=0;i<thread_count-1;++i){
    handles[i] = std::async(std::launch::async,[=](){
      C2 begin = start+block_size*i, end = start+block_size*(i+1);
      for(C2 j=begin;j<end;++j){ f(j); }
    });
  }
  handles[thread_count-1] = std::async([&](){
    C2 begin = start+block_size*(thread_count-1);
    for(C2 j=begin;j<end;++j)f(j);
  });
  for(auto & handle:handles) handle.wait();
}
'''
        self.preamble.add(parallel_for)
        # Bounds-checked strided view over an externally-owned buffer,
        # used to expose numpy arrays to the generated code.
        ndarray = '''
template<size_t _size,size_t _stride, size_t... sizes> struct ndarray_index_calculator {
  using rest = ndarray_index_calculator<sizes...>;
  static size_t size(){ return _size; }
  template <typename ... Args> static bool is_valid(size_t idx,Args ... args){ if(!rest::is_valid(args...)) return false; return idx < size(); }
  static size_t stride(){ return _stride; }
  template <typename ... Args> static size_t get_index(size_t idx,Args ... args){ return stride() * idx + rest::get_index(args...); }
};
template<size_t _size,size_t _stride> struct ndarray_index_calculator <_size,_stride> {
  static size_t size(){ return _size; }
  static bool is_valid(size_t idx){ return idx < size(); }
  static size_t stride(){ return _stride; }
  static size_t get_index(size_t idx){ return idx; }
};
template <class T,size_t ... size_stride> struct mapped_ndarray{
  T * data;
  T default_value;
  using index_calculator = ndarray_index_calculator<size_stride...>;
  mapped_ndarray(T * d,const T &_default_value = 0):data(d),default_value(_default_value){ }
  template <typename ... Args> T & operator()(Args ... indices){
    if(!index_calculator::is_valid(indices...)){ return default_value; }
    return data[index_calculator::get_index(indices...)];
  }
};
'''
        self.preamble.add(ndarray)
        self.preamble.add('''template <typename T> int sign(T val) { return (T(0) <= val) - (val < T(0)); }''')
    def needs_brackets_in(self,expr,parent):
        # Operators need parenthesization inside other expressions.
        if expr.is_atomic:
            return False
        return expr.function.is_operator
    @visitor.on('expr',parent = CodePrinter)
    def visit(self,expr):
        raise ValueError('cannot print expression %s' % expr)
    @visitor.function(f.CustomFunction)
    def visit(self,expr):
        # Custom functions must ship their own C implementation via `ccode`.
        f = expr.args[0].value
        if hasattr(f,'ccode'):
            self.preamble.add(f.ccode)
        else:
            raise ValueError('cannot compile custom function %s' % expr)
        return "%s(%s)" % (f.name,','.join([self(arg) for arg in expr.args[1:]]))
    @visitor.function(f.exponentiation)
    def visit(self,expr):
        return 'pow(%s,%s)' % (self(expr.args[0]),self(expr.args[1]))
    @visitor.atomic(f.I)
    def visit(self,expr):
        return "std::complex<double>(0,1)"
    @visitor.atomic(f.pi)
    def visit(self,expr):
        return "M_PI"
    @visitor.atomic(f.e)
    def visit(self,expr):
        return "M_E"
    @visitor.function(f.Xor)
    def visit(self,expr):
        return self.print_binary_operator(expr,symbol='^')
    @visitor.function(f.Not)
    def visit(self,expr):
        return "!(%s)" % self(expr.args[0])
    @visitor.function(f.equal)
    def visit(self,expr):
        return self.print_binary_operator(expr,'==')
    @visitor.function(f.fraction)
    def visit(self,expr):
        return "1./(%s)" % self(expr.args[0])
    @visitor.function(f.mod)
    def visit(self,expr):
        return "fmod(%s,%s)" % (self(expr.args[0]),self(expr.args[1]))
    @visitor.function(f.InnerPiecewise)
    def visit(self,expr):
        # Chain of nested ternaries; falls through to 0 when nothing matches.
        parts = ['(%s)?(%s):' % (self(arg.args[1]),self(arg.args[0])) for arg in expr.args]
        return '(%s%s)' % (''.join(parts),self(e.S(0)))
    @visitor.symbol
    def visit(self,expr):
        # Symbols may carry a registered conversion (e.g. vector indexing).
        converter = self.need_conversion.get(expr)
        if converter:
            if isinstance(converter,tuple):
                return converter[0](expr)
            else:
                return converter(expr)
        return expr.name
    @visitor.atomic(e.S(True))
    def visit(self,expr):
        return 'true'
    @visitor.atomic(e.S(False))
    def visit(self,expr):
        return 'false'
    def print_includes(self):
        return '\n'.join(['#include <%s>' % name for name in self.includes])
    def print_namespaces(self):
        return '\n'.join(['using namespace %s;' % namespace for namespace in self.namespaces])
    def print_auxiliary_code(self):
        return '%s\n%s' % ('\n'.join(self.preamble),'\n'.join(self.globals))
    def print_file(self,*function_definitions):
        """Emit the full translation unit for the given definitions."""
        function_code = [self.generate_function(f) for f in function_definitions]
        function_code += [self.generate_vector_function(f,use_previous_definition=True) for f in function_definitions]
        return "\n\n".join([self.print_includes(),
                            self.print_namespaces(),
                            self.print_auxiliary_code()] + function_code )
    def print_typename(self,expr):
        return self.typenames.get(expr,self.typenames[None])
    def print_vector_typename(self,expr):
        return "%s*" % self.typenames.get(expr,self.typenames[None])
    def get_ctype(self,typename):
        # Trailing '*' in the C++ typename maps to a ctypes POINTER.
        if typename[-1] == '*':
            return ctypes.POINTER(self.get_ctype(typename[:-1]))
        return self.ctype_map[typename]
    @visitor.function(f.unfoldable)
    def visit(self,expr):
        return self.visit(expr.args[0])
    @visitor.function(f.ArrayAccess)
    def visit(self,expr):
        # Bind the numpy array's raw pointer into a global mapped_ndarray
        # declaration; indices are reversed to match C row-major order.
        arr = expr.args[0].value
        pointer = arr.ctypes.data
        type = f.type_converters.numpy_c_typenames[arr.dtype.name]
        size = ','.join(["%s,%s" % (size,stride/arr.itemsize) for size,stride in zip(arr.shape,arr.strides)])
        name = expr.args[0].name
        self.globals.add('mapped_ndarray<%s,%s> %s((%s*)%s);' % (type,size,name,type,pointer))
        return "%s(%s)" % (name,','.join([self(arg) for arg in reversed(expr.args[1:])]))
    @visitor.obj(mp.mpf)
    def visit(self,expr):
        return repr(float(expr.value))
    @visitor.obj(mp.mpc)
    def visit(self,expr):
        v = expr.value
        return "complex<double>(%s,%s)" % (repr(float(v.real)),repr(float(v.imag)))
    def optimize_function(self,expr):
        from expresso.pycas.evaluators.optimizers import optimize_for_compilation
        return optimize_for_compilation(expr)
    def get_body_code(self,definition):
        """Print the definition's expression, applying the return-type
        conversion (e.g. std::complex -> c_complex) when registered."""
        if definition.return_type == None:
            return_type = self.print_typename(f.Type(definition.expr).evaluate())
        else:
            return_type = self.print_typename(definition.return_type)
        f_code = self(self.optimize_function(definition.expr))
        if return_type in self.type_converters and isinstance(self.type_converters[return_type],tuple):
            f_code = self.type_converters[return_type][1](f_code)
        return f_code
    def generate_function(self,definition):
        """Emit the scalar extern "C" wrapper and record its ctypes
        signature on the definition (c_return_type / c_arg_types)."""
        if definition.return_type == None:
            return_type = self.print_typename(f.Type(definition.expr).evaluate())
        else:
            return_type = self.print_typename(definition.return_type)
        args = definition.args
        if definition.arg_types == None:
            argument_types = [self.print_typename(f.Type(arg).evaluate()) for arg in args]
        else:
            argument_types = [self.print_typename(f.Type(arg).evaluate()) for arg in definition.arg_types]
        self.need_conversion = {arg:self.type_converters[t]
                                for arg,t in zip(args,argument_types)
                                if t in self.type_converters}
        f_code = self.get_body_code(definition)
        formatted = (return_type, definition.name,
                     ','.join(['%s %s' % (type,arg.name) for arg,type in zip(args,argument_types)]),
                     f_code)
        definition.c_return_type = self.get_ctype(return_type)
        definition.c_arg_types = [self.get_ctype(arg_type) for arg_type in argument_types]
        return 'extern "C"{\n%s %s(%s){\n\treturn %s;\n}\n}' % formatted
    def vectorized_name(self,name):
        return "__%s_vector" % name
    def generate_vector_function(self,definition,use_previous_definition = False):
        """Emit the vectorized wrapper: loops (or parallel_for's) over
        __size elements, writing results into the __res output array."""
        if definition.return_type == None:
            return_type = self.print_vector_typename(f.Type(definition.expr).evaluate())
        else:
            return_type = self.print_vector_typename(definition.return_type)
        args = definition.args
        if definition.arg_types == None:
            argument_types = [self.print_vector_typename(f.Type(arg).evaluate()) for arg in args]
        else:
            argument_types = [self.print_vector_typename(f.Type(arg).evaluate()) for arg in definition.arg_types]
        # Arguments become element accesses a[__i] inside the loop body.
        self.need_conversion.update({arg:lambda a:'%s[__i]' % a for arg in args})
        argument_types = ['unsigned',return_type] + argument_types
        if not use_previous_definition :
            f_code = self.get_body_code(definition)
        else:
            # Reuse the already-emitted scalar function instead of
            # re-printing the whole expression.
            f_code = '%s(%s)' % (definition.name,','.join(self(arg) for arg in definition.args))
        if definition.parallel:
            f_code = 'parallel_for(0,__size,[&](unsigned __i){ __res[__i] = %s; }); ' % f_code
        else:
            f_code = 'for(unsigned __i = 0; __i<__size;++__i) __res[__i] = %s;' % f_code
        rargument_types = [argument_types[0]] + ['%s __restrict__ ' % t for t in argument_types[1:]]
        formatted_args = ','.join(['%s %s' % vardef for vardef in
                                   zip(rargument_types,['__size','__res'] + list(args))])
        formatted = (self.vectorized_name(definition.name), formatted_args, f_code)
        definition.c_vectorized_arg_types = [self.get_ctype(arg_type) for arg_type in argument_types]
        return 'extern "C"{\nvoid %s(%s){\n\t%s\n}\n}' % formatted
class CompilerError(Exception):
    """Raised when the generated C++ source fails to compile.

    Normalizes the compiler diagnostics (Python 2 str/unicode handling):
    ``self.message`` is always unicode while the Exception payload is a
    UTF-8 byte string.
    """
    def __init__(self, message):
        if isinstance(message, unicode):
            super(CompilerError, self).__init__(message.encode('utf-8'))
            self.message = message
        elif isinstance(message, str):
            super(CompilerError, self).__init__(message)
            self.message = message.decode('utf-8')
        else:
            raise TypeError
    def __unicode__(self):
        return self.message
def ccompile(*function_definitions,**kwargs):
    """Compile expresso function definitions into a loaded shared library.

    Pipes the generated C++ through the system compiler ($CXX, default g++),
    links a shared object, loads it with ctypes and returns a CompiledLibrary
    exposing one CompiledFunction attribute per definition (supporting both
    scalar calls and vectorized numpy-array calls).

    Keyword args: flags (extra compiler flags), print_output, print_warnings.
    Raises CompilerError / RuntimeError on compiler or linker failure.
    """
    import tempfile
    import shutil
    import ctypes
    import numpy as np
    from subprocess import Popen, PIPE
    from os import environ
    ccode_printer = CCodePrinter()
    code = ccode_printer.print_file(*function_definitions)
    output_directory = tempfile.mkdtemp()
    object_file = output_directory+'/'+'pycas_compiled_expression.o'
    flags = kwargs.pop('flags',[])
    # Source is fed on stdin ('-xc++' + trailing '-').
    p = Popen([environ.get('CXX','g++'),'-o',object_file] + flags + ['-c','-xc++','-std=c++11','-ffast-math','-O3','-fPIC','-'],stdin=PIPE, stdout=PIPE, stderr=PIPE)
    p.stdin.write(code)
    p.stdin.close()
    return_code = p.wait()
    if(return_code!=0):
        raise CompilerError("Cannot compile expression: " + p.stderr.read().decode('utf-8'))
    print_output = kwargs.pop('print_output',False)
    print_warnings = print_output or kwargs.pop('print_warnings',False)
    if print_warnings:
        print p.stderr.read()
    if print_output:
        print p.stdout.read()
    shared_library = output_directory+'/'+'pycas_compiled_expression.so'
    p = Popen(['g++','-shared','-o',shared_library,object_file],stdin=PIPE, stdout=PIPE, stderr=PIPE)
    p.stdin.close()
    return_code = p.wait()
    if(return_code!=0):
        raise RuntimeError("Cannot convert to shared library: " + p.stderr.read())
    if print_warnings:
        print p.stderr.read()
    if print_output:
        print p.stdout.read()
    lib = ctypes.cdll.LoadLibrary(shared_library)
    # The build directory can be deleted once the library is mapped.
    shutil.rmtree(output_directory)
    compiled_functions = {}  # NOTE(review): never used -- leftover?
    class CompiledFunction(object):
        # Callable wrapper pairing the scalar entry point (cf) with its
        # vectorized counterpart (cf_vector).
        def __init__(self,cf,cf_vector):
            self.cf = cf
            self.cf_vector = cf_vector
        def __call__(self,*args,**kwargs):
            if(len(args) == 0):
                return self.cf()
            # Lists/tuples are promoted to numpy arrays of the expected dtype.
            if any([isinstance(arg,(list,tuple)) for arg in args]):
                argtypes = self.cf_vector.argtypes
                args = [np.array(arg,dtype=t) for t,arg in zip(argtypes[2:],args)]
            if any([isinstance(arg,np.ndarray) for arg in args]):
                # Vectorized path: broadcast scalars against the common
                # shape and dispatch to the extern "C" vector function.
                argtypes = self.cf_vector.argtypes
                shape = None
                for arg in args:
                    if isinstance(arg,np.ndarray):
                        if shape == None:
                            shape = arg.shape
                        else:
                            if arg.shape != shape:
                                raise AttributeError('c function got arguments with different shapes')
                args = [arg if isinstance(arg,np.ndarray) else arg * np.ones(shape) for arg in args]
                args = [np.ascontiguousarray(arg,dtype=t._type_) for t,arg in zip(argtypes[2:],args)]
                if argtypes[1]._type_ == c_complex:
                    restype = c_complex.np_type()
                else:
                    restype = argtypes[1]._type_
                res = kwargs.get('res')
                if res is None:
                    res = np.zeros(args[0].shape,dtype=restype)
                else:
                    # Caller-provided output buffer must match exactly.
                    assert res.dtype == restype
                    assert res.shape == shape
                    assert res.flags['C_CONTIGUOUS']
                call_args = [res.size,res.ctypes.data_as(argtypes[1])]
                call_args += [arg.ctypes.data_as(t) for t,arg in zip(argtypes[2:],args)]
                self.cf_vector(*call_args)
                return res
            return self.cf(*args)
        def address(self):
            """Raw function pointer address of the scalar entry point."""
            return ctypes.cast(self.cf, ctypes.c_void_p).value
    class CompiledLibrary(object):
        # Holds the loaded ctypes library and the generated source code.
        def __init__(self,lib,code):
            self.lib = lib
            self.code = code
    res = CompiledLibrary(lib,code)
    # Bind ctypes signatures recorded by the printer and attach wrappers.
    for definition in function_definitions:
        f = getattr(lib,definition.name)
        f.argtypes = definition.c_arg_types
        f.restype = definition.c_return_type
        f_vector = getattr(lib, ccode_printer.vectorized_name(definition.name))
        f_vector.argtypes = definition.c_vectorized_arg_types
        f_vector.restype = None
        setattr(res,definition.name,CompiledFunction(f,f_vector))
    return res
|
TheLartians/Expresso
|
expresso/pycas/compilers/c_compiler.py
|
Python
|
mit
| 17,186
|
[
"VisIt"
] |
b5be8805a1e4ea7924ef1bcd431c3099101ab552c5611e9d9dee7e5c4eacd04d
|
# Author: Kyle A. Beauchamp <kyleabeauchamp@gmail.com>
# Contributors: Robert McGibbon <rmcgibbo@gmail.com>,
# Matthew Harrigan <matthew.p.harrigan@gmail.com>
# Copyright (c) 2014, Stanford University and the Authors
# All rights reserved.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function, division, absolute_import
from six.moves import cPickle
import numpy as np
import mdtraj as md
from sklearn.base import TransformerMixin
import sklearn.pipeline
from sklearn.externals.joblib import Parallel, delayed
from ..base import BaseEstimator
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def featurize_all(filenames, featurizer, topology, chunk=1000, stride=1):
    """Load and featurize many trajectory files.

    Parameters
    ----------
    filenames : list of strings
        List of paths to MD trajectory files
    featurizer : Featurizer
        The featurizer to be invoked on each trajectory trajectory as
        it is loaded
    topology : str, Topology, Trajectory
        Topology or path to a topology file, used to load trajectories with
        MDTraj
    chunk : {int, None}
        If chunk is an int, load the trajectories up in chunks using
        md.iterload for better memory efficiency (less trajectory data needs
        to be in memory at once)
    stride : int, default=1
        Only read every stride-th frame.

    Returns
    -------
    data : np.ndarray, shape=(total_length_of_all_trajectories, n_features)
    indices : np.ndarray, shape=(total_length_of_all_trajectories)
    fns : np.ndarray shape=(total_length_of_all_trajectories)
        These three arrays all share the same indexing, such that data[i] is
        the featurized version of indices[i]-th frame in the MD trajectory
        with filename fns[i].
    """
    data = []
    indices = []
    fns = []

    # `filename` instead of `file`: avoids shadowing the Python 2 builtin.
    for filename in filenames:
        # .h5 files carry their own topology; everything else needs `top`.
        kwargs = {} if filename.endswith('.h5') else {'top': topology}
        count = 0
        for t in md.iterload(filename, chunk=chunk, stride=stride, **kwargs):
            x = featurizer.partial_transform(t)
            n_frames = len(x)

            data.append(x)
            # Original frame indices within this file (accounting for stride).
            indices.append(count + (stride * np.arange(n_frames)))
            fns.extend([filename] * n_frames)
            count += (stride * n_frames)
    if len(data) == 0:
        # BUGFIX: the previous message was the unhelpful "None!".
        raise ValueError("no frames were loaded from %d trajectory file(s); "
                         "check that the files exist and are non-empty"
                         % len(filenames))

    return np.concatenate(data), np.concatenate(indices), np.array(fns)
def load(filename):
    """Deserialize a featurizer previously written with Featurizer.save()."""
    with open(filename, 'rb') as handle:
        return cPickle.load(handle)
class Featurizer(BaseEstimator, TransformerMixin):
    """Base class for objects that featurize Trajectories.

    Notes
    -----
    At the bare minimum, a featurizer must implement the `partial_transform(traj)`
    member function.  A `transform(traj_list)` for featurizing multiple
    trajectories in batch will be provided.
    """

    def __init__(self):
        pass

    def featurize(self, traj):
        # Deprecated entry point kept only to fail loudly for old callers.
        raise NotImplementedError('This API was removed. Use partial_transform instead')

    def partial_transform(self, traj):
        """Featurize an MD trajectory into a vector space.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            A molecular dynamics trajectory to featurize.

        Returns
        -------
        features : np.ndarray, dtype=float, shape=(n_samples, n_features)
            A featurized trajectory is a 2D array of shape
            `(length_of_trajectory x n_features)` where each `features[i]`
            vector is computed by applying the featurization function
            to the `i`th snapshot of the input trajectory.

        See Also
        --------
        transform : simultaneously featurize a collection of MD trajectories
        """
        # Base implementation is a no-op placeholder (returns None);
        # subclasses must override.
        pass

    def fit(self, traj_list, y=None):
        # Stateless by default; present for sklearn API compatibility.
        return self

    def transform(self, traj_list, y=None):
        """Featurize a several trajectories.

        Parameters
        ----------
        traj_list : list(mdtraj.Trajectory)
            Trajectories to be featurized.

        Returns
        -------
        features : list(np.ndarray), length = len(traj_list)
            The featurized trajectories.  features[i] is the featurized
            version of traj_list[i] and has shape
            (n_samples_i, n_features)
        """
        return [self.partial_transform(traj) for traj in traj_list]

    def save(self, filename):
        # Persist with pickle; counterpart of the module-level load().
        with open(filename, 'wb') as f:
            cPickle.dump(self, f)
class SuperposeFeaturizer(Featurizer):
    """Featurize frames by their atomic distances to a reference structure.

    Each frame is first superposed onto ``reference_traj`` and then
    represented by the per-atom Euclidean distances between the selected
    atoms and their positions in the first frame of the reference.

    Parameters
    ----------
    atom_indices : np.ndarray, shape=(n_atoms,), dtype=int
        Atoms whose distances to the reference make up the feature vector.
    reference_traj : md.Trajectory
        Reference conformation; only its first frame is used.
    superpose_atom_indices : np.ndarray, shape=(n_atoms,), dtype=int, optional
        Atoms used for the superposition step.  Defaults to
        ``atom_indices``.
    """

    def __init__(self, atom_indices, reference_traj, superpose_atom_indices=None):
        self.atom_indices = atom_indices
        # Align on the same atoms we measure unless told otherwise.
        if superpose_atom_indices is None:
            self.superpose_atom_indices = atom_indices
        else:
            self.superpose_atom_indices = superpose_atom_indices
        self.reference_traj = reference_traj
        self.n_features = len(self.atom_indices)

    def partial_transform(self, traj):
        """Return, for every frame, the distances of the selected atoms
        to their reference positions after superposition.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            Trajectory to featurize; it is superposed in place.

        Returns
        -------
        features : np.ndarray, dtype=float, shape=(n_samples, n_features)

        See Also
        --------
        transform : simultaneously featurize a collection of MD trajectories
        """
        traj.superpose(self.reference_traj,
                       atom_indices=self.superpose_atom_indices)
        displacement = (traj.xyz[:, self.atom_indices] -
                        self.reference_traj.xyz[0, self.atom_indices])
        return np.sqrt((displacement ** 2).sum(axis=2))
class AtomPairsFeaturizer(Featurizer):
    """Featurize frames by the distances between chosen atom pairs.

    Parameters
    ----------
    pair_indices : np.ndarray, shape=(n_pairs, 2), dtype=int
        Each row names the two atoms of one pair.
    periodic : bool, default=False
        Use the minimum image convention when the trajectory carries
        unitcell information.
    exponent : float
        Raise each distance to this power before returning it.
    """

    def __init__(self, pair_indices, periodic=False, exponent=1.):
        # TODO: validate pair_indices here (in-range indices, two columns).
        self.pair_indices = pair_indices
        self.n_features = len(self.pair_indices)
        self.periodic = periodic
        self.exponent = exponent

    def partial_transform(self, traj):
        """Return the (possibly exponentiated) pair distances per frame.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            A molecular dynamics trajectory to featurize.

        Returns
        -------
        features : np.ndarray, dtype=float, shape=(n_samples, n_features)

        See Also
        --------
        transform : simultaneously featurize a collection of MD trajectories
        """
        distances = md.geometry.compute_distances(traj, self.pair_indices,
                                                  periodic=self.periodic)
        return distances ** self.exponent
class DihedralFeaturizer(Featurizer):
    """Featurizer based on backbone / side-chain dihedral angles.

    Each frame is represented by the requested dihedral angles, or by the
    sine and cosine of each angle when ``sincos`` is True.

    Parameters
    ----------
    types : str or sequence of str
        One or more of ['phi', 'psi', 'omega', 'chi1', 'chi2', 'chi3', 'chi4']
    sincos : bool
        Instead of outputting the angle, return the sine and cosine of the
        angle as separate features.
    """

    def __init__(self, types=('phi', 'psi'), sincos=True):
        # The default is a tuple rather than a list to avoid the shared
        # mutable-default-argument pitfall; behavior is unchanged because
        # the value is copied into a fresh list below.
        if isinstance(types, str):
            types = [types]
        self.types = list(types)  # force a copy
        self.sincos = sincos
        known = {'phi', 'psi', 'omega', 'chi1', 'chi2', 'chi3', 'chi4'}
        if not set(types).issubset(known):
            raise ValueError('angles must be a subset of %s. you supplied %s' % (
                str(known), str(types)))

    def partial_transform(self, traj):
        """Featurize an MD trajectory via its dihedral (torsion) angles.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            A molecular dynamics trajectory to featurize.

        Returns
        -------
        features : np.ndarray, dtype=float, shape=(n_samples, n_features)
            One row per frame; two columns per angle when ``sincos``.

        See Also
        --------
        transform : simultaneously featurize a collection of MD trajectories
        """
        x = []
        for angle_type in self.types:
            # mdtraj exposes one compute_<name> function per dihedral type.
            func = getattr(md, 'compute_%s' % angle_type)
            y = func(traj)[1]
            if self.sincos:
                x.extend([np.sin(y), np.cos(y)])
            else:
                x.append(y)
        return np.hstack(x)
class KappaAngleFeaturizer(Featurizer):
    """Featurizer extracting kappa angles.

    The kappa angle of residue ``i`` is the angle formed by the CA atoms
    of residues ``i-2``, ``i`` and ``i+2``; a trajectory with ``n``
    suitable residues yields ``n - 4`` such angles per frame.

    Parameters
    ----------
    cos : bool
        Return the cosine of each angle instead of the angle itself.
    """

    def __init__(self, cos=True):
        self.cos = cos

    def partial_transform(self, traj):
        """Return the kappa angles (or their cosines) for each frame."""
        alpha_carbons = [atom.index for atom in traj.top.atoms
                         if atom.name == 'CA']
        # Fewer than five CA atoms cannot form a single kappa angle.
        if len(alpha_carbons) < 5:
            return np.zeros((len(traj), 0), dtype=np.float32)
        triplets = np.array(
            [(alpha_carbons[i - 2], alpha_carbons[i], alpha_carbons[i + 2])
             for i in range(2, len(alpha_carbons) - 2)])
        angles = md.compute_angles(traj, triplets)
        if self.cos:
            return np.cos(angles)
        # NOTE(review): this sanity check assumes every residue has a CA
        # atom -- confirm for systems containing non-protein residues.
        assert angles.shape == (traj.n_frames, traj.n_residues - 4)
        return angles
class SASAFeaturizer(Featurizer):
    """Featurize frames with solvent-accessible surface areas.

    Parameters
    ----------
    mode : {'atom', 'residue'}, default='residue'
        'atom' yields per-atom SASA; 'residue' sums the per-atom values
        over each residue.

    Other Parameters
    ----------------
    probe_radius : float
    n_sphere_points : int
        Forwarded verbatim to ``mdtraj.shrake_rupley``, overriding its
        defaults.

    See Also
    --------
    mdtraj.shrake_rupley
    """

    def __init__(self, mode='residue', **kwargs):
        self.mode = mode
        self.kwargs = kwargs

    def partial_transform(self, traj):
        """Return the per-atom or per-residue SASA for every frame."""
        return md.shrake_rupley(traj, mode=self.mode, **self.kwargs)
class ContactFeaturizer(Featurizer):
    """Featurize frames with residue-residue distances.

    Each frame becomes a vector of distances between pairs of amino-acid
    residues; how a residue-residue distance is measured is set by
    ``scheme``.

    Parameters
    ----------
    contacts : np.ndarray or 'all'
        Array of (0-indexed) residue-index pairs to compute contacts for
        (e.g. np.array([[0, 10], [0, 11]]) measures residue 0 against
        residues 10 and 11).  With 'all', every pair of residues
        separated by at least 3 residues is used.
    scheme : {'ca', 'closest', 'closest-heavy'}
        'ca' : distance between the two residues' alpha carbons;
        'closest' : closest distance between any two atoms of the
        residues;
        'closest-heavy' : closest distance between any two non-hydrogen
        atoms of the residues.
    ignore_nonprotein : bool
        With ``contacts == 'all'``, skip "residues" that are not protein
        (i.e. lack an alpha carbon).
    """

    def __init__(self, contacts='all', scheme='closest-heavy', ignore_nonprotein=True):
        self.contacts = contacts
        self.scheme = scheme
        self.ignore_nonprotein = ignore_nonprotein

    def partial_transform(self, traj):
        """Return the residue-residue contact distances for every frame.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            A molecular dynamics trajectory to featurize.

        Returns
        -------
        features : np.ndarray, dtype=float, shape=(n_samples, n_features)

        See Also
        --------
        transform : simultaneously featurize a collection of MD trajectories
        """
        # compute_contacts also returns the residue pairs; only the
        # distances are needed here.
        distances, _residue_pairs = md.compute_contacts(
            traj, self.contacts, self.scheme, self.ignore_nonprotein)
        return distances
class GaussianSolventFeaturizer(Featurizer):
    """Featurizer on kernel-weighted solute-solvent pairwise distances.

    A kernel is applied to each solute-solvent pairwise distance and the
    kernels are summed per solute atom, giving a vector of length
    ``len(solute_indices)`` per frame that can be read as each solute
    atom's degree of solvation.

    Parameters
    ----------
    solute_indices : np.ndarray, shape=(n_solute,)
        Indices of solute atoms
    solvent_indices : np.ndarray, shape=(n_solvent,)
        Indices of solvent atoms
    sigma : float
        Sets the length scale for the gaussian kernel
    periodic : bool
        Whether to consider a periodic system in distance calculations

    References
    ----------
    ..[1] Gu, Chen, et al. BMC Bioinformatics 14, no. Suppl 2
    (January 21, 2013): S8. doi:10.1186/1471-2105-14-S2-S8.
    """

    def __init__(self, solute_indices, solvent_indices, sigma, periodic=False):
        self.solute_indices = solute_indices
        self.solvent_indices = solvent_indices
        self.sigma = sigma
        self.periodic = periodic
        self.n_features = len(self.solute_indices)

    def partial_transform(self, traj):
        """Featurize an MD trajectory via per-atom solvent fingerprints.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            A molecular dynamics trajectory to featurize.

        Returns
        -------
        features : np.ndarray, dtype=float, shape=(n_samples, n_features)
            One row per frame, one column per solute atom.

        See Also
        --------
        transform : simultaneously featurize a collection of MD trajectories
        """
        fingerprints = np.zeros((traj.n_frames, self.n_features))
        # One (solute, solvent) row per solvent atom; atom indices are
        # integral, so build an integer array.
        atom_pairs = np.zeros((len(self.solvent_indices), 2), dtype=int)
        sigma = self.sigma
        for i, solute_i in enumerate(self.solute_indices):
            # Distances from this solute atom to every solvent atom.
            atom_pairs[:, 0] = solute_i
            atom_pairs[:, 1] = self.solvent_indices
            # BUG FIX: honor the ``periodic`` flag passed at construction
            # time; previously ``periodic=True`` was hard-coded here.
            distances = md.compute_distances(traj, atom_pairs,
                                             periodic=self.periodic)
            # NOTE(review): the kernel is exp(-d / (2*sigma^2)), i.e. the
            # distance is not squared -- confirm against Gu et al. [1]
            # before "fixing", since results depend on it.
            distances = np.exp(-distances / (2 * sigma * sigma))
            # Sum over solvent atoms for all frames at once.
            fingerprints[:, i] = np.sum(distances, axis=1)
        return fingerprints
class RawPositionsFeaturizer(Featurizer):
    """Featurize frames with their raw Cartesian coordinates.

    Parameters
    ----------
    atom_indices : None or array-like, dtype=int, shape=(n_atoms)
        If given, keep only the coordinates of these atoms; otherwise use
        every atom.
    ref_traj : None or md.Trajectory
        If given, superpose each trajectory onto the first frame of
        ``ref_traj`` before reading positions (restricted to
        ``atom_indices`` when both are supplied).  The superposition
        modifies the transformed trajectory in place.
    """

    def __init__(self, atom_indices=None, ref_traj=None):
        super(RawPositionsFeaturizer, self).__init__()
        self.atom_indices = atom_indices
        # Pre-slice the reference so it matches the atoms we will extract.
        if atom_indices is not None and ref_traj is not None:
            self.ref_traj = ref_traj.atom_slice(atom_indices)
        else:
            self.ref_traj = ref_traj

    def partial_transform(self, traj):
        """Return each frame's (optionally superposed) coordinates,
        flattened to one row per frame.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            A molecular dynamics trajectory to featurize.

        Returns
        -------
        features : np.ndarray, dtype=float, shape=(n_samples, n_features)

        Notes
        -----
        If superposition was requested (``ref_traj`` given in __init__),
        the input trajectory will be modified.

        See Also
        --------
        transform : simultaneously featurize a collection of MD trajectories
        """
        if self.atom_indices is not None:
            working = traj.atom_slice(self.atom_indices)
        else:
            working = traj
        if self.ref_traj is not None:
            working.superpose(self.ref_traj, parallel=False)
        return working.xyz.reshape(len(working), -1)
class RMSDFeaturizer(Featurizer):
    """Featurize frames by their RMSD to a series of reference frames.

    Parameters
    ----------
    trj0 : mdtraj.Trajectory
        Reference trajectory; trj0.n_frames is the number of features
        this featurizer produces.
    atom_indices : np.ndarray, default=None
        Atom indices used in the RMSD calculation.  With None, MDTraj
        should default to all atoms.
    """

    def __init__(self, trj0, atom_indices=None):
        self.n_features = trj0.n_frames
        self.trj0 = trj0
        self.atom_indices = atom_indices

    def partial_transform(self, traj):
        """Return, per frame, the RMSD to every reference frame.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            A molecular dynamics trajectory to featurize.

        Returns
        -------
        features : np.ndarray, dtype=float, shape=(n_samples, n_features)
            Column ``k`` is the RMSD of each frame to reference frame
            ``k``.

        See Also
        --------
        transform : simultaneously featurize a collection of MD trajectories
        """
        features = np.zeros((traj.n_frames, self.n_features))
        for ref_frame in range(self.n_features):
            features[:, ref_frame] = md.rmsd(traj, self.trj0,
                                             atom_indices=self.atom_indices,
                                             frame=ref_frame)
        return features
class DRIDFeaturizer(Featurizer):
    """Featurize frames with the Distribution of Reciprocal Interatomic
    Distances (DRID) representation.

    Each frame is represented by the first three moments of a collection
    of reciprocal interatomic distances; see [1] for details.

    References
    ----------
    .. [1] Zhou, Caflisch; Distribution of Reciprocal of Interatomic Distances:
       A Fast Structural Metric. JCTC 2012 doi:10.1021/ct3003145

    Parameters
    ----------
    atom_indices : array-like of ints, default=None
        Atoms included in the DRID computation; all atoms when None.
    """

    def __init__(self, atom_indices=None):
        self.atom_indices = atom_indices

    def partial_transform(self, traj):
        """Return the DRID feature matrix, one row per frame.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            A molecular dynamics trajectory to featurize.

        Returns
        -------
        features : np.ndarray, dtype=float, shape=(n_samples, n_features)

        See Also
        --------
        transform : simultaneously featurize a collection of MD trajectories
        """
        return md.geometry.compute_drid(traj, self.atom_indices)
class TrajFeatureUnion(BaseEstimator, sklearn.pipeline.FeatureUnion):
    """Mixtape version of sklearn.pipeline.FeatureUnion

    Notes
    -----
    Works on lists of trajectories.
    """

    def fit_transform(self, traj_list, y=None, **fit_params):
        """Fit all transformers using `trajectories`, transform the data
        and concatenate results.

        Parameters
        ----------
        traj_list : list (of mdtraj.Trajectory objects)
            Trajectories to featurize
        y : Unused
            Unused

        Returns
        -------
        Y : list (of np.ndarray)
            Y[i] is the featurized version of X[i]
            Y[i] will have shape (n_samples_i, n_features), where
            n_samples_i is the length of trajectory i and n_features
            is the total (concatenated) number of features in the
            concatenated list of featurizers.
        """
        self.fit(traj_list, y, **fit_params)
        return self.transform(traj_list)

    def transform(self, traj_list):
        """Transform traj_list separately by each transformer, concatenate results.

        Parameters
        ----------
        traj_list : list (of mdtraj.Trajectory objects)
            Trajectories to featurize

        Returns
        -------
        Y : list (of np.ndarray)
            Y[i] is the featurized version of X[i]
            Y[i] will have shape (n_samples_i, n_features), where
            n_samples_i is the length of trajectory i and n_features
            is the total (concatenated) number of features in the
            concatenated list of featurizers.
        """
        # NOTE(review): relies on sklearn's *private* helper
        # sklearn.pipeline._transform_one -- verify it still exists (and
        # keeps this signature) when upgrading scikit-learn.
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(sklearn.pipeline._transform_one)(trans, name, traj_list, self.transformer_weights)
            for name, trans in self.transformer_list)
        # Xs[feature_ind][trj_ind] holds one featurizer's output for one
        # trajectory; hstack so each trajectory keeps one row per frame
        # with every featurizer's columns side by side.
        X_i_stacked = [np.hstack([Xs[feature_ind][trj_ind] for feature_ind in range(len(Xs))]) for trj_ind in range(len(Xs[0]))]
        return X_i_stacked
class Slicer(Featurizer):
    """Select a subset of features (columns) from already-featurized data.

    Parameters
    ----------
    index : list of integers, optional, default=None
        Feature indices kept by ``partial_transform``.
    """

    def __init__(self, index=None):
        self.index = index

    def partial_transform(self, X):
        """Return the selected columns of a single sample array.

        Parameters
        ----------
        X : np.ndarray, shape=(n_samples, n_features)
            A sample to slice.

        Returns
        -------
        X2 : np.ndarray shape=(n_samples, n_feature_subset)
            Slice of X
        """
        # NOTE(review): when index is None, ``X[:, None]`` inserts a new
        # axis instead of raising -- callers should always set ``index``.
        return X[:, self.index]
class FirstSlicer(Slicer):
    """Select the first ``first`` features; shorthand for
    ``Slicer(index=arange(first))``.

    Parameters
    ----------
    first : int, optional, default=None
        Number of leading features to keep.
    """

    def __init__(self, first=None):
        self.first = first

    @property
    def index(self):
        # Computed lazily so the parent class machinery works unchanged.
        return np.arange(self.first)
|
dotsdl/msmbuilder
|
msmbuilder/featurizer/featurizer.py
|
Python
|
lgpl-2.1
| 27,412
|
[
"Gaussian",
"MDTraj"
] |
77a46d17c09c3119754fd8819cdd220397f6321fb1834067d05cc05d370a6056
|
#* This file is part of Zapdos, an open-source
#* application for the simulation of plasmas
#* https://github.com/shannon-lab/zapdos
#*
#* Zapdos is powered by the MOOSE Framework
#* https://www.mooseframework.org
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import numpy as np
import numpy.matlib

# Scratch script (Python 2 print syntax): subtract pairs of hard-coded
# Jacobian matrices and print the delta, so differing entries stand out.
# NOTE(review): presumably used while debugging PETSc Jacobians for
# Zapdos kernels (see the notes string below) -- confirm with the author.

# Earlier experiments, kept commented out for reference:
# J_diff = np.mat('1 -1; -1 1')
# J_diff_diri_left = np.mat('1 0; -1 1')
# J_diri_left = J_diff_diri_left - J_diff
# print J_diri_left
# J = np.mat('1 0; 0 1')
# Jinv = np.linalg.inv(J)
# R = np.array([1,0])
# x = np.dot(Jinv,R)
# print x

# Finite-difference Jacobian, old vs. new; the delta shows what changed.
J_FD_old = np.mat('1 0 0 0; -4 4 0 0; 0 0 2 -2; 0 0 0 1')
J_FD_new = np.mat('1 0 0 0; -2 2 1 -1; -2 2 1 -1; 0 0 0 1')
J_FD_delta = J_FD_new - J_FD_old
print J_FD_delta

# Hand-coded Jacobian, old vs. new.
J_H_old = np.mat('1 0 0 0; -4 4 0 0; 0 0 2 -2; 0 0 0 1')
J_H_new = np.mat('1 0 0 0; 0 0 2 -2; -2 2 2 -2; 0 0 0 1')
J_H_delta = J_H_new - J_H_old
print J_H_delta
''' Confident Jacobians:
m-DGDiffusionInterface
m-Diffusion
m-DirichletBC
m-MatchedValueBC
InterfaceKernels
z-DGAdvectionInterface
z-DGMatDiffusionLogInt
z-DGMatDiffusionInt
z-DGPenaltyTiedValue
z-InterfaceAdvection (confirmed with interpolated electron transport coefficients)
z-InterfaceLogDiffusionElectrons (confirmed with interpolated electron transport coefficients)
Kernels
z-ElectronTimeDerivative (confirmed with interpolated electron transport coefficients)
z-EFieldAdvection (confirmed with interpolated electron transport coefficients)
z-CoeffDiffusion (confirmed with interpolated electron transport coefficients)
z-ElectronsFromIonizationLFA_KV
z-LogStabilizationMoles (confirmed with interpolated electron transport coefficients)
z-ReactantFirstOrderRxn
z-ReactantAARxn
z-CoeffDiffusionLin (confirmed with interpolated electron transport coefficients)
z-ChargeSourceMoles_KV (confirmed with interpolated electron transport coefficients)
z-IonsFromIonizationLFA_KV
z-EFieldArtDiff
z-ProductFirstOrderRxn
z-ProductAABBRxn
z-NeumannCircuitVoltageMoles_KV
z-EFieldAdvectionElectrons (confirmed with interpolated electron transport coefficients)
z-EFieldAdvectionEnergy (confirmed with interpolated electron transport coefficients)
z-CoeffDiffusionElectrons (confirmed with interpolated electron transport coefficients)
z-CoeffDiffusionEnergy (confirmed with interpolated electron transport coefficients)
z-InterpCoeffDiffusion
z-ElectronsFromIonization (Now confirmed with interpolated electron transport coefficients)
z-JouleHeating (Now confirmed with interpolated electron transport coefficients)
z-IonsFromIonization (Now confirmed with interpolated electron transport coefficients)
z-ElectronEnergyLossFromIonization (Now confirmed with interpolated electron transport coefficients)
z-ElectronEnergyLossFromElastic (Now confirmed with interpolated electron transport coefficients)
z-ElectronEnergyLossFromExcitation (Now confirmed with interpolated electron transport coefficients)
Boundary Conditions
z-DCElectronBC
z-DCIonBC
z-GradMeanEnZeroBC (confirmed with interpolated electron transport coefficients)
z-HagelaarAnodicBC
z-HagelaarIonBC (confirmed with interpolated electron transport coefficients)
z-HagelaarElectronBC (confirmed with interpolated electron transport coefficients)
z-HagelaarEnergyBC (confirmed with interpolated electron transport coefficients)
z-MatchedValueLogBC (confirmed with interpolated electron transport coefficients)
DGKernels
z-DGEFieldAdvection # This will only work as long as the variable and coupled variable have the same number of shape functions in each element
z-DGCoeffDiffusion
Check list for units changed:
ElectronTimeDerivative
EFieldAdvectionElectrons
CoeffDiffusionElectrons
ElectronsFromIonization
LogStabilizationMoles
EFieldAdvection
CoeffDiffusion
ReactantFirstOrderRxn
ReactantAARxn
CoeffDiffusionLin
ChargeSourceMoles_KV
IonsFromIonization
ProductFirstOrderRxn
ProductAABBRxn
EfieldAdvectionEnergy
CoeffDiffusionEnergy
JouleHeating
ElectronEnergyLossFromIonization
ElectronEnergyLossFromExcitation
ElectronEnergyLossFromElastic
HagelaarIonBC
HagelaarElectronBC
HagelaarEnergyBC
DCIonBC
InterfaceAdvection
InterfaceLogDiffusionElectrons
ElectronRateIonization
IonRateIonization
ElectronEnergyRateIonization
ElectronEnergyRateExcitation
ElectronEnergyRateElastic (as long as kel is a constant)
'''
|
lindsayad/zapdos
|
problems/PetscJacobians.py
|
Python
|
lgpl-2.1
| 4,310
|
[
"MOOSE"
] |
5642969b821183807770e19b38f0b18ee100463384a1bcb882036c117a569fb5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Find synonymous and non-synonymous SNP variants in fasta alignments
__authors__ = "Eric Normandeau"
__program_name__ = "synonym_storm"
__version_info__ = ('0', '0', '3')
__version__ = '.'.join(__version_info__)
__revision_date__ = "2010-06-14"
# Importing modules
import os
import sys
import getopt
import platform
from Bio import SeqIO
# Class definitions
class AutoDict(dict):
    """Implementation of perl's autovivification feature."""

    def __missing__(self, key):
        # Missing keys spring into existence as empty AutoDicts, so
        # arbitrarily deep assignments like d[a][b][c] = v just work.
        child = self[key] = type(self)()
        return child
# Function definitions
def help():
    """Print the program's man-page-style help text to stdout."""
    _plateform = platform.system()
    name = __program_name__
    text = """
%s(1) User Commands %s(1)
\033[1mNAME\033[0m
\t%s - Synonymous vs. non-synonymous SNPs in fasta alignments
\033[1mSYNOPSIS\033[0m
\t\033[1mpython %s.py \033[0m[\033[4mOPTION\033[0m] [\033[4mFILE\033[0m]...
\033[1mDESCRIPTION\033[0m
\tCount SNP variants in a fasta alignment file.
\t%s uses the Biopython library to parse a fasta file
\tcontaining aligned sequences all trimmed to the same length
\t(including missing nucleotides, represented by "-") and
\tstarting at the +1 nucleotide of the proper reading frame.
\tIt then counts possible SNP variants for each position in the aligned
\tsequences and determines whether the substitution is synonymous or
\tnon-synonymous. Optionally, the variant count can be done separatly
\tfor user specified groups. The output is redirected to a file
\tcontaining the name of the input file plus one variant per line with
\tthe position, count for the frequent and rare variants, either total
\tor separated per group, and the impact of the substitution (synonymous
\tor not). To use more than one group, specify a group text file
\tcontaining one group identifier per line. These names must be found
\tin the names of the sequences in the alignment input file. See the
\toutput format below.
\033[1mOUTPUT FORMAT\033[0m
\tInput_filename.txt
\tPos Freq Rare Syn
\t34 A:22 T:3 Yes
\t62 T:8 G:4 No
\t88 C:32 G:6 Yes
\t107 A:22 C:3 Yes
\t131 G:12 T:11 No
\033[1mOPTIONS\033[0m
\t\033[1m-h, --help\033[0m
\t\tDisplay the help of this program
\t\033[1m-i, --input\033[0m
\t\tInput file, in fasta format
\t\033[1m-o, --output\033[0m
\t\tOutput file, in fasta format
\t\033[1m-g, --group\033[0m
\t\tOptional group file
\033[1mAUTHORS\033[0m
\t%s
%s %s %s %s(1)
"""%(name, name, name, name, name, __authors__, name, __version__, __revision_date__, name)
    # The ANSI escape codes render bold/underline on Unix-like terminals;
    # on Windows they are stripped from the text before printing.
    if _plateform != 'Windows' and "this is great news":
        print text
    else:
        __Windows__ = "This is an abomination"
        remove = ["\033[1m", "\033[0m", "\033[4m"]
        for i in remove:
            text = text.replace(i, "")
        print text
        del(__Windows__)  # If only we could...
def import_groups(group_file):
groups = []
try:
with open(group_file) as test:
pass
except:
print "Input Error: Group file not found."
print "Use -h for help."
sys.exit(0)
with open(group_file) as f:
for line in f:
if line.strip() != "":
groups.append(line.strip())
return groups
def good_positions(in_file, out_folder, out_file, groups, min_count=2):
    """Count SNP variants in a fasta alignment file

    Tallies, for every nucleotide position, the codon variants observed
    per group and keeps the positions with exactly two well-supported
    variants that differ at that nucleotide.  Returns a dict mapping the
    absolute nucleotide index to the {codon: {group: count}} tallies.
    """
    fasta_sequences = SeqIO.parse(open(in_file), 'fasta')
    output_path = os.path.join(out_folder, out_file)
    # Probe for a writable output location; the bare except doubles as a
    # "folder missing" signal, in which case the folder is created.
    try:
        with open(output_path, "w") as test:
            pass
    except:
        print "Created", "'" + out_folder + "'", "folder to put result files in"
        os.mkdir(out_folder)
    end = 0
    names = []
    sequences = []
    # mega_dict["codonstart-offset"][codon][group] -> observation count
    mega_dict = AutoDict()
    codon_length = 3
    # Pull records until the iterator is exhausted; the StopIteration
    # raised by .next() (or any parse error) ends the loop via the bare
    # except.  (Python 2 iterator protocol.)
    while end == 0:
        try:
            temp_fasta = fasta_sequences.next()
            temp_name, temp_sequence = temp_fasta.id, temp_fasta.seq.tostring()
            names.append(temp_name)
            if len(temp_sequence) % 3 != 0:
                print "All sequence lengths must be factors of 3"
                sys.exit(0)
            sequences.append(temp_sequence)
        except:
            print "All sequences treated"
            end = 1
    try:
        len_seqs = len(sequences[0])
    except:
        print "No sequences were found"
        sys.exit(0)
    num_seqs = len(names)
    all_groups = "all"  # NOTE(review): unused
    # Tally, per position "codonstart-offset", how often each full codon
    # is observed in each group; a sequence's group is the first entry of
    # ``groups`` that prefixes its name, else "No_group".
    for i in xrange(0, len_seqs, 3):
        codon_start = i
        codon_end = codon_start + 3
        codons = [seq[codon_start:codon_end] for seq in sequences]
        for j in xrange(codon_length):
            pos = str(i) + "-" + str(j)
            for k in xrange(num_seqs):
                codon = codons[k]
                if "-" not in codon:  # skip codons with missing nucleotides
                    name = names[k]
                    group = "No_group"
                    variant = codon[j]  # NOTE(review): unused
                    for g in groups:
                        if name.startswith(g):
                            group = g
                    # A first sighting autovivifies an empty AutoDict,
                    # which cannot be incremented (TypeError), so it
                    # falls through to the initializing assignment.
                    try:
                        mega_dict[pos][codon][group] += 1
                    except:
                        mega_dict[pos][codon][group] = 1
    snp_dict = AutoDict()
    with open(output_path, "w") as f:
        many_variants = set()
        for p in sorted(mega_dict):
            # Positions with >2 codon variants are only reported; the
            # ``== 2`` test below excludes them from snp_dict.
            if len(mega_dict[p]) > 2:
                many_variants.add(p.split("-")[0])
            # Every variant must be seen at least min_count times (summed
            # over groups) for the position to qualify.
            good_pos = True
            for v in sorted(mega_dict[p]):
                count = 0
                for g in sorted(mega_dict[p][v]):
                    count += mega_dict[p][v][g]
                if count < min_count:
                    good_pos = False
            if good_pos == True and len(mega_dict[p]) == 2:
                snp_dict[p] = mega_dict[p]
        for i in sorted([int(x) for x in many_variants]):
            f.write(str(i + 1) + " has more than 2 variants\n")
        f.write("\nBegining of data:\n-----------------\n")
    # Keep only positions where the two codons actually differ at the
    # nucleotide of interest; key by absolute nucleotide index.
    good_snps = AutoDict()
    for p in sorted(snp_dict):
        pos_codon, pos_nuc = [int(x) for x in p.split("-")]
        c1 = sorted(snp_dict[p])[0]
        c2 = sorted(snp_dict[p])[1]
        if c1[pos_nuc] != c2[pos_nuc]:
            good_snps[pos_codon + pos_nuc] = snp_dict[p]
    print len(good_snps), "interesting positions found"
    return good_snps
def synonym_output(good_snps, output_path, groups):
    """Output synonym results to file.

    Appends one tab-separated line per SNP position: the 1-based
    position, both codon variants with their per-group counts, and
    whether the substitution is synonymous.
    """
    with open(output_path, "a") as f:
        # NOTE(review): sys.exit() here also terminates the caller when
        # no SNPs were found; a plain return might be gentler.
        if len(good_snps.keys()) == 0:
            f.write("No interesting snps found for this contig")
            sys.exit(0)
        for p in sorted(good_snps):
            # The two codon variants at this position, in sorted order.
            c1 = sorted(good_snps[p])[0]
            c2 = sorted(good_snps[p])[1]
            n1 = good_snps[p][c1]
            n2 = good_snps[p][c2]
            s = are_synonym(c1, c2)
            line_begin = str(p + 1)  # report 1-based positions
            line_c1 = c1
            line_c2 = c2
            for g in groups:
                # A group with no observations autovivifies to an empty
                # AutoDict, whose str() is "{}"; rewrite that as a 0.
                line_c1 += "\t" + g + "\t" + str(n1[g]).replace("{}", "0")
                line_c2 += "\t" + g + "\t" + str(n2[g]).replace("{}", "0")
            line = line_begin + "\t" + line_c1 + "\t" + line_c2 + "\t" + s + "\n"
            f.write(line)
    #f.write("And a merry Christmas to you tiny Tim!\n\n")
def are_synonym(c1, c2):
    """Return 'synonymous' when both codons encode the same amino acid,
    'non-synonymous' otherwise.
    """
    if translate(c1) == translate(c2):
        return "synonymous"
    return "non-synonymous"
def translate(codon):  # VERIFY translation matrix!!!
    """Translate codons in amino acids using the XXXX code

    Accepts a 3-letter DNA or RNA codon (case-insensitive; 'u' is mapped
    to 't') and returns the one-letter amino-acid code, '*' for stops.
    """
    # Codon -> amino-acid lookup.  The table appears to be the standard
    # genetic code -- TODO confirm, per the author's note above.
    code = { 'ttt': 'F', 'tct': 'S', 'tat': 'Y', 'tgt': 'C',
    'ttc': 'F', 'tcc': 'S', 'tac': 'Y', 'tgc': 'C',
    'tta': 'L', 'tca': 'S', 'taa': '*', 'tga': '*',
    'ttg': 'L', 'tcg': 'S', 'tag': '*', 'tgg': 'W',
    'ctt': 'L', 'cct': 'P', 'cat': 'H', 'cgt': 'R',
    'ctc': 'L', 'ccc': 'P', 'cac': 'H', 'cgc': 'R',
    'cta': 'L', 'cca': 'P', 'caa': 'Q', 'cga': 'R',
    'ctg': 'L', 'ccg': 'P', 'cag': 'Q', 'cgg': 'R',
    'att': 'I', 'act': 'T', 'aat': 'N', 'agt': 'S',
    'atc': 'I', 'acc': 'T', 'aac': 'N', 'agc': 'S',
    'ata': 'I', 'aca': 'T', 'aaa': 'K', 'aga': 'R',
    'atg': 'M', 'acg': 'T', 'aag': 'K', 'agg': 'R',
    'gtt': 'V', 'gct': 'A', 'gat': 'D', 'ggt': 'G',
    'gtc': 'V', 'gcc': 'A', 'gac': 'D', 'ggc': 'G',
    'gta': 'V', 'gca': 'A', 'gaa': 'E', 'gga': 'G',
    'gtg': 'V', 'gcg': 'A', 'gag': 'E', 'ggg': 'G'
    }
    assert len(codon) == 3, "Codon of wrong length"
    assert codon.isalpha(), "Codon contains non alphabetic characters"
    codon = codon.lower().replace("u", "t")
    standard_nuc = ["a", "c", "t", "g"]
    for n in codon:
        assert n in standard_nuc, "Unknown nucleotide found in codon"
    # NOTE(review): after the asserts above, every remaining codon is one
    # of the 64 keys in ``code``, so this except branch looks unreachable.
    try:
        aa = code[codon]
    except:
        aa = "!"
        print "Warning! Found an inexistant codon, translating as '!'"
    return aa
# Main function
# (This is the heart of the program
# Without it, nothing will happen...)
# Synonymous storm
# From sequence jumbo-mumbo
# Brings Truth and Order
def main():
    """Parse command-line options and run the SNP synonymy analysis."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hi:o:g:", ["help", "input=",
            "output=", "group="])
    except getopt.GetoptError, e:
        print "Input error. Use -h for help"
        sys.exit(0)
    output_file = None
    group_file = None
    for option, value in opts:
        if option in ('-h', '--help'):
            help()
            sys.exit(0)
        elif option in ('-i', '--input'):
            input_file = value
        elif option in ('-o', '--output'):
            output_file = value
        elif option in ('-g', '--group'):
            group_file = value
    # If -i was never given, ``input_file`` is unbound and open() raises
    # NameError -- which the bare except also catches, so the friendly
    # message below is shown either way.
    try:
        with open(input_file) as test:
            pass
    except:
        print "Input Error: No input file specified or file not found."
        print "Use -h for help."
        sys.exit(0)
    if output_file == None:
        print "Input Error: No output file specified."
        print "Use -h for help."
        sys.exit(0)
    output_folder = "synonym_results"
    output_path = os.path.join(output_folder, output_file)
    print "Using version:", __version__, "of", __program_name__
    print "Last revision:", __revision_date__
    print "By:", __authors__
    # With no group file, everything falls into the implicit "No_group".
    if group_file == None:
        groups = []
    else:
        groups = import_groups(group_file)
    print
    min_count = 2  # Implement in input options
    good_snps = good_positions(input_file, output_folder, output_file, groups, min_count)
    synonym_output(good_snps, output_path, groups)
if __name__ == "__main__":
main()
|
wkh124/wkh124
|
snp_synonymy_explorer.py
|
Python
|
gpl-3.0
| 10,895
|
[
"Biopython"
] |
8faf2ccc957aded51d9abef58b4367b7449c9502d5920148816b723980666b8a
|
"""test_analysis.py - test the analysis server framework
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
"""
import logging
logger = logging.getLogger(__name__)
#logger.addHandler(logging.StreamHandler())
#logger.setLevel(logging.DEBUG)
from cStringIO import StringIO
import inspect
import numpy as np
import os
import Queue
import tempfile
import threading
import traceback
import unittest
import uuid
import zmq
import cellprofiler.analysis as cpanalysis
import cellprofiler.pipeline as cpp
import cellprofiler.cpmodule as cpm
import cellprofiler.preferences as cpprefs
import cellprofiler.measurements as cpmeas
import cellprofiler.utilities.zmqrequest as cpzmq
from cellprofiler.modules.tests import example_images_directory, testimages_directory
IMAGE_NAME = "imagename"
OBJECTS_NAME = "objectsname"
IMAGE_FEATURE = "imagefeature"
OBJECTS_FEATURE = "objectsfeature"
class TestAnalysis(unittest.TestCase):
    class FakeWorker(threading.Thread):
        '''A mockup of a ZMQ client to the boundary

        Runs a private daemon thread with its own ZMQ sockets.  The test
        thread enqueues callables which the worker thread executes,
        returning (exception, result) pairs through a response queue.  An
        inproc PUB/SUB socket pair is used solely to wake the worker's
        poll loop.
        '''
        def __init__(self, name="Client thread"):
            threading.Thread.__init__(self, name = name)
            self.setDaemon(True)
            self.zmq_context = zmq.Context()
            # Callables for the worker thread to run, and their results.
            self.queue = Queue.Queue()
            self.response_queue = Queue.Queue()
            # Released by the worker thread once its sockets are ready.
            self.start_signal = threading.Semaphore(0)
            self.keep_going = True
            # inproc PUB socket used only to wake the worker's poller.
            self.notify_addr = "inproc://%s" % uuid.uuid4().hex
            self.notify_socket = self.zmq_context.socket(zmq.PUB)
            self.notify_socket.bind(self.notify_addr)
            self.start()
            # Block until the worker thread has finished socket setup.
            self.start_signal.acquire()
        def __enter__(self):
            return self
        def __exit__(self, type, value, traceback):
            # Context-manager exit: stop the worker and release its sockets.
            self.stop()
            self.join()
            self.notify_socket.close()
        def run(self):
            # Worker thread main loop: create sockets, signal readiness,
            # then execute queued callables until told to stop.
            logger.debug("Client thread starting")
            try:
                self.work_socket = self.zmq_context.socket(zmq.REQ)
                self.recv_notify_socket = self.zmq_context.socket(zmq.SUB)
                self.recv_notify_socket.setsockopt(zmq.SUBSCRIBE, '')
                self.recv_notify_socket.connect(self.notify_addr)
                self.announce_socket = None
                self.poller = zmq.Poller()
                self.poller.register(self.recv_notify_socket, zmq.POLLIN)
                self.start_signal.release()
                while self.keep_going:
                    socks = dict(self.poller.poll(1000))
                    if socks.get(self.recv_notify_socket, None) == zmq.POLLIN:
                        # Discard whatever comes down the notify socket.
                        # It's only used to wake us up.
                        msg = self.recv_notify_socket.recv()
                    while True:
                        try:
                            if not self.keep_going:
                                break
                            fn_and_args = self.queue.get_nowait()
                        except Queue.Empty:
                            break
                        try:
                            # Execute the queued callable; ship back
                            # either its result or the exception raised.
                            response = fn_and_args[0](*fn_and_args[1:])
                            self.response_queue.put((None, response))
                        except Exception,e:
                            traceback.print_exc()
                            self.response_queue.put((e, None))
            except:
                logger.warn("Client thread caught exception", exc_info=10)
                # Release in case __init__ is still blocked on setup.
                self.start_signal.release()
            finally:
                logger.debug("Client thread exiting")
        def stop(self):
            # Ask the worker loop to exit and wake its poller.
            self.keep_going = False
            self.notify_socket.send("Stop")
        def send(self, req):
            # Enqueue a request for the worker thread to transmit.
            # Returns self.recv so callers can write `worker.send(req)()`
            # to block for the reply.
            logger.debug("    Enqueueing send of %s" % str(type(req)))
            self.queue.put((self.do_send, req))
            self.notify_socket.send("Send")
            return self.recv
        def request_work(self):
            '''Send a work request until we get a WorkReply'''
            while True:
                reply = self.send(cpanalysis.WorkRequest(self.analysis_id))()
                if isinstance(reply, cpanalysis.WorkReply):
                    return reply
                elif not isinstance(reply, cpanalysis.NoWorkReply):
                    raise NotImplementedError(
                        "Received a reply of %s for a work request" %
                        str(type(reply)))
        def do_send(self, req):
            # Runs on the worker thread: send the request and poll until a
            # reply arrives or we are told to stop via the notify socket.
            logger.debug("    Sending %s" % str(type(req)))
            cpzmq.Communicable.send(req, self.work_socket)
            self.poller.register(self.work_socket, zmq.POLLIN)
            try:
                while True:
                    socks = dict(self.poller.poll())
                    if socks.get(self.recv_notify_socket, None) == zmq.POLLIN:
                        msg = self.recv_notify_socket.recv()
                        if not self.keep_going:
                            raise Exception("Cancelled")
                    if socks.get(self.work_socket, None) == zmq.POLLIN:
                        logger.debug("    Received response for %s" % str(type(req)))
                        return cpzmq.Communicable.recv(self.work_socket)
            finally:
                self.poller.unregister(self.work_socket)
        def recv(self):
            # Block for the worker thread's (exception, result) pair and
            # either re-raise the exception or return the result.
            logger.debug("    Waiting for client thread")
            exception, result = self.response_queue.get()
            if exception is not None:
                logger.debug("    Client thread communicated exception")
                raise exception
            else:
                logger.debug("    Client thread communicated result")
                return result
        def listen_for_announcements(self, work_announce_address):
            # Enqueue a subscription to the server's announcement socket;
            # returns self.recv for the caller to block on.
            self.queue.put((self.do_listen_for_announcements,
                            work_announce_address))
            self.notify_socket.send("Listen for announcements")
            return self.recv
        def do_listen_for_announcements(self, work_announce_address):
            # Runs on the worker thread: subscribe and return the first
            # JSON announcement received (or raise if cancelled).
            self.announce_socket = self.zmq_context.socket(zmq.SUB)
            self.announce_socket.setsockopt(zmq.SUBSCRIBE, '')
            self.announce_socket.connect(work_announce_address)
            self.poller.register(self.announce_socket, zmq.POLLIN)
            try:
                while True:
                    socks = dict(self.poller.poll())
                    if socks.get(self.recv_notify_socket, None) == zmq.POLLIN:
                        msg = self.recv_notify_socket.recv()
                        if not self.keep_going:
                            raise Exception("Cancelled")
                    if socks.get(self.announce_socket, None) == zmq.POLLIN:
                        announcements = \
                            self.announce_socket.recv_json()
                        return announcements
            finally:
                self.poller.unregister(self.announce_socket)
                self.announce_socket.close()
                self.announce_socket = None
        def connect(self, work_announce_address):
            # Wait for the first announcement, remember its analysis id,
            # then connect the REQ socket to the announced work queue.
            self.analysis_id, work_queue_address = \
                self.listen_for_announcements(work_announce_address)()[0]
            self.queue.put((self.do_connect, work_queue_address))
            self.notify_socket.send("Do connect")
            return self.recv()
        def do_connect(self, work_queue_address):
            # Runs on the worker thread: attach the REQ socket.
            self.work_socket.connect(work_queue_address)
    @classmethod
    def setUpClass(cls):
        """One-time setup: shared ZMQ context and module registry."""
        cls.zmq_context = zmq.Context()
        # Local import: loads the CellProfiler module registry once.
        from cellprofiler.modules import fill_modules
        fill_modules()
    @classmethod
    def tearDownClass(cls):
        """One-time teardown: close ZMQ boundary and stop ilastik workers."""
        cpzmq.join_to_the_boundary()
        # Best-effort: ilastik may not be installed; deliberately swallow
        # the ImportError (and anything else) rather than fail teardown.
        try:
            from ilastik.core.jobMachine import GLOBAL_WM
            GLOBAL_WM.stopWorkers()
        except:
            pass
        cls.zmq_context.term()
    def setUp(self):
        """Per-test setup: temp HDF5 path, event queue, and flags."""
        # Create a temp .h5 path for measurements; close the OS handle
        # immediately since only the path is needed.
        fd, self.filename = tempfile.mkstemp(".h5")
        os.close(fd)
        self.event_queue = Queue.Queue()
        self.analysis = None
        # Flags controlling which events analysis_event_handler enqueues.
        self.wants_analysis_finished = False
        self.wants_pipeline_events = False
        # Measurements captured from AnalysisFinished, closed in tearDown.
        self.measurements_to_close = None
    def tearDown(self):
        """Per-test teardown: cancel analysis, close measurements, remove temp file."""
        self.cancel_analysis()
        if self.measurements_to_close is not None:
            self.measurements_to_close.close()
        if os.path.exists(self.filename):
            os.unlink(self.filename)
def cancel_analysis(self):
if self.analysis is not None:
self.analysis.cancel()
self.analysis = None
def analysis_event_handler(self, event):
if isinstance(event, cpanalysis.AnalysisProgress):
return
if isinstance(event, cpanalysis.AnalysisFinished):
self.measurements_to_close = event.measurements
if not self.wants_analysis_finished:
return
if (isinstance(event, cpp.AbstractPipelineEvent) and
not self.wants_pipeline_events):
return
self.event_queue.put(event)
    def make_pipeline_and_measurements(self,
                                       nimage_sets = 1,
                                       group_numbers = None,
                                       group_indexes = None,
                                       **kwargs):
        """Build an in-memory Measurements with nimage_sets image sets
        plus a pipeline loaded from SBS_PIPELINE.

        group_numbers / group_indexes, when given, are per-image-set
        sequences; otherwise every set goes into group 1 with its own
        index.  **kwargs absorbs extra arguments (e.g. "status") that
        callers pass through; they are ignored here.
        """
        m = cpmeas.Measurements(mode="memory")
        for i in range(1, nimage_sets + 1):
            if group_numbers is not None:
                group_number = group_numbers[i-1]
                group_index = group_indexes[i-1]
            else:
                group_number = 1
                group_index = i
            # Synthetic URL plus grouping metadata for image set i.
            m[cpmeas.IMAGE, cpmeas.C_URL + "_" + IMAGE_NAME, i] = "file:/%d.tif" % i
            m[cpmeas.IMAGE, cpmeas.GROUP_NUMBER, i] = group_number
            m[cpmeas.IMAGE, cpmeas.GROUP_INDEX, i] = group_index
        pipeline = cpp.Pipeline()
        pipeline.loadtxt(StringIO(SBS_PIPELINE), raise_on_error=True)
        return pipeline, m
    def make_pipeline_and_measurements_and_start(self, **kwargs):
        """Build pipeline/measurements, start the analysis, and wait for
        the AnalysisStarted event.

        A "status" kwarg (sequence of per-image-set status strings) seeds
        the STATUS measurement and disables overwrite, simulating a
        restarted analysis.
        """
        pipeline, m = self.make_pipeline_and_measurements(**kwargs)
        if "status" in kwargs:
            overwrite = False
            for i, status in enumerate(kwargs["status"]):
                m.add_measurement(cpmeas.IMAGE,
                                  cpanalysis.AnalysisRunner.STATUS,
                                  status, image_set_number = i+1)
        else:
            overwrite = True
        self.analysis = cpanalysis.Analysis(pipeline, self.filename, m)
        # num_workers=0: tests drive the protocol themselves via FakeWorker.
        self.analysis.start(self.analysis_event_handler,
                            num_workers = 0, overwrite=overwrite)
        analysis_started = self.event_queue.get()
        self.assertIsInstance(analysis_started, cpanalysis.AnalysisStarted)
        return pipeline, m
    def check_display_post_run_requests(self, pipeline):
        '''Read the DisplayPostRunRequest messages during the post_run phase'''
        # Only modules that are shown and override display_post_run are
        # expected to emit a request; consume one event per such module.
        for module in pipeline.modules():
            if module.show_window and\
               module.__class__.display_post_run != cpm.CPModule.display_post_run:
                result = self.event_queue.get()
                self.assertIsInstance(
                    result, cpanalysis.DisplayPostRunRequest)
                self.assertEqual(result.module_num, module.module_num)
    def test_01_01_start_and_stop(self):
        """Start an analysis, cancel it, and expect a cancelled AnalysisFinished."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        self.make_pipeline_and_measurements_and_start()
        self.wants_analysis_finished = True
        self.cancel_analysis()
        # The last should be AnalysisFinished. There may be AnalysisProgress
        # prior to that.
        analysis_finished = self.event_queue.get()
        self.assertIsInstance(analysis_finished, cpanalysis.AnalysisFinished)
        self.assertTrue(analysis_finished.cancelled)
        self.assertIsInstance(analysis_finished.measurements,
                              cpmeas.Measurements)
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_02_01_announcement(self):
        """The server announces a running analysis; after cancel the list is empty."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        with self.FakeWorker() as worker:
            work_announce_address = self.analysis.runner.work_announce_address
            response = worker.listen_for_announcements(work_announce_address)()
            self.assertEqual(len(response), 1)
            analysis_id, request_address = response[0]
            self.assertEqual(analysis_id, self.analysis.analysis_in_progress)
            self.cancel_analysis()
            response = worker.listen_for_announcements(work_announce_address)()
            self.assertEqual(len(response), 0)
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
def test_03_01_get_work(self):
pipeline, m = self.make_pipeline_and_measurements_and_start()
with self.FakeWorker() as worker:
worker.connect(self.analysis.runner.work_announce_address)
response = worker.send(cpanalysis.WorkRequest(worker.analysis_id))()
self.assertIsInstance(response, cpanalysis.WorkReply)
self.assertSequenceEqual(response.image_set_numbers, (1,))
self.assertFalse(response.worker_runs_post_group)
self.assertTrue(response.wants_dictionary)
logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_03_02_get_work_twice(self):
        """A second work request with only one image set gets NoWorkReply."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        with self.FakeWorker() as worker:
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.send(cpanalysis.WorkRequest(worker.analysis_id))()
            self.assertIsInstance(response, cpanalysis.WorkReply)
            response = worker.send(cpanalysis.WorkRequest(worker.analysis_id))()
            self.assertIsInstance(response, cpanalysis.NoWorkReply)
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_03_03_cancel_before_work(self):
        """A work request after cancellation gets BoundaryExited."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        with self.FakeWorker() as worker:
            worker.connect(self.analysis.runner.work_announce_address)
            self.cancel_analysis()
            response = worker.send(cpanalysis.WorkRequest(worker.analysis_id))()
            self.assertIsInstance(response, cpzmq.BoundaryExited)
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_04_01_pipeline_preferences(self):
        """PipelinePreferencesRequest returns the pipeline and key preferences."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        cpprefs.set_headless()
        title_font_name = "Rosewood Std Regular"
        cpprefs.set_title_font_name(title_font_name)
        cpprefs.set_default_image_directory(example_images_directory())
        cpprefs.set_default_output_directory(testimages_directory())
        with self.FakeWorker() as worker:
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.send(cpanalysis.PipelinePreferencesRequest(
                worker.analysis_id))()
            #
            # Compare pipelines
            #
            client_pipeline = cpp.Pipeline()
            pipeline_txt = response.pipeline_blob.tostring()
            client_pipeline.loadtxt(StringIO(pipeline_txt),
                                    raise_on_error = True)
            # Module-by-module, setting-by-setting equality check.
            self.assertEqual(len(pipeline.modules()),
                             len(client_pipeline.modules()))
            for smodule, cmodule in zip(pipeline.modules(),
                                        client_pipeline.modules()):
                self.assertEqual(smodule.module_name, cmodule.module_name)
                self.assertEqual(len(smodule.settings()),
                                 len(cmodule.settings()))
                for ssetting, csetting in zip(smodule.settings(),
                                              cmodule.settings()):
                    self.assertEqual(ssetting.get_value_text(),
                                     csetting.get_value_text())
            # The preferences dictionary must round-trip the values set above.
            preferences = response.preferences
            self.assertIn(cpprefs.TITLE_FONT_NAME, preferences)
            self.assertEqual(preferences[cpprefs.TITLE_FONT_NAME],
                             title_font_name)
            self.assertIn(cpprefs.DEFAULT_IMAGE_DIRECTORY, preferences)
            self.assertEqual(preferences[cpprefs.DEFAULT_IMAGE_DIRECTORY],
                             cpprefs.get_default_image_directory())
            self.assertIn(cpprefs.DEFAULT_OUTPUT_DIRECTORY, preferences)
            self.assertEqual(preferences[cpprefs.DEFAULT_OUTPUT_DIRECTORY],
                             cpprefs.get_default_output_directory())
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_04_02_initial_measurements_request(self):
        """InitialMeasurementsRequest round-trips the server's measurements."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        with self.FakeWorker() as worker:
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.send(cpanalysis.InitialMeasurementsRequest(
                worker.analysis_id))()
            client_measurements = cpmeas.load_measurements_from_buffer(
                response.buf)
            try:
                assert isinstance(client_measurements, cpmeas.Measurements)
                assert isinstance(m, cpmeas.Measurements)
                # Exhaustive comparison: image numbers, object names,
                # feature names, then every measurement value.
                self.assertSequenceEqual(
                    m.get_image_numbers(),
                    client_measurements.get_image_numbers())
                image_numbers = m.get_image_numbers()
                self.assertItemsEqual(m.get_object_names(),
                                      client_measurements.get_object_names())
                for object_name in m.get_object_names():
                    self.assertItemsEqual(
                        m.get_feature_names(object_name),
                        client_measurements.get_feature_names(object_name))
                    for feature_name in m.get_feature_names(object_name):
                        for image_number in image_numbers:
                            sv = m.get_measurement(
                                object_name, feature_name,
                                image_set_number = image_number)
                            cv = client_measurements.get_measurement(
                                object_name, feature_name,
                                image_set_number = image_number)
                            self.assertEqual(np.isscalar(sv),
                                             np.isscalar(cv))
                            if np.isscalar(sv):
                                self.assertEqual(sv, cv)
                            else:
                                np.testing.assert_almost_equal(sv, cv)
            finally:
                client_measurements.close()
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_04_03_interaction(self):
        """An InteractionRequest reaches the event handler and its reply returns."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        with self.FakeWorker() as worker:
            worker.connect(self.analysis.runner.work_announce_address)
            fn_interaction_reply = worker.send(
                cpanalysis.InteractionRequest(
                    worker.analysis_id,
                    foo = "bar"))
            # The request surfaces through the event queue; reply to it.
            request = self.event_queue.get()
            self.assertIsInstance(request, cpanalysis.InteractionRequest)
            self.assertEqual(request.foo, "bar")
            request.reply(cpanalysis.InteractionReply(hello = "world"))
            reply = fn_interaction_reply()
            self.assertIsInstance(reply, cpanalysis.InteractionReply)
            self.assertEqual(reply.hello, "world")
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_04_04_01_display(self):
        """A DisplayRequest reaches the event handler and is acknowledged."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        with self.FakeWorker() as worker:
            worker.connect(self.analysis.runner.work_announce_address)
            fn_interaction_reply = worker.send(
                cpanalysis.DisplayRequest(
                    worker.analysis_id,
                    foo = "bar"))
            #
            # The event queue should be hooked up to the interaction callback
            #
            request = self.event_queue.get()
            self.assertIsInstance(request, cpanalysis.DisplayRequest)
            self.assertEqual(request.foo, "bar")
            request.reply(cpanalysis.Ack(message = "Gimme Pony"))
            reply = fn_interaction_reply()
            self.assertIsInstance(reply, cpanalysis.Ack)
            self.assertEqual(reply.message, "Gimme Pony")
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_04_04_02_display_post_group(self):
        """A DisplayPostGroupRequest carries its display data and is acknowledged."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        with self.FakeWorker() as worker:
            worker.connect(self.analysis.runner.work_announce_address)
            fn_interaction_reply = worker.send(
                cpanalysis.DisplayPostGroupRequest(
                    worker.analysis_id, 1,
                    dict(foo = "bar"), 3))
            #
            # The event queue should be hooked up to the interaction callback
            #
            request = self.event_queue.get()
            self.assertIsInstance(request, cpanalysis.DisplayPostGroupRequest)
            display_data = request.display_data
            self.assertEqual(display_data["foo"], "bar")
            request.reply(cpanalysis.Ack(message = "Gimme Pony"))
            reply = fn_interaction_reply()
            self.assertIsInstance(reply, cpanalysis.Ack)
            self.assertEqual(reply.message, "Gimme Pony")
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_04_05_exception(self):
        """ExceptionReport, DebugWaiting and DebugComplete round-trip correctly."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        with self.FakeWorker() as worker:
            worker.connect(self.analysis.runner.work_announce_address)
            fn_interaction_reply = worker.send(
                cpanalysis.ExceptionReport(
                    worker.analysis_id,
                    image_set_number = 1,
                    module_name ="Images",
                    exc_type = "Exception",
                    exc_message = "Not really an exception",
                    exc_traceback = traceback.extract_stack(),
                    filename = "test_analysis.py",
                    line_number = 374))
            #
            # The event queue should be hooked up to the interaction callback
            #
            request = self.event_queue.get()
            self.assertIsInstance(request, cpanalysis.ExceptionReport)
            # The innermost traceback frame should be this very function.
            function = request.exc_traceback[-1][2]
            self.assertEqual(function, inspect.getframeinfo(inspect.currentframe()).function)
            self.assertEqual(request.filename, "test_analysis.py")
            request.reply(cpanalysis.ExceptionPleaseDebugReply(
                disposition=1, verification_hash="corned beef"))
            reply = fn_interaction_reply()
            self.assertIsInstance(reply, cpanalysis.ExceptionPleaseDebugReply)
            self.assertEqual(reply.verification_hash, "corned beef")
            self.assertEqual(reply.disposition, 1)
            #
            # Try DebugWaiting and DebugComplete as well
            #
            for req in (cpanalysis.DebugWaiting(worker.analysis_id, 8080),
                        cpanalysis.DebugComplete(worker.analysis_id)):
                fn_interaction_reply = worker.send(req)
                request = self.event_queue.get()
                self.assertEqual(type(request), type(req))
                request.reply(cpanalysis.Ack())
                reply = fn_interaction_reply()
                self.assertIsInstance(reply, cpanalysis.Ack)
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_05_01_imageset_with_dictionary(self):
        """The shared dictionary reported for image set 1 is what later workers get."""
        #
        # Go through the steps for the first imageset and see if the
        # dictionary that we sent is the one we get.
        #
        # WorkRequest - to get the rights to report the dictionary
        # ImageSetSuccessWithDictionary - to report the dictionary
        # WorkRequest (with spin until WorkReply received)
        # SharedDictionaryRequest
        #
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start(
            nimage_sets = 2)
        r = np.random.RandomState()
        r.seed(51)
        with self.FakeWorker() as worker:
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.request_work()
            # One random dictionary per pipeline module.
            dictionaries = [ dict([(uuid.uuid4().hex, r.uniform(size=(10,15)))
                                   for _ in range(10)])
                             for module in pipeline.modules()]
            response = worker.send(cpanalysis.ImageSetSuccessWithDictionary(
                worker.analysis_id, response.image_set_numbers[0],
                dictionaries))()
            self.assertIsInstance(response, cpanalysis.Ack)
            response = worker.request_work()
            self.assertSequenceEqual(response.image_set_numbers, [2])
            response = worker.send(cpanalysis.SharedDictionaryRequest(
                worker.analysis_id))()
            self.assertIsInstance(response, cpanalysis.SharedDictionaryReply)
            # The reply must match the dictionaries reported above,
            # key-for-key and value-for-value.
            result = response.dictionaries
            self.assertEqual(len(dictionaries), len(result))
            for ed, d in zip(dictionaries, result):
                self.assertItemsEqual(ed.keys(), d.keys())
                for k in ed.keys():
                    np.testing.assert_almost_equal(ed[k], d[k])
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_05_02_groups(self):
        """Grouped image sets are handed out whole-group, post-group on worker."""
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        pipeline, m = self.make_pipeline_and_measurements_and_start(
            nimage_sets = 4,
            group_numbers = [1, 1, 2, 2],
            group_indexes = [1, 2, 1, 2])
        r = np.random.RandomState()
        r.seed(52)
        with self.FakeWorker() as worker:
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.request_work()
            self.assertTrue(response.worker_runs_post_group)
            self.assertFalse(response.wants_dictionary)
            self.assertSequenceEqual(response.image_set_numbers, [1,2])
            response = worker.send(cpanalysis.ImageSetSuccess(
                worker.analysis_id, response.image_set_numbers[0]))()
            response = worker.request_work()
            self.assertSequenceEqual(response.image_set_numbers, [3, 4])
            self.assertTrue(response.worker_runs_post_group)
            self.assertFalse(response.wants_dictionary)
        logger.debug("Exiting %s" % inspect.getframeinfo(inspect.currentframe()).function)
    def test_06_01_single_imageset(self):
        """Full analysis cycle for a single image set."""
        #
        # Test a full cycle of analysis with an image set list
        # with a single image set
        #
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        self.wants_analysis_finished = True
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        r = np.random.RandomState()
        r.seed(61)
        with self.FakeWorker() as worker:
            #####################################################
            #
            # Connect the worker to the analysis server and get
            # the initial measurements.
            #
            #####################################################
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.request_work()
            response = worker.send(cpanalysis.InitialMeasurementsRequest(
                worker.analysis_id))()
            client_measurements = cpmeas.load_measurements_from_buffer(
                response.buf)
            #####################################################
            #
            # Report the dictionary, add some measurements and
            # report the results of the first job
            #
            #####################################################
            dictionaries = [ dict([(uuid.uuid4().hex, r.uniform(size=(10,15)))
                                   for _ in range(10)])
                             for module in pipeline.modules()]
            response = worker.send(cpanalysis.ImageSetSuccessWithDictionary(
                worker.analysis_id, 1, dictionaries))()
            objects_measurements = r.uniform(size=10)
            client_measurements[cpmeas.IMAGE, IMAGE_FEATURE, 1] = "Hello"
            client_measurements[OBJECTS_NAME, OBJECTS_FEATURE, 1] = \
                objects_measurements
            req = cpanalysis.MeasurementsReport(
                worker.analysis_id,
                client_measurements.file_contents(),
                image_set_numbers = [1])
            client_measurements.close()
            response_fn = worker.send(req)
            self.check_display_post_run_requests(pipeline)
            #####################################################
            #
            # The server should receive the measurements report.
            # It should merge the measurements and post an
            # AnalysisFinished event.
            #
            #####################################################
            result = self.event_queue.get()
            self.assertIsInstance(result, cpanalysis.AnalysisFinished)
            self.assertFalse(result.cancelled)
            measurements = result.measurements
            self.assertSequenceEqual(measurements.get_image_numbers(), [1])
            self.assertEqual(measurements[cpmeas.IMAGE, IMAGE_FEATURE, 1],
                             "Hello")
            np.testing.assert_almost_equal(
                measurements[OBJECTS_NAME, OBJECTS_FEATURE, 1],
                objects_measurements)
    def test_06_02_test_three_imagesets(self):
        """Full analysis cycle across three independent image sets."""
        # Test an analysis of three imagesets
        #
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        self.wants_analysis_finished = True
        pipeline, m = self.make_pipeline_and_measurements_and_start(
            nimage_sets = 3)
        r = np.random.RandomState()
        r.seed(62)
        with self.FakeWorker() as worker:
            #####################################################
            #
            # Connect the worker to the analysis server and get
            # the initial measurements.
            #
            #####################################################
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.request_work()
            response = worker.send(cpanalysis.InitialMeasurementsRequest(
                worker.analysis_id))()
            client_measurements = cpmeas.load_measurements_from_buffer(
                response.buf)
            #####################################################
            #
            # Report the dictionary, add some measurements and
            # report the results of the first job
            #
            #####################################################
            dictionaries = [ dict([(uuid.uuid4().hex, r.uniform(size=(10,15)))
                                   for _ in range(10)])
                             for module in pipeline.modules()]
            response = worker.send(cpanalysis.ImageSetSuccessWithDictionary(
                worker.analysis_id, 1, dictionaries))()
            #####################################################
            #
            # The analysis server should be ready to send us two
            # more jobs to do.
            #
            #####################################################
            expected_jobs = [2, 3]
            for _ in range(2):
                response = worker.request_work()
                image_numbers = response.image_set_numbers
                self.assertEqual(len(image_numbers), 1)
                self.assertIn(image_numbers[0], expected_jobs)
                expected_jobs.remove(image_numbers[0])
            #####################################################
            #
            # Send the measurement groups
            #
            #####################################################
            objects_measurements = [r.uniform(size=10) for _ in range(3)]
            for i, om in enumerate(objects_measurements):
                image_number = i+1
                # NOTE(review): this condition is always true; presumably
                # `image_number > 1` was intended, since image set 1 already
                # reported success via ImageSetSuccessWithDictionary above —
                # confirm intent before changing.
                if image_number > 0:
                    worker.send(cpanalysis.ImageSetSuccess(
                        worker.analysis_id,
                        image_set_number = image_number))
                m = cpmeas.Measurements(copy = client_measurements)
                m[cpmeas.IMAGE, IMAGE_FEATURE, image_number] = \
                    "Hello %d" % image_number
                m[OBJECTS_NAME, OBJECTS_FEATURE, image_number] = om
                req = cpanalysis.MeasurementsReport(
                    worker.analysis_id,
                    m.file_contents(),
                    image_set_numbers = [image_number])
                m.close()
                response = worker.send(req)()
            client_measurements.close()
            #####################################################
            #
            # The server should receive the measurements reports,
            # It should merge the measurements and post an
            # AnalysisFinished event.
            #
            #####################################################
            self.check_display_post_run_requests(pipeline)
            result = self.event_queue.get()
            self.assertIsInstance(result, cpanalysis.AnalysisFinished)
            self.assertFalse(result.cancelled)
            measurements = result.measurements
            self.assertSequenceEqual(list(measurements.get_image_numbers()),
                                     [1, 2, 3])
            for i in range(1, 4):
                self.assertEqual(measurements[cpmeas.IMAGE, IMAGE_FEATURE, i],
                                 "Hello %d" % i)
                np.testing.assert_almost_equal(
                    measurements[OBJECTS_NAME, OBJECTS_FEATURE, i],
                    objects_measurements[i-1])
    def test_06_03_test_grouped_imagesets(self):
        """Full analysis cycle for four image sets split into two groups."""
        # Test an analysis of four imagesets in two groups
        #
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        self.wants_analysis_finished = True
        pipeline, m = self.make_pipeline_and_measurements_and_start(
            nimage_sets = 4,
            group_numbers = [1, 1, 2, 2],
            group_indexes = [1, 2, 1, 2])
        r = np.random.RandomState()
        r.seed(62)
        with self.FakeWorker() as worker:
            #####################################################
            #
            # Connect the worker to the analysis server and get
            # the initial measurements.
            #
            #####################################################
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.request_work()
            response = worker.send(cpanalysis.InitialMeasurementsRequest(
                worker.analysis_id))()
            client_measurements = cpmeas.load_measurements_from_buffer(
                response.buf)
            response = worker.send(cpanalysis.ImageSetSuccess(
                worker.analysis_id, 1))()
            #####################################################
            #
            # Get the second group.
            #
            #####################################################
            response = worker.request_work()
            image_numbers = response.image_set_numbers
            self.assertSequenceEqual(list(image_numbers), [3, 4])
            #####################################################
            #
            # Send the measurement groups
            #
            #####################################################
            objects_measurements = [r.uniform(size=10) for _ in range(4)]
            for image_number in range(2,5):
                worker.send(cpanalysis.ImageSetSuccess(
                    worker.analysis_id,
                    image_set_number = image_number))
            # Report one MeasurementsReport per group.
            for image_numbers in ((1, 2), (3, 4)):
                m = cpmeas.Measurements(copy = client_measurements)
                for image_number in image_numbers:
                    m[cpmeas.IMAGE, IMAGE_FEATURE, image_number] = \
                        "Hello %d" % image_number
                    m[OBJECTS_NAME, OBJECTS_FEATURE, image_number] = \
                        objects_measurements[image_number-1]
                req = cpanalysis.MeasurementsReport(
                    worker.analysis_id,
                    m.file_contents(),
                    image_set_numbers = image_numbers)
                m.close()
                response = worker.send(req)()
            client_measurements.close()
            #####################################################
            #
            # The server should receive the measurements reports,
            # It should merge the measurements and post an
            # AnalysisFinished event.
            #
            #####################################################
            self.check_display_post_run_requests(pipeline)
            result = self.event_queue.get()
            self.assertIsInstance(result, cpanalysis.AnalysisFinished)
            self.assertFalse(result.cancelled)
            measurements = result.measurements
            self.assertSequenceEqual(list(measurements.get_image_numbers()),
                                     [1, 2, 3, 4])
            for i in range(1, 5):
                self.assertEqual(measurements[cpmeas.IMAGE, IMAGE_FEATURE, i],
                                 "Hello %d" % i)
                np.testing.assert_almost_equal(
                    measurements[OBJECTS_NAME, OBJECTS_FEATURE, i],
                    objects_measurements[i-1])
    def test_06_04_test_restart(self):
        """Restarted analysis only redoes unprocessed / in-process image sets."""
        # Test a restart of an analysis
        #
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        self.wants_analysis_finished = True
        pipeline, m = self.make_pipeline_and_measurements_and_start(
            nimage_sets = 3,
            status = [cpanalysis.AnalysisRunner.STATUS_UNPROCESSED,
                      cpanalysis.AnalysisRunner.STATUS_DONE,
                      cpanalysis.AnalysisRunner.STATUS_IN_PROCESS])
        r = np.random.RandomState()
        r.seed(62)
        with self.FakeWorker() as worker:
            #####################################################
            #
            # Connect the worker to the analysis server and get
            # the initial measurements.
            #
            #####################################################
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.request_work()
            response = worker.send(cpanalysis.InitialMeasurementsRequest(
                worker.analysis_id))()
            client_measurements = cpmeas.load_measurements_from_buffer(
                response.buf)
            #####################################################
            #
            # Report the dictionary, add some measurements and
            # report the results of the first job
            #
            #####################################################
            dictionaries = [ dict([(uuid.uuid4().hex, r.uniform(size=(10,15)))
                                   for _ in range(10)])
                             for module in pipeline.modules()]
            response = worker.send(cpanalysis.ImageSetSuccessWithDictionary(
                worker.analysis_id, 1, dictionaries))()
            #####################################################
            #
            # The analysis server should be ready to send us just
            # the third job.
            #
            #####################################################
            response = worker.request_work()
            image_numbers = response.image_set_numbers
            self.assertEqual(len(image_numbers), 1)
            self.assertEqual(image_numbers[0], 3)
            #####################################################
            #
            # Send the measurement groups
            #
            #####################################################
            objects_measurements = [r.uniform(size=10) for _ in range(3)]
            for image_number, om in ((1, objects_measurements[0]),
                                     (3, objects_measurements[2])):
                worker.send(cpanalysis.ImageSetSuccess(
                    worker.analysis_id,
                    image_set_number = image_number))
                m = cpmeas.Measurements(copy = client_measurements)
                m[cpmeas.IMAGE, IMAGE_FEATURE, image_number] = \
                    "Hello %d" % image_number
                m[OBJECTS_NAME, OBJECTS_FEATURE, image_number] = om
                req = cpanalysis.MeasurementsReport(
                    worker.analysis_id,
                    m.file_contents(),
                    image_set_numbers = [image_number])
                m.close()
                response = worker.send(req)()
            client_measurements.close()
            #####################################################
            #
            # The server should receive the measurements reports,
            # It should merge the measurements and post an
            # AnalysisFinished event.
            #
            #####################################################
            self.check_display_post_run_requests(pipeline)
            result = self.event_queue.get()
            self.assertIsInstance(result, cpanalysis.AnalysisFinished)
            self.assertFalse(result.cancelled)
            measurements = result.measurements
            assert isinstance(measurements, cpmeas.Measurements)
            self.assertSequenceEqual(list(measurements.get_image_numbers()),
                                     [1, 2, 3])
            # Image set 2 was already DONE, so no new measurements for it.
            for i in range(1, 4):
                if i == 2:
                    for feature in (IMAGE_FEATURE, OBJECTS_FEATURE):
                        self.assertFalse(measurements.has_measurements(
                            cpmeas.IMAGE, feature, 2))
                else:
                    self.assertEqual(measurements[cpmeas.IMAGE, IMAGE_FEATURE, i],
                                     "Hello %d" % i)
                    np.testing.assert_almost_equal(
                        measurements[OBJECTS_NAME, OBJECTS_FEATURE, i],
                        objects_measurements[i-1])
    def test_06_05_test_grouped_restart(self):
        """Restart a grouped analysis in which only one image set is unfinished.

        Four image sets in two groups of two; all but set 2 are pre-marked
        STATUS_DONE. Because grouping is in effect, the runner must hand the
        worker the *whole* first group (sets 1 and 2), and the final merged
        measurements must contain results for both.
        """
        # Test an analysis of four imagesets in two groups with all but one
        # complete.
        #
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        self.wants_analysis_finished = True
        pipeline, m = self.make_pipeline_and_measurements_and_start(
            nimage_sets = 4,
            group_numbers = [1, 1, 2, 2],
            group_indexes = [1, 2, 1, 2],
            status = [ cpanalysis.AnalysisRunner.STATUS_DONE,
                       cpanalysis.AnalysisRunner.STATUS_UNPROCESSED,
                       cpanalysis.AnalysisRunner.STATUS_DONE,
                       cpanalysis.AnalysisRunner.STATUS_DONE]
            )
        r = np.random.RandomState()
        r.seed(62)
        with self.FakeWorker() as worker:
            #####################################################
            #
            # Connect the worker to the analysis server and get
            # the initial measurements.
            #
            #####################################################
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.request_work()
            # The restarted group must include the already-done set 1.
            self.assertSequenceEqual(response.image_set_numbers, [1, 2])
            response = worker.send(cpanalysis.InitialMeasurementsRequest(
                worker.analysis_id))()
            client_measurements = cpmeas.load_measurements_from_buffer(
                response.buf)
            # Acknowledge both image sets before reporting measurements.
            for image_number in (1, 2):
                response = worker.send(cpanalysis.ImageSetSuccess(
                    worker.analysis_id, image_number))()
            m = cpmeas.Measurements(copy = client_measurements)
            objects_measurements = [r.uniform(size=10) for _ in range(2)]
            for image_number in (1,2):
                m[cpmeas.IMAGE, IMAGE_FEATURE, image_number] = \
                    "Hello %d" % image_number
                m[OBJECTS_NAME, OBJECTS_FEATURE, image_number] = \
                    objects_measurements[image_number-1]
            req = cpanalysis.MeasurementsReport(
                worker.analysis_id,
                m.file_contents(),
                image_set_numbers = (1,2))
            m.close()
            response = worker.send(req)()
            client_measurements.close()
            #####################################################
            #
            # The server should receive the measurements reports,
            # It should merge the measurements and post an
            # AnalysisFinished event.
            #
            #####################################################
            self.check_display_post_run_requests(pipeline)
            result = self.event_queue.get()
            self.assertIsInstance(result, cpanalysis.AnalysisFinished)
            self.assertFalse(result.cancelled)
            measurements = result.measurements
            for i in range(1, 3):
                self.assertEqual(measurements[cpmeas.IMAGE, IMAGE_FEATURE, i],
                                 "Hello %d" % i)
                np.testing.assert_almost_equal(
                    measurements[OBJECTS_NAME, OBJECTS_FEATURE, i],
                    objects_measurements[i-1])
    def test_06_06_relationships(self):
        """Verify that object relationship measurements survive the round trip.

        The worker reports a "Foo" relationship between OBJECTS_NAME objects
        along with ordinary image/object measurements; the merged measurements
        in the AnalysisFinished event must reproduce the relationship table.
        """
        #
        # Test a transfer of the relationships table.
        #
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        self.wants_analysis_finished = True
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        r = np.random.RandomState()
        r.seed(61)
        with self.FakeWorker() as worker:
            #####################################################
            #
            # Connect the worker to the analysis server and get
            # the initial measurements.
            #
            #####################################################
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.request_work()
            response = worker.send(cpanalysis.InitialMeasurementsRequest(
                worker.analysis_id))()
            client_measurements = cpmeas.load_measurements_from_buffer(
                response.buf)
            #####################################################
            #
            # Report the dictionary, add some measurements and
            # report the results of the first job
            #
            #####################################################
            dictionaries = [ dict([(uuid.uuid4().hex, r.uniform(size=(10,15)))
                                   for _ in range(10)])
                             for module in pipeline.modules()]
            response = worker.send(cpanalysis.ImageSetSuccessWithDictionary(
                worker.analysis_id, 1, dictionaries))()
            n_objects = 10
            objects_measurements = r.uniform(size=n_objects)
            # A random permutation pairs each object with a distinct partner.
            objects_relationship = r.permutation(n_objects) + 1
            client_measurements[cpmeas.IMAGE, IMAGE_FEATURE, 1] = "Hello"
            client_measurements[OBJECTS_NAME, OBJECTS_FEATURE, 1] = \
                objects_measurements
            client_measurements.add_relate_measurement(
                1, "Foo", OBJECTS_NAME, OBJECTS_NAME,
                np.ones(n_objects, int), np.arange(1, n_objects+1),
                np.ones(n_objects, int), objects_relationship)
            req = cpanalysis.MeasurementsReport(
                worker.analysis_id,
                client_measurements.file_contents(),
                image_set_numbers = [1])
            client_measurements.close()
            response_fn = worker.send(req)
            self.check_display_post_run_requests(pipeline)
            #####################################################
            #
            # The server should receive the measurements report.
            # It should merge the measurements and post an
            # AnalysisFinished event.
            #
            #####################################################
            result = self.event_queue.get()
            self.assertIsInstance(result, cpanalysis.AnalysisFinished)
            self.assertFalse(result.cancelled)
            measurements = result.measurements
            assert isinstance(measurements, cpmeas.Measurements)
            self.assertSequenceEqual(measurements.get_image_numbers(), [1])
            self.assertEqual(measurements[cpmeas.IMAGE, IMAGE_FEATURE, 1],
                             "Hello")
            np.testing.assert_almost_equal(
                measurements[OBJECTS_NAME, OBJECTS_FEATURE, 1],
                objects_measurements)
            # Exactly one relationship group should come back, keyed by the
            # reporting module and the two object names.
            rg = measurements.get_relationship_groups()
            self.assertEqual(len(rg), 1)
            rk = rg[0]
            assert isinstance(rk, cpmeas.RelationshipKey)
            self.assertEqual(rk.module_number, 1)
            self.assertEqual(rk.object_name1, OBJECTS_NAME)
            self.assertEqual(rk.object_name2, OBJECTS_NAME)
            self.assertEqual(rk.relationship, "Foo")
            r = measurements.get_relationships(
                1, "Foo", OBJECTS_NAME, OBJECTS_NAME)
            self.assertEqual(len(r), n_objects)
            np.testing.assert_array_equal(r[cpmeas.R_FIRST_IMAGE_NUMBER], 1)
            np.testing.assert_array_equal(r[cpmeas.R_SECOND_IMAGE_NUMBER], 1)
            np.testing.assert_array_equal(r[cpmeas.R_FIRST_OBJECT_NUMBER],
                                          np.arange(1, n_objects+1))
            np.testing.assert_array_equal(r[cpmeas.R_SECOND_OBJECT_NUMBER],
                                          objects_relationship)
    def test_06_07_worker_cancel(self):
        """A worker-initiated AnalysisCancelRequest must end the analysis.

        After the worker takes a job and requests cancellation, the server
        should post an AnalysisFinished event flagged as cancelled.
        """
        #
        # Test worker sending AnalysisCancelRequest
        #
        logger.debug("Entering %s" % inspect.getframeinfo(inspect.currentframe()).function)
        self.wants_analysis_finished = True
        pipeline, m = self.make_pipeline_and_measurements_and_start()
        r = np.random.RandomState()
        r.seed(61)
        with self.FakeWorker() as worker:
            #####################################################
            #
            # Connect the worker to the analysis server and get
            # the initial measurements.
            #
            #####################################################
            worker.connect(self.analysis.runner.work_announce_address)
            response = worker.request_work()
            response = worker.send(cpanalysis.InitialMeasurementsRequest(
                worker.analysis_id))()
            #####################################################
            #
            # The worker sends an AnalysisCancelRequest. The
            # server should send AnalysisFinished.
            #
            #####################################################
            response = worker.send(cpanalysis.AnalysisCancelRequest(
                worker.analysis_id))()
            result = self.event_queue.get()
            self.assertIsInstance(result, cpanalysis.AnalysisFinished)
            self.assertTrue(result.cancelled)
SBS_PIPELINE = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:3
DateRevision:20120424205644
ModuleCount:8
HasImagePlaneDetails:False
Images:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:1|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)]
:
Filter based on rules:No
Filter:or (file does contain "")
Metadata:[module_num:2|svn_version:\'Unknown\'|variable_revision_number:1|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)]
Extract metadata?:Yes
Extraction method count:1
Extraction method:Manual
Source:From file name
Regular expression:Channel(?P<C>\x5B12\x5D)-\x5B0-9\x5D{2}-(?P<WellRow>\x5BA-H\x5D)-(?P<WellColumn>\x5B0-9\x5D{2})
Regular expression:(?P<Date>\x5B0-9\x5D{4}_\x5B0-9\x5D{2}_\x5B0-9\x5D{2})$
Filter images:All images
:or (file does contain "")
Metadata file location\x3A:
Match file and image metadata:\x5B\x5D
NamesAndTypes:[module_num:3|svn_version:\'Unknown\'|variable_revision_number:1|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)]
Assignment method:Assign images matching rules
Load as:Grayscale image
Image name:DNA
:\x5B{u\'Cytoplasm\'\x3A u\'WellRow\', u\'DNACorr\'\x3A None, \'DNA\'\x3A u\'WellRow\', u\'CytoplasmCorr\'\x3A None}, {u\'Cytoplasm\'\x3A u\'WellColumn\', u\'DNACorr\'\x3A None, \'DNA\'\x3A u\'WellColumn\', u\'CytoplasmCorr\'\x3A None}\x5D
Match channels by:Metadata
Assignments count:4
Match this rule:and (extension does istif) (metadata does C "2")
Image name:DNA
Objects name:Cells
Load as:Grayscale image
Match this rule:and (extension does istif) (metadata does C "1")
Image name:Cytoplasm
Objects name:Cells
Load as:Grayscale image
Match this rule:or (file does startwith "Channel1ILLUM")
Image name:DNACorr
Objects name:Cells
Load as:Grayscale image
Match this rule:or (file does contain "Channel2ILLUM")
Image name:CytoplasmCorr
Objects name:Cells
Load as:Grayscale image
Groups:[module_num:4|svn_version:\'Unknown\'|variable_revision_number:1|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)]
Do you want to group your images?:No
grouping metadata count:1
Image name:DNA
Metadata category:None
CorrectIlluminationApply:[module_num:5|svn_version:\'Unknown\'|variable_revision_number:3|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)]
Select the input image:Cytoplasm
Name the output image:CorrCytoplasm
Select the illumination function:CytoplasmCorr
Select how the illumination function is applied:Divide
CorrectIlluminationApply:[module_num:6|svn_version:\'Unknown\'|variable_revision_number:3|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)]
Select the input image:DNA
Name the output image:CorrDNA
Select the illumination function:DNACorr
Select how the illumination function is applied:Divide
IdentifyPrimaryObjects:[module_num:7|svn_version:\'Unknown\'|variable_revision_number:9|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)]
Select the input image:CorrDNA
Name the primary objects to be identified:Nuclei
Typical diameter of objects, in pixel units (Min,Max):10,40
Discard objects outside the diameter range?:Yes
Try to merge too small objects with nearby larger objects?:No
Discard objects touching the border of the image?:Yes
Select the thresholding method:Otsu Global
Threshold correction factor:1
Lower and upper bounds on threshold:0.000000,1.000000
Approximate fraction of image covered by objects?:0.01
Method to distinguish clumped objects:Intensity
Method to draw dividing lines between clumped objects:Intensity
Size of smoothing filter:10
Suppress local maxima that are closer than this minimum allowed distance:7
Speed up by using lower-resolution image to find local maxima?:Yes
Name the outline image:PrimaryOutlines
Fill holes in identified objects?:Yes
Automatically calculate size of smoothing filter?:Yes
Automatically calculate minimum allowed distance between local maxima?:Yes
Manual threshold:0.0
Select binary image:None
Retain outlines of the identified objects?:No
Automatically calculate the threshold using the Otsu method?:Yes
Enter Laplacian of Gaussian threshold:0.5
Two-class or three-class thresholding?:Two classes
Minimize the weighted variance or the entropy?:Weighted variance
Assign pixels in the middle intensity class to the foreground or the background?:Foreground
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:Yes
Enter LoG filter diameter:5
Handling of objects if excessive number of objects identified:Continue
Maximum number of objects:500
Select the measurement to threshold with:None
Method to calculate adaptive window size:Image size
Size of adaptive window:10
IdentifySecondaryObjects:[module_num:8|svn_version:\'Unknown\'|variable_revision_number:8|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)]
Select the input objects:Nuclei
Name the objects to be identified:Cells
Select the method to identify the secondary objects:Propagation
Select the input image:CorrCytoplasm
Select the thresholding method:Otsu Global
Threshold correction factor:1
Lower and upper bounds on threshold:0.000000,1.000000
Approximate fraction of image covered by objects?:0.01
Number of pixels by which to expand the primary objects:10
Regularization factor:0.05
Name the outline image:SecondaryOutlines
Manual threshold:0.0
Select binary image:None
Retain outlines of the identified secondary objects?:No
Two-class or three-class thresholding?:Two classes
Minimize the weighted variance or the entropy?:Weighted variance
Assign pixels in the middle intensity class to the foreground or the background?:Foreground
Discard secondary objects touching the border of the image?:No
Discard the associated primary objects?:No
Name the new primary objects:FilteredNuclei
Retain outlines of the new primary objects?:No
Name the new primary object outlines:FilteredNucleiOutlines
Select the measurement to threshold with:None
Fill holes in identified objects?:Yes
Method to calculate adaptive window size:Image size
Size of adaptive window:10
"""
|
sstoma/CellProfiler
|
cellprofiler/tests/test_analysis.py
|
Python
|
gpl-2.0
| 60,855
|
[
"Gaussian"
] |
8f908122a57969b0b46d2aedf9f8763cfe4c2b04450a977966095ecd76165a99
|
# -*- coding: utf-8 -*-
"""
Cython related magics.
Author:
* Brian Granger
Parts of this code were taken from Cython.inline.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import io
import os, sys
import imp
try:
import hashlib
except ImportError:
import md5 as hashlib
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.testing.skipdoctest import skip_doctest
from IPython.core.magic_arguments import (
argument, magic_arguments, parse_argstring
)
from IPython.utils import py3compat
import Cython
from Cython.Compiler.Errors import CompileError
from Cython.Compiler.Main import Context, default_options
from Cython.Build.Dependencies import cythonize
@magics_class
class CythonMagics(Magics):
    """IPython cell magics for compiling and importing Cython code.

    Provides ``%%cython_inline`` (Cython.inline wrapper),
    ``%%cython_pyximport`` (pyximport-based compile + import) and
    ``%%cython`` (full distutils build into IPYTHONDIR/cython).
    """

    def __init__(self, shell):
        super(CythonMagics,self).__init__(shell)
        # module name -> module object, for reloading %%cython_pyximport cells
        self._reloads = {}
        # build key -> module name, for modules compiled by %%cython
        self._code_cache = {}

    def _import_all(self, module):
        # Inject every non-dunder attribute of `module` into the user namespace.
        for k,v in module.__dict__.items():
            if not k.startswith('__'):
                self.shell.push({k:v})

    @cell_magic
    def cython_inline(self, line, cell):
        """Compile and run a Cython code cell using Cython.inline.

        This magic simply passes the body of the cell to Cython.inline
        and returns the result. If the variables `a` and `b` are defined
        in the user's namespace, here is a simple example that returns
        their sum::

            %%cython_inline
            return a+b

        For most purposes, we recommend the usage of the `%%cython` magic.
        """
        locs = self.shell.user_global_ns
        globs = self.shell.user_ns
        return Cython.inline(cell, locals=locs, globals=globs)

    @cell_magic
    def cython_pyximport(self, line, cell):
        """Compile and import a Cython code cell using pyximport.

        The contents of the cell are written to a `.pyx` file in the current
        working directory, which is then imported using `pyximport`. This
        magic requires a module name to be passed::

            %%cython_pyximport modulename
            def f(x):
                return 2.0*x

        The compiled module is then imported and all of its symbols are
        injected into the user's namespace. For most purposes, we recommend
        the usage of the `%%cython` magic.
        """
        module_name = line.strip()
        if not module_name:
            raise ValueError('module name must be given')
        fname = module_name + '.pyx'
        with io.open(fname, 'w', encoding='utf-8') as f:
            f.write(cell)
        if 'pyximport' not in sys.modules:
            import pyximport
            pyximport.install(reload_support=True)
        if module_name in self._reloads:
            module = self._reloads[module_name]
            # NOTE(review): `reload` is the Python 2 builtin; this module
            # targets the py2-compatible IPython codebase (see py3compat use).
            reload(module)
        else:
            __import__(module_name)
            module = sys.modules[module_name]
            self._reloads[module_name] = module
        self._import_all(module)

    @magic_arguments()
    @argument(
        '-c', '--compile-args', action='append', default=[],
        help="Extra flags to pass to compiler via the `extra_compile_args` Extension flag (can be specified multiple times)."
    )
    @argument(
        '-l', '--lib', action='append', default=[],
        help="Add a library to link the extension against (can be specified multiple times)."
    )
    @argument(
        '-I', '--include', action='append', default=[],
        help="Add a path to the list of include directories (can be specified multiple times)."
    )
    @argument(
        '-f', '--force', action='store_true', default=False,
        help="Force the compilation of the pyx module even if it hasn't changed"
    )
    @cell_magic
    def cython(self, line, cell):
        """Compile and import everything from a Cython code cell.

        The contents of the cell are written to a `.pyx` file in the
        directory `IPYTHONDIR/cython` using a filename with the hash of the
        code. This file is then cythonized and compiled. The resulting module
        is imported and all of its symbols are injected into the user's
        namespace. The usage is similar to that of `%%cython_pyximport` but
        you don't have to pass a module name::

            %%cython
            def f(x):
                return 2.0*x
        """
        args = parse_argstring(self.cython, line)
        code = cell if cell.endswith('\n') else cell+'\n'
        lib_dir = os.path.join(self.shell.ipython_dir, 'cython')
        cython_include_dirs = ['.']
        force = args.force
        quiet = True
        ctx = Context(cython_include_dirs, default_options)
        # The cache key covers everything that can change the compiled result.
        key = code, sys.version_info, sys.executable, Cython.__version__
        module_name = "_cython_magic_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
        # Platform-specific C-extension suffix (e.g. '.so' / '.pyd').
        so_ext = [ ext for ext,_,mod_type in imp.get_suffixes() if mod_type == imp.C_EXTENSION ][0]
        module_path = os.path.join(lib_dir, module_name+so_ext)
        if not os.path.exists(lib_dir):
            os.makedirs(lib_dir)
        if force or not os.path.isfile(module_path):
            # NOTE(review): this aliases args.include, so the append below
            # mutates the parsed-argument list — consider copying first.
            c_include_dirs = args.include
            if 'numpy' in code:
                import numpy
                c_include_dirs.append(numpy.get_include())
            pyx_file = os.path.join(lib_dir, module_name + '.pyx')
            pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
            with io.open(pyx_file, 'w', encoding='utf-8') as f:
                f.write(code)
            extension = Extension(
                name = module_name,
                sources = [pyx_file],
                include_dirs = c_include_dirs,
                extra_compile_args = args.compile_args,
                libraries = args.lib,
            )
            dist = Distribution()
            # Honor distutils config files except setup.cfg, which would
            # belong to whatever project the current directory happens to be.
            config_files = dist.find_config_files()
            try:
                config_files.remove('setup.cfg')
            except ValueError:
                pass
            dist.parse_config_files(config_files)
            build_extension = build_ext(dist)
            build_extension.finalize_options()
            try:
                build_extension.extensions = cythonize([extension], ctx=ctx, quiet=quiet)
            except CompileError:
                # Cython already printed the compile errors; abort quietly.
                return
            build_extension.build_temp = os.path.dirname(pyx_file)
            build_extension.build_lib = lib_dir
            build_extension.run()
            self._code_cache[key] = module_name
        module = imp.load_dynamic(module_name, module_path)
        self._import_all(module)
# Guard so repeated %load_ext calls register the magics only once.
_loaded = False

def load_ipython_extension(ip):
    """Load the extension in IPython."""
    global _loaded
    if _loaded:
        return
    ip.register_magics(CythonMagics)
    _loaded = True
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/extensions/cythonmagic.py
|
Python
|
lgpl-3.0
| 7,191
|
[
"Brian"
] |
4bd609ad02b57a25988e3e4031b55276e89c89feee2f4da7f4dcac528b893b7a
|
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.simpleapi import (DeleteWorkspace, CreateSampleWorkspace,
AddSampleLog, EditInstrumentGeometry,
CloneWorkspace, CompareWorkspaces, FindEPP)
from testhelpers import run_algorithm
from mantid.api import AnalysisDataService
from scipy.constants import N_A, hbar, k
import numpy as np
class ComputeCalibrationCoefVanTest(unittest.TestCase):
    """Tests for the Mantid ComputeCalibrationCoefVan algorithm.

    Each test builds a two-bank sample workspace containing a Gaussian
    elastic peak on a linear background, finds the elastic-peak positions
    with FindEPP, and checks the algorithm's peak-integral output (and its
    Debye-Waller correction) against sums computed directly from the data.
    """

    def setUp(self):
        # Gaussian peak (centre 5, sigma 0.3) on a flat-ish background.
        input_ws = CreateSampleWorkspace(
            Function="User Defined",
            UserDefinedFunction="name=LinearBackground, " +
                                "A0=0.3;name=Gaussian, PeakCentre=5, Height=10, Sigma=0.3",
            NumBanks=2, BankPixelWidth=1, XMin=0, XMax=10, BinWidth=0.1,
            BankDistanceFromSample=4.0)
        self._input_ws = input_ws
        self._table = FindEPP(input_ws, OutputWorkspace="table")
        AddSampleLog(self._input_ws, LogName='wavelength', LogText='4.0',
                     LogType='Number', LogUnit='Angstrom')
        # These ranges correspond to 6*FWHM of the gaussian above,
        # the integration ranges of ComputeCalibrationCoefVan.
        self._lowerBoundRange = slice(28, 73)
        self._upperBoundRange = slice(27, 74)

    def test_output(self):
        # The output workspace should inherit the vanadium run's metadata
        # and spectrum count.
        outputWorkspaceName = "output_ws"
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        # Output = Vanadium ws
        self.assertEqual(wsoutput.getRun().getLogData('run_title').value,
                         self._input_ws.getRun().getLogData('run_title').value)
        # Size of output workspace
        self.assertEqual(wsoutput.getNumberHistograms(),
                         self._input_ws.getNumberHistograms())
        DeleteWorkspace(wsoutput)
        return

    def test_sum(self):
        outputWorkspaceName = "output_ws"
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        # Check whether the sum is calculated correctly, for theta=0, dwf=1
        # The result should be somewhere between the full bin sums.
        y_sumMin = np.sum(self._input_ws.readY(0)[self._lowerBoundRange])
        y_sumMax = np.sum(self._input_ws.readY(0)[self._upperBoundRange])
        e_sumMin = np.sqrt(np.sum(np.square(self._input_ws.readE(0)[self._lowerBoundRange])))
        e_sumMax = np.sqrt(np.sum(np.square(self._input_ws.readE(0)[self._upperBoundRange])))
        self.assertLess(y_sumMin, wsoutput.readY(0)[0])
        self.assertGreater(y_sumMax, wsoutput.readY(0)[0])
        self.assertLess(e_sumMin, wsoutput.readE(0)[0])
        self.assertGreater(e_sumMax, wsoutput.readE(0)[0])
        DeleteWorkspace(wsoutput)

    def test_dwf_using_default_temperature(self):
        outputWorkspaceName = "output_ws"
        # change theta to make dwf != 1
        EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
                               Azimuthal="0,0", DetectorIDs="1,2")
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        # 293 K is the algorithm's default temperature.
        self._checkDWF(wsoutput, 293.0)
        DeleteWorkspace(wsoutput)

    def test_temperature_from_sample_log(self):
        self._input_ws.mutableRun().addProperty('temperature', 0.0, True)
        outputWorkspaceName = "output_ws"
        EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
                               Azimuthal="0,0", DetectorIDs="1,2")
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        self._checkDWF(wsoutput, 0.0)
        DeleteWorkspace(wsoutput)

    def test_temperature_input_overrides_sample_log(self):
        # Sample log says 567 K, but the explicit Temperature=0 input wins.
        self._input_ws.mutableRun().addProperty('temperature', 567.0, True)
        outputWorkspaceName = "output_ws"
        EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
                               Azimuthal="0,0", DetectorIDs="1,2")
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName,
                                 Temperature=0.0)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        self._checkDWF(wsoutput, 0.0)
        DeleteWorkspace(wsoutput)

    def test_input_not_modified(self):
        # The algorithm must leave the vanadium input workspace untouched.
        backup = CloneWorkspace(self._input_ws)
        outputWorkspaceName = "output_ws"
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        self.assertTrue(CompareWorkspaces(backup, self._input_ws)[0])
        DeleteWorkspace(backup)

    def tearDown(self):
        if AnalysisDataService.doesExist(self._input_ws.name()):
            DeleteWorkspace(self._input_ws)
        if AnalysisDataService.doesExist(self._table.name()):
            DeleteWorkspace(self._table)

    def _checkDWF(self, wsoutput, temperature):
        """Check spectrum 1 of *wsoutput* against the expected DWF at 0 or 293 K."""
        # `integral` is the Debye integral factor for the given temperature;
        # the 293 K value is precomputed (presumably from the Debye model for
        # vanadium — TODO confirm against the algorithm's implementation).
        if temperature == 0.0:
            integral = 0.5
        elif temperature == 293.0:
            integral = 4.736767162094296 / 3.0
        else:
            raise RuntimeError("Unsupported temperature supplied to " +
                               "_checkDWF(). Use 0K or 293K only.")
        y_sumMin = np.sum(self._input_ws.readY(1)[self._lowerBoundRange])
        y_sumMax = np.sum(self._input_ws.readY(1)[self._upperBoundRange])
        e_sumMin = np.sqrt(np.sum(np.square(self._input_ws.readE(1)[self._lowerBoundRange])))
        e_sumMax = np.sqrt(np.sum(np.square(self._input_ws.readE(1)[self._upperBoundRange])))
        # Vanadium atomic mass in kg (molar mass 50.942 g/mol over Avogadro).
        mvan = 0.001*50.942/N_A
        Bcoef = 3.0*integral*1e+20*hbar*hbar/(2.0*mvan*k*389.0)
        # DWF at scattering angle 15 degrees and wavelength 4 Angstrom.
        dwf = np.exp(
            -1.0*Bcoef*(4.0*np.pi*np.sin(0.5*np.radians(15.0))/4.0)**2)
        self.assertLess(y_sumMin/dwf, wsoutput.readY(1)[0])
        self.assertGreater(y_sumMax/dwf, wsoutput.readY(1)[0])
        self.assertLess(e_sumMin/dwf, wsoutput.readE(1)[0])
        self.assertGreater(e_sumMax/dwf, wsoutput.readE(1)[0])
# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()
|
ScreamingUdder/mantid
|
Framework/PythonInterface/test/python/plugins/algorithms/ComputeCalibrationCoefVanTest.py
|
Python
|
gpl-3.0
| 7,519
|
[
"Gaussian"
] |
c2323bc740a42eb71b96ba543db96eb93d1123e4e7dd7e3287ce0047e29a9a4f
|
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
This test compares the NEON GRU layer against a numpy reference GRU
implementation and compares the NEON GRU bprop deltas to the gradients
estimated by finite differences.
The numpy reference GRU contains static methods for forward pass
and backward pass.
It runs a SINGLE layer of GRU and compares numerical values
The following are made sure to be the same in both GRUs
- initial h values (all zeros)
- initial W, b (ones or random values)
- input data (random data matrix)
- input error (random data matrix)
- the data shape inside GRU_ref is seq_len, 1, input_size.
Need transpose
- the data shape inside GRU (neon) is batch_size, seq_len * batch_size
"""
import itertools as itt
import numpy as np
from neon import NervanaObject, logger as neon_logger
from neon.initializers.initializer import Constant, Gaussian
from neon.layers import GRU
from neon.transforms import Logistic, Tanh
from neon.layers.container import DeltasTree
from gru_ref import GRU as RefGRU
from utils import allclose_with_out
def pytest_generate_tests(metafunc):
bsz_rng = [1]
if 'refgruargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
seq_rng = [2, 3, 4]
inp_rng = [3, 5, 10]
out_rng = [3, 5, 10]
else:
seq_rng = [3]
inp_rng = [5]
out_rng = [10]
fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)
metafunc.parametrize('refgruargs', fargs)
if 'gradgruargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
seq_rng = [2, 3]
inp_rng = [5, 10]
out_rng = [3, 5, 10]
else:
seq_rng = [3]
inp_rng = [5]
out_rng = [10]
fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)
metafunc.parametrize('gradgruargs', fargs)
def test_ref_compare_ones(backend_default, refgruargs):
    """Compare neon GRU against the reference with all weights set to 1.0."""
    (seq_len, input_size, hidden_size, batch_size) = refgruargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    check_gru(seq_len, input_size, hidden_size, batch_size,
              Constant(val=1.0), inp_moms=[1.0, 0.0])
def test_ref_compare_rand(backend_default, refgruargs):
    """Compare neon GRU against the reference with Gaussian-initialized weights."""
    (seq_len, input_size, hidden_size, batch_size) = refgruargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    check_gru(seq_len, input_size, hidden_size, batch_size,
              init_func=Gaussian())
def test_ref_compare_rand_init_state(backend_default, refgruargs):
    """Same comparison as the random-weight case, but with a nonzero h0."""
    (seq_len, input_size, hidden_size, batch_size) = refgruargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    check_gru(seq_len, input_size, hidden_size, batch_size,
              Gaussian(), add_init_state=True)
# compare neon GRU to reference GRU implementation
def check_gru(seq_len, input_size, hidden_size,
              batch_size, init_func, inp_moms=[0.0, 1.0], add_init_state=False):
    """Run fprop/bprop on a neon GRU and the numpy reference and assert match.

    Compares hidden states, per-gate deltas, and every weight/bias gradient
    to 1e-5 absolute tolerance. Raises AssertionError on mismatch.
    """
    # init_func is the initializer for the model params
    # inp_moms is the [ mean, std dev] of the random input
    # (NOTE: inp_moms is a mutable default, but it is never mutated here.)
    input_shape = (input_size, seq_len * batch_size)
    output_shape = (hidden_size, seq_len * batch_size)
    slice_shape = (hidden_size, batch_size)
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    # neon GRU
    gru = GRU(hidden_size,
              init_func,
              activation=Tanh(),
              gate_activation=Logistic())
    # generate random input tensor
    inp = np.random.rand(*input_shape) * inp_moms[1] + inp_moms[0]
    inp_dev = gru.be.array(inp)
    # generate random deltas tensor
    deltas = np.random.randn(*output_shape)
    # run neon fprop
    gru.configure((input_size, seq_len))
    gru.prev_layer = True
    gru.allocate()
    # Set up the layer's backprop delta buffers before any fprop/bprop.
    test_buffer = DeltasTree()
    gru.allocate_deltas(test_buffer)
    test_buffer.allocate_buffers()
    gru.set_deltas(test_buffer)
    if add_init_state:
        init_state = np.random.rand(*slice_shape)*inp_moms[1] + inp_moms[0]
        init_state_dev = gru.be.array(init_state)
        gru.fprop(inp_dev, init_state=init_state_dev)
    else:
        gru.fprop(inp_dev)
    # reference numpy GRU
    gru_ref = RefGRU(input_size, hidden_size)
    WGRU = gru_ref.weights
    # make ref weights and biases the same with neon model
    # neon stacks the r/z/candidate gate parameters row-wise; these index
    # ranges pick each gate's slice out of the stacked arrays.
    r_range = list(range(hidden_size))
    z_range = list(range(hidden_size, hidden_size * 2))
    c_range = list(range(hidden_size * 2, hidden_size * 3))
    WGRU[gru_ref.weights_ind_br][:] = gru.b.get()[r_range]
    WGRU[gru_ref.weights_ind_bz][:] = gru.b.get()[z_range]
    WGRU[gru_ref.weights_ind_bc][:] = gru.b.get()[c_range]
    WGRU[gru_ref.weights_ind_Wxr][:] = gru.W_input.get()[r_range]
    WGRU[gru_ref.weights_ind_Wxz][:] = gru.W_input.get()[z_range]
    WGRU[gru_ref.weights_ind_Wxc][:] = gru.W_input.get()[c_range]
    WGRU[gru_ref.weights_ind_Rhr][:] = gru.W_recur.get()[r_range]
    WGRU[gru_ref.weights_ind_Rhz][:] = gru.W_recur.get()[z_range]
    WGRU[gru_ref.weights_ind_Rhc][:] = gru.W_recur.get()[c_range]
    # transpose input X and do fprop
    # the reference code expects these shapes:
    # input_shape: (seq_len, input_size, batch_size)
    # output_shape: (seq_len, hidden_size, batch_size)
    inp_ref = inp.copy().T.reshape(
        seq_len, batch_size, input_size).swapaxes(1, 2)
    deltas_ref = deltas.copy().T.reshape(
        seq_len, batch_size, hidden_size).swapaxes(1, 2)
    if add_init_state:
        init_state_ref = init_state.copy()
        (dWGRU_ref, h_ref_list, dh_ref_list,
            dr_ref_list, dz_ref_list, dc_ref_list) = gru_ref.lossFun(inp_ref,
                                                                     deltas_ref,
                                                                     init_state_ref)
    else:
        (dWGRU_ref, h_ref_list, dh_ref_list,
            dr_ref_list, dz_ref_list, dc_ref_list) = gru_ref.lossFun(inp_ref,
                                                                     deltas_ref)
    neon_logger.display('====Verifying hidden states====')
    assert allclose_with_out(gru.outputs.get(),
                             h_ref_list,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('fprop is verified')
    # now test the bprop
    neon_logger.display('Making sure neon GRU matches numpy GRU in bprop')
    gru.bprop(gru.be.array(deltas))
    # grab the delta W from gradient buffer
    dWinput_neon = gru.dW_input.get()
    dWrecur_neon = gru.dW_recur.get()
    db_neon = gru.db.get()
    dWxr_neon = dWinput_neon[r_range]
    dWxz_neon = dWinput_neon[z_range]
    dWxc_neon = dWinput_neon[c_range]
    dWrr_neon = dWrecur_neon[r_range]
    dWrz_neon = dWrecur_neon[z_range]
    dWrc_neon = dWrecur_neon[c_range]
    dbr_neon = db_neon[r_range]
    dbz_neon = db_neon[z_range]
    dbc_neon = db_neon[c_range]
    # Per-gate deltas (r, z, candidate) stacked in one buffer, like the weights.
    drzc_neon = gru.rzhcan_delta_buffer.get()
    dr_neon = drzc_neon[r_range]
    dz_neon = drzc_neon[z_range]
    dc_neon = drzc_neon[c_range]
    dWxr_ref = dWGRU_ref[gru_ref.dW_ind_Wxr]
    dWxz_ref = dWGRU_ref[gru_ref.dW_ind_Wxz]
    dWxc_ref = dWGRU_ref[gru_ref.dW_ind_Wxc]
    dWrr_ref = dWGRU_ref[gru_ref.dW_ind_Rhr]
    dWrz_ref = dWGRU_ref[gru_ref.dW_ind_Rhz]
    dWrc_ref = dWGRU_ref[gru_ref.dW_ind_Rhc]
    dbr_ref = dWGRU_ref[gru_ref.dW_ind_br]
    dbz_ref = dWGRU_ref[gru_ref.dW_ind_bz]
    dbc_ref = dWGRU_ref[gru_ref.dW_ind_bc]
    # neon_logger.display '====Verifying hidden deltas ===='
    neon_logger.display('====Verifying r deltas ====')
    assert allclose_with_out(dr_neon,
                             dr_ref_list,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('====Verifying z deltas ====')
    assert allclose_with_out(dz_neon,
                             dz_ref_list,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('====Verifying hcan deltas ====')
    assert allclose_with_out(dc_neon,
                             dc_ref_list,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('====Verifying update on W_input====')
    neon_logger.display('dWxr')
    assert allclose_with_out(dWxr_neon,
                             dWxr_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('dWxz')
    assert allclose_with_out(dWxz_neon,
                             dWxz_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('dWxc')
    assert allclose_with_out(dWxc_neon,
                             dWxc_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('====Verifying update on W_recur====')
    neon_logger.display('dWrr')
    assert allclose_with_out(dWrr_neon,
                             dWrr_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('dWrz')
    assert allclose_with_out(dWrz_neon,
                             dWrz_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('dWrc')
    assert allclose_with_out(dWrc_neon,
                             dWrc_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('====Verifying update on bias====')
    neon_logger.display('dbr')
    assert allclose_with_out(dbr_neon,
                             dbr_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('dbz')
    assert allclose_with_out(dbz_neon,
                             dbz_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('dbc')
    assert allclose_with_out(dbc_neon,
                             dbc_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    neon_logger.display('bprop is verified')
    return
def reset_gru(gru):
    """Clear the GRU layer's cached input/output buffers.

    The gradient-check tests call fprop repeatedly on the same layer
    instance, so these internals must be dropped between calls for each
    fprop to start from a clean state.
    """
    for attr in ("x", "xs", "outputs"):
        setattr(gru, attr, None)
def test_gradient_neon_gru(backend_default, gradgruargs):
    """Numerical gradient check for the GRU layer (no initial hidden state)."""
    (seq_len, input_size, hidden_size, batch_size) = gradgruargs
    be = NervanaObject.be
    be.bsz = be.batch_size = batch_size
    gradient_check(seq_len, input_size, hidden_size, batch_size)
def test_gradient_neon_gru_init_state(backend_default, gradgruargs):
    """Numerical gradient check for the GRU layer with an initial hidden state."""
    (seq_len, input_size, hidden_size, batch_size) = gradgruargs
    be = NervanaObject.be
    be.bsz = be.batch_size = batch_size
    gradient_check(seq_len, input_size, hidden_size, batch_size, True)
def gradient_check(seq_len, input_size, hidden_size, batch_size,
                   add_init_state=False, threshold=1.0e-3):
    """Finite-difference gradient check for the neon GRU layer.

    For a given set of layer parameters, calculate the gradients and
    compare them to the derivatives obtained with the bprop function.
    This is repeated over a range of perturbation magnitudes
    (10**-5 .. 10**-1) and the perturbation size with the best result
    is used — necessary for 32 bit computations.

    'threshold' is the max allowed difference between the gradient
    estimate and the bprop deltas.
    """
    min_max_err = -1.0  # smallest worst-element error seen so far
    best_pert = -1.0    # perturbation magnitude that achieved min_max_err
    neon_logger.display('Perturb mag, max grad diff')
    for pert_exp in range(-5, 0):
        # need to generate the scaling and input outside
        # having an issue with the random number generator
        # when these are generated inside the gradient_calc
        # function
        input_shape = (input_size, seq_len * batch_size)
        output_shape = (hidden_size, seq_len * batch_size)
        rand_scale = np.random.random(output_shape) * 2.0 - 1.0
        inp = np.random.randn(*input_shape)
        pert_mag = 10.0**pert_exp
        (grad_est, deltas) = gradient_calc(seq_len,
                                           input_size,
                                           hidden_size,
                                           batch_size,
                                           add_init_state=add_init_state,
                                           epsilon=pert_mag,
                                           rand_scale=rand_scale,
                                           inp_bl=inp)
        dd = np.max(np.abs(grad_est - deltas))
        neon_logger.display('%e, %e' % (pert_mag, dd))
        if min_max_err < 0.0 or dd < min_max_err:
            min_max_err = dd
            # remember which perturbation produced the best agreement;
            # the final report previously showed the *last* pert_mag tried
            best_pert = pert_mag
        # reset the seed so models are same in each run
        # allclose_with_out(grad_est,deltas, rtol=0.0, atol=0.0)
        NervanaObject.be.rng_reset()
    # check that best value of worst case error is less than threshold
    neon_logger.display('Worst case error %e with perturbation %e' % (min_max_err, best_pert))
    neon_logger.display('Threshold %e' % (threshold))
    assert min_max_err < threshold
def gradient_calc(seq_len, input_size, hidden_size, batch_size, add_init_state=False,
                  epsilon=None, rand_scale=None, inp_bl=None):
    """Return (finite-difference gradient estimate, bprop deltas) for one GRU.

    A surrogate loss sum(rand_scale * output) is used so the loss gradient
    w.r.t. the output is simply rand_scale.  Each input element is perturbed
    by +/-epsilon and the central difference of the loss approximates the
    input gradient, which is compared against gru.bprop's deltas by the
    caller.
    """
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    input_shape = (input_size, seq_len * batch_size)
    # generate input if one is not given
    if inp_bl is None:
        inp_bl = np.random.randn(*input_shape)
    # neon gru instance
    gru = GRU(hidden_size, init=Gaussian(), activation=Tanh(), gate_activation=Logistic())
    inpa = gru.be.array(np.copy(inp_bl))
    # run fprop on the baseline input
    gru.configure((input_size, seq_len))
    gru.prev_layer = True
    gru.allocate()
    # wire up the delta buffers the layer writes bprop results into
    test_buffer = DeltasTree()
    gru.allocate_deltas(test_buffer)
    test_buffer.allocate_buffers()
    gru.set_deltas(test_buffer)
    if add_init_state is True:
        slice_shape = (hidden_size, batch_size)
        ini_s = np.random.randn(*slice_shape)
        ini_s_dev = gru.be.array(ini_s.copy())
        out_bl = gru.fprop(inpa, ini_s_dev).get()
    else:
        out_bl = gru.fprop(inpa).get()
    # random scaling/hash to generate fake loss
    if rand_scale is None:
        rand_scale = np.random.random(out_bl.shape) * 2.0 - 1.0
    # loss function would be:
    # loss_bl = np.sum(rand_scale * out_bl)
    # run back prop with rand_scale as the errors
    # use copy to avoid any interactions
    deltas_neon = gru.bprop(gru.be.array(np.copy(rand_scale))).get()
    # add a perturbation to each input element
    grads_est = np.zeros(inpa.shape)
    inp_pert = inp_bl.copy()
    for pert_ind in range(inpa.size):
        save_val = inp_pert.flat[pert_ind]
        # forward perturbation; the layer state must be cleared and
        # re-allocated before every fresh fprop
        inp_pert.flat[pert_ind] = save_val + epsilon
        reset_gru(gru)
        gru.allocate()
        if add_init_state is True:
            ini_s_dev = gru.be.array(ini_s.copy())
            out_pos = gru.fprop(gru.be.array(inp_pert), ini_s_dev).get()
        else:
            out_pos = gru.fprop(gru.be.array(inp_pert)).get()
        # backward perturbation
        inp_pert.flat[pert_ind] = save_val - epsilon
        reset_gru(gru)
        gru.allocate()
        if add_init_state is True:
            ini_s_dev = gru.be.array(ini_s.copy())
            out_neg = gru.fprop(gru.be.array(inp_pert), ini_s_dev).get()
        else:
            out_neg = gru.fprop(gru.be.array(inp_pert)).get()
        # calculate the loss with perturbations
        loss_pos = np.sum(rand_scale * out_pos)
        loss_neg = np.sum(rand_scale * out_neg)
        # compute the gradient estimate (central difference)
        grad = 0.5 / float(epsilon) * (loss_pos - loss_neg)
        grads_est.flat[pert_ind] = grad
        # reset the perturbed input element
        inp_pert.flat[pert_ind] = save_val
    del gru
    return (grads_est, deltas_neon)
|
Jokeren/neon
|
tests/test_gru.py
|
Python
|
apache-2.0
| 16,734
|
[
"Gaussian"
] |
a04b8fd81fdbe3586a64a11c1b855d81a314d779074704ecca9b3e1552961859
|
import numpy as np
import h5py
import matplotlib.pyplot as plt
import os
def input_layer(input_center, neuron_center, x_sigma, x_d_sigma):
    """Gaussian radial-basis response of one input-layer neuron.

    Each coordinate difference between the input point and the neuron's
    centre is squared and scaled by its own variance before the
    exponential is taken.
    """
    d0 = input_center[0] - neuron_center[0]
    d1 = input_center[1] - neuron_center[1]
    return np.exp(-d0 ** 2 / x_sigma - d1 ** 2 / x_d_sigma)
# Build a quiver-plot "navigation map" of the greedy policy stored in an
# HDF5 file of RBF-network weights (one arrow per neuron in an N x N grid).
episode = 0
N = 10
neuronCount = N * N
# centre grids for the RBF neurons over position and velocity ranges;
# retstep also returns the spacing, used below as the RBF width
_x_space_, x_centers_distance = np.linspace(-150, 30, N,
                                            retstep=True)
_x_d_space_, phi_centers_distance = np.linspace(-15, 15, N,
                                                retstep=True)
x_sigma = x_centers_distance ** 2
x_d_sigma = phi_centers_distance ** 2
x_direction = np.zeros(neuronCount)
phi_direction = np.zeros(neuronCount)
x_vector = np.zeros((N, N))
phi_vector = np.zeros((N, N))
# Q_val[j, k]: value of action k evaluated at neuron j's centre
Q_val = np.zeros((neuronCount, 3))
plt.figure('default')
data_set_centers = []
data_set_weights = []
filename = "vector-01-16-14.01.34s.hdf5"
# filename = "vector-01-16-17.04.01s.hdf5"
print(filename)
f0 = h5py.File(filename, 'r')
group0 = f0['neurons']
group1 = f0['weights']
data_set_centers.append(np.array(group0['centers']))
# one weight matrix per recorded episode, stored as weights_0, weights_1, ...
i = 0
while i < np.size(group1):
    data_set_weights.append(group1['weights_' + str(i)])
    i += 1
# print dataset_centers[0][0]
# print dataset_weights[1][0][1]
# Evaluate Q for each action at every neuron centre.
# NOTE(review): the weight is indexed by neuronIndex, constant over the
# inner nodeIndex sum — presumably it should be indexed by nodeIndex for a
# standard RBF read-out; confirm against the training code.
for k in range(3):
    for neuronIndex in range(neuronCount):
        for nodeIndex in range(neuronCount):
            Q_val[neuronIndex, k] \
                += \
                data_set_weights[episode][neuronIndex][k] \
                * input_layer(data_set_centers[0][nodeIndex],
                              data_set_centers[0][neuronIndex],
                              x_sigma,
                              x_d_sigma)
# dump the neuron centre coordinates as N x N grids for inspection
sorted_x = []
i = 0
while i < neuronCount:
    sorted_x.append(data_set_centers[0][i][0])
    i += 1
print(np.transpose(np.reshape(sorted_x, (N, N))))
sorted_phi = []
i = 0
while i < neuronCount:
    sorted_phi.append(data_set_centers[0][i][1])
    i += 1
print(np.transpose(np.reshape(sorted_phi, (N, N))))
# greedy action per neuron; map action id to an arrow direction
# (0 -> right, 1 -> left, 2 -> no arrow)
actions = np.argmax(Q_val[:, :], axis=1)
x_direction[actions == 0] = 1.
x_direction[actions == 1] = -1.
x_direction[actions == 2] = 0.
phi_direction[actions == 0] = 0.
phi_direction[actions == 1] = 0.
phi_direction[actions == 2] = 0.
x_vector = np.reshape(x_direction, (N, N))
phi_vector = np.reshape(phi_direction, (N, N))
f0.close()
plt.title('Navigation Map Episode ' + str(episode + 1))
plt.quiver(np.transpose(x_vector), np.transpose(phi_vector), scale=50)
plt.xlabel('Neuron (Position)')
plt.ylabel('Neuron (Velocity)')
plt.axis([-1, N, -1, N])
plt.savefig('naviMap' + str(episode + 1) + '.jpg')
plt.show()
|
cigani/RL
|
naviMap.py
|
Python
|
mit
| 2,664
|
[
"NEURON"
] |
1e799a2d37608a5fe7d49e57f5611125555152f1d40fe848614c01fc7b7534b4
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Windows Scheduled Task job file parser."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import winjob as winjob_formatter
from plaso.lib import eventdata
from plaso.lib import timelib_test
from plaso.parsers import test_lib
from plaso.parsers import winjob
class WinJobTest(test_lib.ParserTestCase):
  """Tests for the Windows Scheduled Task job file parser."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._parser = winjob.WinJobParser()

  def testParse(self):
    """Tests the Parse function."""
    test_file = self._GetTestFilePath(['wintask.job'])
    event_queue_consumer = self._ParseFile(self._parser, test_file)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    self.assertEquals(len(event_objects), 2)

    # Both events carry the same task metadata.
    expected_application = (
        u'C:\\Program Files (x86)\\Google\\Update\\GoogleUpdate.exe')
    expected_username = u'Brian'
    expected_trigger = u'DAILY'
    expected_comment = (
        u'Keeps your Google software up to date. If this task is disabled or '
        u'stopped, your Google software will not be kept up to date, meaning '
        u'security vulnerabilities that may arise cannot be fixed and '
        u'features may not work. This task uninstalls itself when there is '
        u'no Google software using it.')

    first_event = event_objects[0]
    self.assertEqual(first_event.application, expected_application)
    self.assertEqual(first_event.username, expected_username)
    self.assertEqual(
        first_event.timestamp_desc, eventdata.EventTimestamp.LAST_RUNTIME)
    self.assertEqual(first_event.trigger, expected_trigger)
    self.assertEqual(first_event.comment, expected_comment)
    self.assertEqual(
        first_event.timestamp,
        timelib_test.CopyStringToTimestamp('2013-08-24 12:42:00.112'))

    # Second event: same metadata, different timestamp semantics.
    second_event = event_objects[1]
    self.assertEqual(second_event.application, expected_application)
    self.assertEqual(second_event.username, expected_username)
    self.assertEqual(second_event.trigger, expected_trigger)
    self.assertEqual(second_event.comment, expected_comment)
    self.assertEqual(second_event.timestamp_desc, u'Scheduled To Start')
    self.assertEqual(
        second_event.timestamp,
        timelib_test.CopyStringToTimestamp('2013-07-12 15:42:00'))

    expected_msg = (
        u'Application: C:\\Program Files (x86)\\Google\\Update\\'
        u'GoogleUpdate.exe /ua /installsource scheduler '
        u'Scheduled by: Brian '
        u'Run Iteration: DAILY')
    expected_msg_short = (
        u'Application: C:\\Program Files (x86)\\Google\\Update\\'
        u'GoogleUpdate.exe /ua /insta...')
    self._TestGetMessageStrings(
        second_event, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
|
cvandeplas/plaso
|
plaso/parsers/winjob_test.py
|
Python
|
apache-2.0
| 3,780
|
[
"Brian"
] |
b1382034866f0294f8f7965bfd134cf20c6221ff53fbd8b15ca2c2fa77b05ce9
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import numpy as np
from sisl._internal import set_module
from sisl import Geometry, SuperCell
from ._common import geometry_define_nsc, geometry2uc
__all__ = ['sc', 'bcc', 'fcc', 'hcp', 'rocksalt']
# A few needed variables
_s30 = 1 / 2
_s60 = 3 ** .5 / 2
_s45 = 1 / 2 ** .5
_c30 = _s60
_c60 = _s30
_c45 = _s45
_t30 = 1 / 3 ** .5
_t45 = 1.
_t60 = 3 ** .5
@set_module("sisl.geom")
def sc(alat, atom):
""" Simple cubic lattice with 1 atom
Parameters
----------
alat : float
lattice parameter
atom : Atom
the atom in the SC lattice
"""
sc = SuperCell(np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]], np.float64) * alat)
g = Geometry([0, 0, 0], atom, sc=sc)
geometry_define_nsc(g)
return g
@set_module("sisl.geom")
def bcc(alat, atoms, orthogonal=False):
""" Body centered cubic lattice with 1 (non-orthogonal) or 2 atoms (orthogonal)
Parameters
----------
alat : float
lattice parameter
atoms : Atom
the atom(s) in the BCC lattice
orthogonal : bool, optional
whether the lattice is orthogonal (2 atoms)
"""
if orthogonal:
sc = SuperCell(np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]], np.float64) * alat)
ah = alat / 2
g = Geometry([[0, 0, 0], [ah, ah, ah]], atoms, sc=sc)
else:
sc = SuperCell(np.array([[-1, 1, 1],
[1, -1, 1],
[1, 1, -1]], np.float64) * alat / 2)
g = Geometry([0, 0, 0], atoms, sc=sc)
geometry_define_nsc(g)
return g
@set_module("sisl.geom")
def fcc(alat, atoms, orthogonal=False):
""" Face centered cubic lattice with 1 (non-orthogonal) or 4 atoms (orthogonal)
Parameters
----------
alat : float
lattice parameter
atoms : Atom
the atom(s) in the FCC lattice
orthogonal : bool, optional
whether the lattice is orthogonal (4 atoms)
"""
if orthogonal:
sc = SuperCell(np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]], np.float64) * alat)
ah = alat / 2
g = Geometry([[0, 0, 0], [ah, ah, 0],
[ah, 0, ah], [0, ah, ah]], atoms, sc=sc)
else:
sc = SuperCell(np.array([[0, 1, 1],
[1, 0, 1],
[1, 1, 0]], np.float64) * alat / 2)
g = Geometry([0, 0, 0], atoms, sc=sc)
geometry_define_nsc(g)
return g
@set_module("sisl.geom")
def hcp(a, atoms, coa=1.63333, orthogonal=False):
""" Hexagonal closed packed lattice with 2 (non-orthogonal) or 4 atoms (orthogonal)
Parameters
----------
a : float
lattice parameter for 1st and 2nd lattice vectors
atoms : Atom
the atom(s) in the HCP lattice
coa : float, optional
c over a parameter where c is the 3rd lattice vector length
orthogonal : bool, optional
whether the lattice is orthogonal (4 atoms)
"""
# height of hcp structure
c = a * coa
a3sq = a / 3 ** .5
if orthogonal:
sc = SuperCell([[a + a * _c60 * 2, 0, 0],
[0, a * _c30 * 2, 0],
[0, 0, c / 2]])
gt = Geometry([[0, 0, 0],
[a, 0, 0],
[a * _s30, a * _c30, 0],
[a * (1 + _s30), a * _c30, 0]], atoms, sc=sc)
# Create the rotated one on top
gr = gt.copy()
# mirror structure
gr.xyz[0, 1] += sc.cell[1, 1]
gr.xyz[1, 1] += sc.cell[1, 1]
gr = gr.translate(-np.amin(gr.xyz, axis=0))
# Now displace to get the correct offset
gr = gr.translate([0, a * _s30 / 2, 0])
g = gt.append(gr, 2)
else:
sc = SuperCell([a, a, c, 90, 90, 60])
g = Geometry([[0, 0, 0], [a3sq * _c30, a3sq * _s30, c / 2]],
atoms, sc=sc)
geometry_define_nsc(g)
return g
@set_module("sisl.geom")
def rocksalt(alat, atoms, orthogonal=False):
""" Two-element rocksalt lattice with 2 (non-orthogonal) or 8 atoms (orthogonal)
This is equivalent to the NaCl crystal structure (halite).
Parameters
----------
alat : float
lattice parameter
atoms : list
a list of two atoms that the crystal consists of
orthogonal : bool, optional
whether the lattice is orthogonal or not
"""
if isinstance(atoms, str):
atoms = [atoms, atoms]
if len(atoms) != 2:
raise ValueError(f"Invalid list of atoms, must have length 2")
g1 = fcc(alat, atoms[0], orthogonal=orthogonal)
g2 = fcc(alat, atoms[1], orthogonal=orthogonal).move(np.array([1, 1, 1]) * alat / 2)
g = g1.add(g2)
g = geometry2uc(g).sort(lattice=[2, 1, 0])
geometry_define_nsc(g)
return g
|
zerothi/sisl
|
sisl/geom/basic.py
|
Python
|
mpl-2.0
| 5,139
|
[
"CRYSTAL"
] |
cfb5c781c1c5a2f24d4926511e07f33e47a12f3d890c2dc4d8dcf3cfbadd9acd
|
import argparse
import sys
from ..utilities.log_utils import create_default_logger
from ..configuration.daijin_configurator import create_daijin_config
from .mikado import mikado_pipeline
from .assemble import assemble_transcripts_pipeline
from . import create_parser, create_config_parser
def main(call_args=None):
    """
    Main call function.

    :param call_args: Arguments to use to launch the pipeline. If unspecified, the default behaviour
    (using CL arguments) will be adopted.
    :return:
    """

    if call_args is None:
        call_args = sys.argv[1:]

    # Pass the long text as *description*; the first positional argument
    # of ArgumentParser is ``prog`` (the program name), which previously
    # made the usage line show this whole paragraph as the program name.
    parser = argparse.ArgumentParser(
        description="""A Directed Acyclic pipeline for gene model reconstruction from RNA seq data.
    Basically, a pipeline for driving Mikado. It will first align RNAseq reads against
    a genome using multiple tools, then creates transcript assemblies using multiple tools,
    and find junctions in the alignments using Portcullis.
    This input is then passed into Mikado.

    WARNING: the "assemble" part of this pipeline will be soon DEPRECATED.
    """)
    subparsers = parser.add_subparsers(
        title="Pipelines",
        help="""These are the pipelines that can be executed via daijin.""")
    subparsers.add_parser("configure",
                          help="Creates the configuration files for Daijin execution.")
    subparsers.choices["configure"] = create_config_parser()
    subparsers.choices["configure"].prog = "daijin configure"
    subparsers.choices["configure"].set_defaults(func=create_daijin_config)
    subparsers.add_parser("assemble",
                          description="Creates transcript assemblies from RNAseq data.",
                          help="""A pipeline that generates a variety of transcript assemblies
                          using various aligners and assemblers, as well a producing
                          a configuration file suitable for driving Mikado.
                          WARNING: this part of the Daijin pipeline will be DEPRECATED in future releases
                          as a new, more complete annotation pipeline is currently in development.""")
    subparsers.choices["assemble"] = create_parser()
    subparsers.choices["assemble"].add_argument(
        "config",
        help="Configuration file to use for running the transcript assembly pipeline.")
    subparsers.choices["assemble"].prog = "daijin assemble"
    subparsers.choices["assemble"].set_defaults(func=assemble_transcripts_pipeline)
    subparsers.add_parser("mikado",
                          description="Run full mikado pipeline",
                          help="""Using a supplied configuration file that describes
                          all input assemblies to use, it runs the Mikado pipeline,
                          including prepare, BLAST, transdecoder, serialise and pick.""")
    subparsers.choices["mikado"] = create_parser()
    subparsers.choices["mikado"].add_argument(
        "config",
        help="Configuration file to use for running the Mikado step of the pipeline.")
    subparsers.choices["mikado"].prog = "daijin mikado"
    subparsers.choices["mikado"].set_defaults(func=mikado_pipeline)
    try:
        args = parser.parse_args(call_args)
        if hasattr(args, "func"):
            args.func(args)
        else:
            parser.print_help()
    except KeyboardInterrupt:
        # bare raise re-raises the active exception, preserving its
        # traceback (``raise KeyboardInterrupt`` created a fresh one)
        raise
    except BrokenPipeError:
        # downstream consumer (e.g. ``head``) closed the pipe; exit quietly
        pass
    except Exception as exc:
        logger = create_default_logger("main")
        logger.error("Daijin crashed, cause:")
        logger.exception(exc)
        sys.exit(1)
if __name__ == '__main__':
# pylint: disable=redefined-builtin
# noinspection PyShadowingBuiltins
__spec__ = "Daijin"
# pylint: enable=redefined-builtin
main()
|
lucventurini/mikado
|
Mikado/daijin/__main__.py
|
Python
|
lgpl-3.0
| 3,818
|
[
"BLAST"
] |
089075338d5377ccf4c6f48350174390544fe90541bf06f79a0c8083d3a274d4
|
#!/usr/bin/env python
# encoding: utf-8
"""
Defines various nose unit tests
"""
import numpy as np
from .mh import MHSampler
from .ensemble import EnsembleSampler
from .ptsampler import PTSampler
logprecision = -4
def lnprob_gaussian(x, icov):
    """Unnormalised log-density of a zero-mean Gaussian with inverse covariance *icov*."""
    quad_form = np.dot(x, np.dot(icov, x))
    return -0.5 * quad_form
def lnprob_gaussian_nan(x, icov):
    """Like lnprob_gaussian, but returns NaN when every parameter is zero."""
    # an all-zero walker is treated as invalid => NaN
    if np.any(np.array(x)):
        return -np.dot(x, np.dot(icov, x)) / 2.0
    return np.nan
def log_unit_sphere_volume(ndim):
    """Return the log volume of the unit n-ball in *ndim* dimensions.

    Uses V_n = pi**(n/2) / (n/2)! for even n and
    V_n = 2**((n+1)/2) * pi**((n-1)/2) / n!! for odd n.
    """
    if ndim % 2 == 0:
        logfactorial = 0.0
        # (n/2)!  -- note the integer division: ``ndim / 2`` is a float
        # in Python 3 and would make range() raise TypeError
        for i in range(1, ndim // 2 + 1):
            logfactorial += np.log(i)
        return ndim / 2.0 * np.log(np.pi) - logfactorial
    else:
        # double factorial n!! over the odd integers 1, 3, ..., n
        logfactorial = 0.0
        for i in range(1, ndim + 1, 2):
            logfactorial += np.log(i)
        return (ndim + 1) / 2.0 * np.log(2.0) \
            + (ndim - 1) / 2.0 * np.log(np.pi) - logfactorial
class LogLikeGaussian(object):
    """Callable Gaussian log-likelihood with fixed inverse covariance."""

    def __init__(self, icov):
        """Initialize a gaussian PDF with the given inverse covariance
        matrix."""
        self.icov = icov

    def __call__(self, x):
        # -x^T icov x / 2, written inline rather than via lnprob_gaussian
        return -0.5 * np.dot(x, np.dot(self.icov, x))
class LogPriorGaussian(object):
    """Flat log-prior, optionally truncated at *cutoff* sigma from the origin.

    With a cutoff the prior is non-zero only inside an ellipse aligned
    with the principal axes of the distribution; outside it returns -inf.
    """

    def __init__(self, icov, cutoff=None):
        self.icov = icov
        self.cutoff = cutoff

    def __call__(self, x):
        if self.cutoff is None:
            return 0.0
        # Mahalanobis-type log-density, inlined from lnprob_gaussian
        dist2 = -np.dot(x, np.dot(self.icov, x)) / 2.0
        if -dist2 > self.cutoff * self.cutoff / 2.0:
            return float('-inf')
        return 0.0
def ln_flat(x):
    """Improper flat log-prior: constant 0 for every *x*."""
    return 0.0
class Tests:
    """Integration tests for the emcee samplers on a random correlated
    Gaussian target: run each sampler and check acceptance fractions and
    the first two moments of the chain against the known truth."""

    def setUp(self):
        # problem size and a random symmetric positive-definite covariance
        self.nwalkers = 100
        self.ndim = 5
        self.ntemp = 20
        self.N = 1000
        self.mean = np.zeros(self.ndim)
        self.cov = 0.5 - np.random.rand(self.ndim ** 2) \
            .reshape((self.ndim, self.ndim))
        self.cov = np.triu(self.cov)
        # symmetrize, then square to guarantee positive-definiteness
        self.cov += self.cov.T - np.diag(self.cov.diagonal())
        self.cov = np.dot(self.cov, self.cov)
        self.icov = np.linalg.inv(self.cov)
        # walkers start in a small ball around the origin
        self.p0 = [0.1 * np.random.randn(self.ndim)
                   for i in range(self.nwalkers)]
        self.truth = np.random.multivariate_normal(self.mean, self.cov, 100000)

    def check_sampler(self, N=None, p0=None):
        """Run self.sampler for N iterations and check acceptance and
        that the chain's mean/covariance approach the true values."""
        if N is None:
            N = self.N
        if p0 is None:
            p0 = self.p0
        for i in self.sampler.sample(p0, iterations=N):
            pass
        assert np.mean(self.sampler.acceptance_fraction) > 0.25
        assert np.all(self.sampler.acceptance_fraction > 0)
        chain = self.sampler.flatchain
        maxdiff = 10. ** (logprecision)
        assert np.all((np.mean(chain, axis=0) - self.mean) ** 2 / self.N ** 2
                      < maxdiff)
        assert np.all((np.cov(chain, rowvar=0) - self.cov) ** 2 / self.N ** 2
                      < maxdiff)

    def check_pt_sampler(self, cutoff, N=None, p0=None):
        """Like check_sampler for the parallel-tempering sampler; also
        verifies the thermodynamic-integration evidence estimate."""
        if N is None:
            N = self.N
        if p0 is None:
            p0 = self.p0
        for i in self.sampler.sample(p0, iterations=N):
            pass
        # Weaker assertions on acceptance fraction
        assert np.mean(self.sampler.acceptance_fraction) > 0.1, \
            "acceptance fraction < 0.1"
        assert np.mean(self.sampler.tswap_acceptance_fraction) > 0.1, \
            "tswap acceptance frac < 0.1"
        maxdiff = 10.0 ** logprecision
        # moments are checked on the zero-temperature chain only
        chain = np.reshape(self.sampler.chain[0, ...],
                           (-1, self.sampler.chain.shape[-1]))
        # np.savetxt('/tmp/chain.dat', chain)
        # analytic evidence of the truncated Gaussian for comparison
        log_volume = self.ndim * np.log(cutoff) \
            + log_unit_sphere_volume(self.ndim) \
            + 0.5 * np.log(np.linalg.det(self.cov))
        gaussian_integral = self.ndim / 2.0 * np.log(2.0 * np.pi) \
            + 0.5 * np.log(np.linalg.det(self.cov))
        lnZ, dlnZ = self.sampler.thermodynamic_integration_log_evidence()
        print(self.sampler.get_autocorr_time())
        assert np.abs(lnZ - (gaussian_integral - log_volume)) < 3 * dlnZ, \
            ("evidence incorrect: {0:g} versus correct {1:g} (uncertainty "
             "{2:g})").format(lnZ, gaussian_integral - log_volume, dlnZ)
        assert np.all((np.mean(chain, axis=0) - self.mean) ** 2.0 / N ** 2.0
                      < maxdiff), 'mean incorrect'
        assert np.all((np.cov(chain, rowvar=0) - self.cov) ** 2.0 / N ** 2.0
                      < maxdiff), 'covariance incorrect'

    def test_mh(self):
        # Metropolis-Hastings sampler on the Gaussian target
        self.sampler = MHSampler(self.cov, self.ndim, lnprob_gaussian,
                                 args=[self.icov])
        self.check_sampler(N=self.N * self.nwalkers, p0=self.p0[0])

    def test_ensemble(self):
        self.sampler = EnsembleSampler(self.nwalkers, self.ndim,
                                       lnprob_gaussian, args=[self.icov])
        self.check_sampler()

    def test_nan_lnprob(self):
        # a NaN log-probability must abort sampling immediately
        self.sampler = EnsembleSampler(self.nwalkers, self.ndim,
                                       lnprob_gaussian_nan,
                                       args=[self.icov])
        # If a walker is right at zero, ``lnprobfn`` returns ``np.nan``.
        p0 = self.p0
        p0[0] = 0.0
        try:
            self.check_sampler(p0=p0)
        except ValueError:
            # This should fail *immediately* with a ``ValueError``.
            return
        assert False, "We should never get here."

    def test_inf_nan_params(self):
        # non-finite *parameter* values must also abort sampling
        self.sampler = EnsembleSampler(self.nwalkers, self.ndim,
                                       lnprob_gaussian, args=[self.icov])
        # Set one of the walkers to have a ``np.nan`` value.
        p0 = self.p0
        p0[0][0] = np.nan
        try:
            self.check_sampler(p0=p0)
        except ValueError:
            # This should fail *immediately* with a ``ValueError``.
            pass
        else:
            assert False, "The sampler should have failed by now."
        # Set one of the walkers to have a ``np.inf`` value.
        p0[0][0] = np.inf
        try:
            self.check_sampler(p0=p0)
        except ValueError:
            # This should fail *immediately* with a ``ValueError``.
            pass
        else:
            assert False, "The sampler should have failed by now."
        # Set one of the walkers to have a ``np.inf`` value.
        p0[0][0] = -np.inf
        try:
            self.check_sampler(p0=p0)
        except ValueError:
            # This should fail *immediately* with a ``ValueError``.
            pass
        else:
            assert False, "The sampler should have failed by now."

    # def test_parallel(self):
    #     self.sampler = EnsembleSampler(self.nwalkers, self.ndim,
    #                                    lnprob_gaussian, args=[self.icov],
    #                                    threads=2)
    #     self.check_sampler()

    def test_pt_sampler(self):
        # parallel tempering on the truncated Gaussian (flat prior inside
        # a cutoff-sigma ellipse, -inf outside)
        cutoff = 10.0
        self.sampler = PTSampler(self.ntemp, self.nwalkers, self.ndim,
                                 LogLikeGaussian(self.icov),
                                 LogPriorGaussian(self.icov, cutoff=cutoff))
        p0 = np.random.multivariate_normal(mean=self.mean, cov=self.cov,
                                           size=(self.ntemp, self.nwalkers))
        self.check_pt_sampler(cutoff, p0=p0, N=1000)

    def test_blobs(self):
        # lnprobfn returning (lnprob, blob) pairs; check blob bookkeeping
        lnprobfn = lambda p: (-0.5 * np.sum(p ** 2), np.random.rand())
        self.sampler = EnsembleSampler(self.nwalkers, self.ndim, lnprobfn)
        self.check_sampler()
        # Make sure that the shapes of everything are as expected.
        assert (self.sampler.chain.shape == (self.nwalkers, self.N, self.ndim)
                and len(self.sampler.blobs) == self.N
                and len(self.sampler.blobs[0]) == self.nwalkers), \
            "The blob dimensions are wrong."
        # Make sure that the blobs aren't all the same.
        blobs = self.sampler.blobs
        assert np.any([blobs[-1] != blobs[i] for i in range(len(blobs) - 1)])
|
tbs1980/emcee
|
emcee/tests.py
|
Python
|
mit
| 8,459
|
[
"Gaussian"
] |
222c64cff0cef89ab8e3816a988d8cffffc9a8957183adebe8ac895847ed5c62
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
from string import ascii_letters, digits
from ansible.compat.six import string_types
from ansible.compat.six.moves import configparser
from ansible.errors import AnsibleOptionsError
from ansible.module_utils._text import to_text
from ansible.parsing.quoting import unquote
from ansible.utils.path import makedirs_safe
BOOL_TRUE = frozenset([ "true", "t", "y", "1", "yes", "on" ])
def mk_boolean(value):
ret = value
if not isinstance(value, bool):
if value is None:
ret = False
ret = (str(value).lower() in BOOL_TRUE)
return ret
def shell_expand(path, expand_relative_paths=False):
    '''
    Expand environment variables and '~' in *path*.

    Needed because os.path.expanduser does not work when path is None,
    which is the default for ANSIBLE_PRIVATE_KEY_FILE; falsy paths are
    returned untouched.  Relative paths may optionally be anchored at
    the directory of the loaded config file.
    '''
    if not path:
        return path
    expanded = os.path.expanduser(os.path.expandvars(path))
    if expand_relative_paths and not expanded.startswith('/'):
        # paths are always 'relative' to the config?
        if 'CONFIG_FILE' in globals():
            cfg_dir = os.path.dirname(CONFIG_FILE)
            expanded = os.path.abspath(os.path.join(cfg_dir, expanded))
    return expanded
def get_config(p, section, key, env_var, default, value_type=None, expand_relative_paths=False):
    ''' return a configuration variable with casting

    :arg p: A ConfigParser object to look for the configuration in
    :arg section: A section of the ini config that should be examined for this section.
    :arg key: The config key to get this config from
    :arg env_var: An Environment variable to check for the config var. If
        this is set to None then no environment variable will be used.
    :arg default: A default value to assign to the config var if nothing else sets it.
    :kwarg value_type: The type of the value.  This can be any of the following strings:
        :boolean: sets the value to a True or False value
        :integer: Sets the value to an integer or raises a ValueType error
        :float: Sets the value to a float or raises a ValueType error
        :list: Treats the value as a comma separated list.  Split the value
            and return it as a python list.
        :none: Sets the value to None
        :path: Expands any environment variables and tilde's in the value.
        :tmp_path: Create a unique temporary directory inside of the directory
            specified by value and return its path.
        :pathlist: Treat the value as a typical PATH string.  (On POSIX, this
            means colon separated strings.)  Split the value and then expand
            each part for environment variables and tildes.
    :kwarg expand_relative_paths: for pathlist and path types, if this is set
        to True then also change any relative paths into absolute paths.  The
        default is False.
    '''
    value = _get_config(p, section, key, env_var, default)
    # NOTE: booleans are coerced unconditionally (so '', None, 0 become
    # False); every other cast below only applies when the value is truthy,
    # otherwise the raw falsy value falls through to to_text()
    if value_type == 'boolean':
        value = mk_boolean(value)
    elif value:
        if value_type == 'integer':
            value = int(value)
        elif value_type == 'float':
            value = float(value)
        elif value_type == 'list':
            # comma-separated string -> list of stripped items; an existing
            # list is passed through unchanged
            if isinstance(value, string_types):
                value = [x.strip() for x in value.split(',')]
        elif value_type == 'none':
            # the literal string "None" (e.g. from an ini file) becomes None
            if value == "None":
                value = None
        elif value_type == 'path':
            value = shell_expand(value, expand_relative_paths=expand_relative_paths)
        elif value_type == 'tmppath':
            # ensure the parent dir exists (owner-only perms), then create a
            # unique per-process temp dir inside it
            value = shell_expand(value)
            if not os.path.exists(value):
                makedirs_safe(value, 0o700)
            prefix = 'ansible-local-%s' % os.getpid()
            value = tempfile.mkdtemp(prefix=prefix, dir=value)
        elif value_type == 'pathlist':
            # os.pathsep-separated string -> list of expanded paths
            if isinstance(value, string_types):
                value = [shell_expand(x, expand_relative_paths=expand_relative_paths) \
                         for x in value.split(os.pathsep)]
        elif isinstance(value, string_types):
            # no explicit type: just strip surrounding quotes
            value = unquote(value)
    return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
# TODO: Once we branch Ansible-2.2, change to the following in devel
#return to_text(p.get(section, key, raw=True), errors='surrogate_or_strict')
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
    '''Locate and parse the first ansible.cfg found.

    Search order (first hit wins): $ANSIBLE_CONFIG, ./ansible.cfg,
    ~/.ansible.cfg, /etc/ansible/ansible.cfg.  Returns the parser and the
    winning path, or (None, '') when no config file exists anywhere.
    '''
    parser = configparser.ConfigParser()

    # $ANSIBLE_CONFIG may point at either a file or a directory.
    env_path = os.getenv("ANSIBLE_CONFIG", None)
    if env_path is not None:
        env_path = os.path.expanduser(env_path)
        if os.path.isdir(env_path):
            env_path += "/ansible.cfg"

    try:
        cwd_path = os.getcwd() + "/ansible.cfg"
    except OSError:
        # cwd may have been deleted out from under us
        cwd_path = None

    candidates = [
        env_path,
        cwd_path,
        os.path.expanduser("~/.ansible.cfg"),
        "/etc/ansible/ansible.cfg",
    ]

    for candidate in candidates:
        if candidate is None or not os.path.exists(candidate):
            continue
        try:
            parser.read(candidate)
        except configparser.Error as e:
            raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
        return parser, candidate

    return None, ''
# Parse the first ansible.cfg found (env var, cwd, home, /etc/ansible) once
# at import time; CONFIG_FILE records which path was used ('' when none).
p, CONFIG_FILE = load_config_file()
# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# the default whitelist for cow stencils
DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant',
                         'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep',
                         'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder',
                         'vader-koala', 'vader', 'www',]
# sections in config file
DEFAULTS='defaults'
# FIXME: add deprecation warning when these get set
#### DEPRECATED VARS ####
# use more sanely named 'inventory'
DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', value_type='path')
# this is not used since 0.5 but people might still have in config
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)
# If --tags or --skip-tags is given multiple times on the CLI and this is
# True, merge the lists of tags together. If False, let the last argument
# overwrite any previous ones. Behaviour is overwrite through 2.2. 2.3
# overwrites but prints deprecation. 2.4 the default is to merge.
MERGE_MULTIPLE_CLI_TAGS = get_config(p, DEFAULTS, 'merge_multiple_cli_tags', 'ANSIBLE_MERGE_MULTIPLE_CLI_TAGS', False, value_type='boolean')
#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, value_type='boolean')
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, value_type='path')
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, value_type='pathlist')
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', value_type='pathlist', expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '~/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '~/.ansible/tmp', value_type='tmppath')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, value_type='integer')
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_MODULE_SET_LOCALE = get_config(p, DEFAULTS, 'module_set_locale','ANSIBLE_MODULE_SET_LOCALE',False, value_type='boolean')
DEFAULT_MODULE_COMPRESSION= get_config(p, DEFAULTS, 'module_compression', None, 'ZIP_DEFLATED')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, value_type='integer')
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, value_type='integer')
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, value_type='boolean')
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, value_type='path')
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, value_type='integer')
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, value_type='boolean')
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, value_type='path')
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', 'smart')
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, value_type='boolean')
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, value_type='boolean')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, value_type='boolean')
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GATHER_SUBSET', 'all').lower()
DEFAULT_GATHER_TIMEOUT = get_config(p, DEFAULTS, 'gather_timeout', 'ANSIBLE_GATHER_TIMEOUT', 10, value_type='integer')
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', value_type='path')
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, value_type='boolean')
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], value_type='list')
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, value_type='integer')
DEFAULT_INTERNAL_POLL_INTERVAL = get_config(p, DEFAULTS, 'internal_poll_interval', None, 0.001, value_type='float')
ERROR_ON_MISSING_HANDLER = get_config(p, DEFAULTS, 'error_on_missing_handler', 'ANSIBLE_ERROR_ON_MISSING_HANDLER', True, value_type='boolean')
# static includes
DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, value_type='boolean')
DEFAULT_HANDLER_INCLUDES_STATIC = get_config(p, DEFAULTS, 'handler_includes_static', 'ANSIBLE_HANDLER_INCLUDES_STATIC', False, value_type='boolean')
# disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, value_type='boolean')
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, value_type='boolean')
ALLOW_WORLD_READABLE_TMPFILES = get_config(p, DEFAULTS, 'allow_world_readable_tmpfiles', None, False, value_type='boolean')
# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', value_type='list')
DEFAULT_LIBVIRT_LXC_NOSECLABEL = get_config(p, 'selinux', 'libvirt_lxc_noseclabel', 'LIBVIRT_LXC_NOSECLABEL', False, value_type='boolean')
### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, value_type='boolean')
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, value_type='boolean')
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, value_type='boolean')
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H -S -n')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, value_type='boolean')
# Become
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'doas': 'Permission denied', 'dzdo': '', 'ksu': 'Password incorrect'} #FIXME: deal with i18n
BECOME_MISSING_STRINGS = {'sudo': 'sorry, a password is required to run sudo', 'su': '', 'pbrun': '', 'pfexec': '', 'doas': 'Authorization required', 'dzdo': '', 'ksu': 'No password given'} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','doas','dzdo','ksu']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, value_type='boolean')
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, value_type='boolean')
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, value_type='boolean')
# PLUGINS
# Modules that can optimize with_items loops into a single call. Currently
# these modules must (1) take a "name" or "pkg" parameter that is a list. If
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apk, apt, dnf, homebrew, pacman, pkgng, yum, zypper", value_type='list')
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', value_type='pathlist')
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', value_type='pathlist')
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', value_type='pathlist')
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', value_type='pathlist')
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', value_type='pathlist')
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS', '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', value_type='pathlist')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', value_type='pathlist')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', value_type='pathlist')
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', value_type='pathlist')
DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS', '~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', value_type='pathlist')
DEFAULT_STRATEGY = get_config(p, DEFAULTS, 'strategy', 'ANSIBLE_STRATEGY', 'linear')
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, value_type='integer')
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, value_type='boolean')
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, value_type='boolean')
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, value_type='boolean')
ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default')
ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, value_type='list')
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, value_type='boolean')
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, value_type='boolean')
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, value_type='boolean')
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, value_type='boolean')
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, value_type='boolean')
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], value_type='list')
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, value_type='boolean')
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, value_type='boolean')
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], value_type='list')
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, value_type='boolean')
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, value_type='path')
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, value_type='none')
DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'ANSIBLE_DISPLAY_ARGS_TO_STDOUT', False, value_type='boolean')
MAX_FILE_SIZE_FOR_DIFF = get_config(p, DEFAULTS, 'max_diff_size', 'ANSIBLE_MAX_DIFF_SIZE', 1024*1024, value_type='integer')
# CONNECTION RELATED
USE_PERSISTENT_CONNECTIONS = get_config(p, DEFAULTS, 'use_persistent_connections', 'ANSIBLE_USE_PERSISTENT_CONNECTIONS', False, value_type='boolean')
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-C -o ControlMaster=auto -o ControlPersist=60s')
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', u"%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_CONTROL_PATH_DIR = get_config(p, 'ssh_connection', 'control_path_dir', 'ANSIBLE_SSH_CONTROL_PATH_DIR', u'~/.ansible/cp')
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, value_type='boolean')
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, value_type='integer')
ANSIBLE_SSH_EXECUTABLE = get_config(p, 'ssh_connection', 'ssh_executable', 'ANSIBLE_SSH_EXECUTABLE', 'ssh')
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, value_type='boolean')
PARAMIKO_HOST_KEY_AUTO_ADD = get_config(p, 'paramiko_connection', 'host_key_auto_add', 'ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD', False, value_type='boolean')
PARAMIKO_PROXY_COMMAND = get_config(p, 'paramiko_connection', 'proxy_command', 'ANSIBLE_PARAMIKO_PROXY_COMMAND', None)
PARAMIKO_LOOK_FOR_KEYS = get_config(p, 'paramiko_connection', 'look_for_keys', 'ANSIBLE_PARAMIKO_LOOK_FOR_KEYS', True, value_type='boolean')
PERSISTENT_CONNECT_TIMEOUT = get_config(p, 'persistent_connection', 'connect_timeout', 'ANSIBLE_PERSISTENT_CONNECT_TIMEOUT', 30, value_type='integer')
PERSISTENT_CONNECT_RETRIES = get_config(p, 'persistent_connection', 'connect_retries', 'ANSIBLE_PERSISTENT_CONNECT_RETRIES', 10, value_type='integer')
PERSISTENT_CONNECT_INTERVAL = get_config(p, 'persistent_connection', 'connect_interval', 'ANSIBLE_PERSISTENT_CONNECT_INTERVAL', 1, value_type='integer')
# obsolete -- will be formally removed
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, value_type='integer')
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, value_type='integer')
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, value_type='float')
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, value_type='integer')
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, value_type='boolean')
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, value_type='boolean')
# galaxy related
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, value_type='boolean')
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', value_type='list')
STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], value_type='list' )
# colors
COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white')
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')
# diff
DIFF_CONTEXT = get_config(p, 'diff', 'context', 'ANSIBLE_DIFF_CONTEXT', 3, value_type='integer')
# non-configurable things
# modules that must receive a free-form argument string rather than key=value pairs
MODULE_REQUIRE_ARGS = ['command', 'win_command', 'shell', 'win_shell', 'raw', 'script']
# modules whose raw output is not treated as JSON
MODULE_NO_JSON = ['command', 'win_command', 'shell', 'win_shell', 'raw']
# secrets start unset here; they are not read from ansible.cfg and are
# populated elsewhere at runtime (e.g. from prompts/CLI options)
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
# supported on-disk vault format versions (currently only 1.0)
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
TREE_DIR = None
# addresses treated as the implicit local host
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
# module search
# file extensions never considered when scanning module/plugin directories
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
IGNORE_FILES = ["COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES"]
|
kaarolch/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 28,089
|
[
"Galaxy",
"MOOSE"
] |
dbf337f47b8303fa3207249bd1acbc8ff0890dd2a24734f34a2ce732b4578d3a
|
import shutil
import sys
import os
import re
import io
import traceback
from pybtex.plugin import find_plugin
from pybtex.database import BibliographyData, parse_file
from pybtex.database.input.bibtex import UndefinedMacro as undefined_macro_exception
import MooseDocs
from MooseCommonExtension import MooseCommonExtension
from markdown.preprocessors import Preprocessor
from markdown.util import etree
import logging
log = logging.getLogger(__name__)
class MooseBibtex(MooseCommonExtension, Preprocessor):
    """
    Creates per-page bibliographies using latex syntax.

    Supported (latex style) commands, ignored when escaped with a leading
    backtick:
        \\bibliography{a.bib, b.bib}   - declare the bibtex database file(s)
        \\bibliographystyle{plain}     - choose the pybtex formatting style
        \\cite{key} / \\citet{key} / \\citep{key} - in-text citations
    """
    RE_BIBLIOGRAPHY = r'(?<!`)\\bibliography\{(.*?)\}'
    RE_STYLE = r'(?<!`)\\bibliographystyle\{(.*?)\}'
    RE_CITE = r'(?<!`)\\(?P<cmd>cite|citet|citep)\{(?P<keys>.*?)\}'

    def __init__(self, markdown_instance=None, **kwargs):
        MooseCommonExtension.__init__(self, **kwargs)
        Preprocessor.__init__(self, markdown_instance)
        # Optional bibtex files that only define @string macros; they are
        # prepended to each .bib file before parsing (see parseBibtexFile).
        self._macro_files = kwargs.pop('macro_files', None)

    def parseBibtexFile(self, bibfile):
        """
        Returns parsed bibtex file. If "macro_files" are supplied in the configuration
        file, then a temporary file will be made that contains the supplied macros
        above the original bib file. This temporary combined file can then be
        parsed by pybtex.
        """
        if self._macro_files:
            # NOTE(review): the fixed name 'tBib.bib' in the current working
            # directory is not safe for concurrent builds -- consider tempfile.
            with open("tBib.bib", "wb") as tBib:
                for tFile in self._macro_files:
                    with open(MooseDocs.abspath(tFile.strip()), "rb") as inFile:
                        shutil.copyfileobj(inFile, tBib)
                with open(bibfile, "rb") as inFile:
                    shutil.copyfileobj(inFile, tBib)
            data = parse_file("tBib.bib")
            os.remove("tBib.bib")
        else:
            data = parse_file(bibfile)
        return data

    def run(self, lines):
        """
        Create a bibliography from cite commands.

        Returns the (possibly rewritten) page split back into lines; the
        original lines are returned unchanged when no \\bibliography command
        is present or a bib file fails to parse.
        """
        # Join the content to enable regex searches throughout entire text
        content = '\n'.join(lines)

        # Build the database of bibtex data
        self._citations = []               # member b/c it is used in substitution function
        self._bibtex = BibliographyData()  # ""
        bibfiles = []
        match = re.search(self.RE_BIBLIOGRAPHY, content)
        if match:
            for bfile in match.group(1).split(','):
                try:
                    bibfiles.append(MooseDocs.abspath(bfile.strip()))
                    data = self.parseBibtexFile(bibfiles[-1])
                except Exception as e:
                    if isinstance(e, undefined_macro_exception):
                        log.error('Undefined macro in bibtex file: {}, '
                                  'specify macro_files arguments in configuration file (e.g. moosedocs.yml)'
                                  .format(bfile.strip()))
                    else:
                        log.error('Failed to parse bibtex file: {}'.format(bfile.strip()))
                        # BUGFIX: traceback.print_exc() takes an optional
                        # *limit*, not an exception instance; passing the
                        # exception object misused the API.
                        traceback.print_exc()
                    return lines
                self._bibtex.add_entries(data.entries.iteritems())
        else:
            return lines

        # Determine the style
        match = re.search(self.RE_STYLE, content)
        if match:
            content = content.replace(match.group(0), '')
            try:
                style = find_plugin('pybtex.style.formatting', match.group(1))
            except:
                # NOTE(review): pybtex raises a plugin-specific error here;
                # kept broad to avoid importing pybtex internals.
                log.error('Unknown bibliography style "{}"'.format(match.group(1)))
                return lines
        else:
            style = find_plugin('pybtex.style.formatting', 'plain')

        # Replace citations with author date, as an anchor
        content = re.sub(self.RE_CITE, self.authors, content)

        # Create html bibliography
        if self._citations:
            # Generate formatted html using pybtex
            formatted_bibliography = style().format_bibliography(self._bibtex, self._citations)
            backend = find_plugin('pybtex.backends', 'html')
            stream = io.StringIO()
            backend().write_to_stream(formatted_bibliography, stream)

            # Strip the bib items from the formatted html
            html = re.findall(r'\<dd\>(.*?)\</dd\>', stream.getvalue(), flags=re.MULTILINE|re.DOTALL)

            # Produces an ordered list with anchors to the citations
            output = u'<ol class="moose-bibliography" data-moose-bibfiles="{}">\n'.format(str(bibfiles))
            for i, item in enumerate(html):
                output += u'<li name="{}">{}</li>\n'.format(self._citations[i], item)
            output += u'</ol>\n'
            content = re.sub(self.RE_BIBLIOGRAPHY, self.markdown.htmlStash.store(output, safe=True), content)

        return content.split('\n')

    def authors(self, match):
        """
        Return the author(s) citation for text, linked to bibliography.

        Used as the re.sub callback for RE_CITE; records every cited key in
        self._citations so the bibliography list can be built afterwards.
        """
        cmd = match.group('cmd')
        keys = match.group('keys')
        tex = '\\%s{%s}' % (cmd, keys)

        cite_list = []

        # Loop over all keys in the cite command
        for key in [k.strip() for k in keys.split(',')]:

            # Error if the key is not found and move on
            if key not in self._bibtex.entries:
                log.error('Unknown bibtext key: {}'.format(key))
                continue

            # Build the author list
            self._citations.append(key)
            entry = self._bibtex.entries[key]
            a = entry.persons['author']
            n = len(a)
            if n > 2:
                author = '{} et al.'.format(' '.join(a[0].last_names))
            elif n == 2:
                a0 = ' '.join(a[0].last_names)
                a1 = ' '.join(a[1].last_names)
                author = '{} and {}'.format(a0, a1)
            else:
                author = ' '.join(a[0].last_names)

            # citep -> parenthetical "(Author, Year)"; cite/citet -> "Author (Year)"
            if cmd == 'citep':
                a = '<a href="#{}">{}, {}</a>'.format(key, author, entry.fields['year'])
            else:
                a = '<a href="#{}">{} ({})</a>'.format(key, author, entry.fields['year'])
            cite_list.append(a)

        # Create the correct text for list of keys in the cite command
        if len(cite_list) == 2:
            cite_list = [' and '.join(cite_list)]
        elif len(cite_list) > 2:
            cite_list[-1] = 'and ' + cite_list[-1]

        # Write the html
        if cmd == 'citep':
            html = '(<span data-moose-cite="{}">{}</span>)'.format(tex, '; '.join(cite_list))
        else:
            html = '<span data-moose-cite="{}">{}</span>'.format(tex, ', '.join(cite_list))

        return self.markdown.htmlStash.store(html, safe=True)
|
paulthulstrup/moose
|
python/MooseDocs/extensions/MooseBibtex.py
|
Python
|
lgpl-2.1
| 5,982
|
[
"MOOSE"
] |
4c48649e853e41979ba85d597e2792f11a8f674b0995d5ca8155c4fdcf9efe62
|
# Emacs: treat this as -*- python -*-
import os
import gtk
import pango
import tempfile
from math import cos, sin, sqrt, atan, atan2
from os.path import basename
import numpy as np
from ase.data import chemical_symbols
from ase.data.colors import jmol_colors
from ase.gui.repeat import Repeat
from ase.gui.rotate import Rotate
from ase.gui.render import Render
from ase.gui.colors import ColorWindow
from ase.gui.defaults import read_defaults
from ase.utils import rotate
from ase.quaternions import Quaternion
class View:
    def __init__(self, vbox, rotations):
        """Build the gtk drawing area and initialise view state.

        :param vbox: gtk box the drawing area is packed into
        :param rotations: rotation specification passed to ase.utils.rotate
            to produce the initial 3x3 viewing-axes matrix
        """
        self.colormode = 'jmol' # The default colors
        self.nselected = 0
        self.labels = None
        self.light_green_markings = 0
        self.axes = rotate(rotations)
        # this is a hack, in order to be able to toggle menu actions off/on
        # without getting into an infinte loop
        self.menu_change = 0
        self.atoms_to_rotate = None
        self.drawing_area = gtk.DrawingArea()
        self.drawing_area.set_size_request(450, 450)
        # Mouse interaction handlers (defined elsewhere on this class).
        self.drawing_area.connect('button_press_event', self.press)
        self.drawing_area.connect('button_release_event', self.release)
        self.drawing_area.connect('motion-notify-event', self.move)
        # Signals used to handle backing pixmap:
        self.drawing_area.connect('expose_event', self.expose_event)
        self.drawing_area.connect('configure_event', self.configure_event)
        self.drawing_area.set_events(gtk.gdk.BUTTON_PRESS_MASK |
                                     gtk.gdk.BUTTON_RELEASE_MASK |
                                     gtk.gdk.BUTTON_MOTION_MASK |
                                     gtk.gdk.POINTER_MOTION_HINT_MASK)
        vbox.pack_start(self.drawing_area)
        self.drawing_area.show()
        self.configured = False
        self.config = None
        self.frame = None
    def set_coordinates(self, frame=None, focus=None):
        """Rebuild all drawable coordinates (atoms, cell dashes, bonds).

        Allocates self.X large enough for atom positions plus the unit-cell
        segment starts (self.B1) and bond starts, then delegates the actual
        fill to set_frame(init=True).
        """
        if frame is None:
            frame = self.frame
        self.make_box()
        self.bind(frame)
        n = self.images.natoms
        self.X = np.empty((n + len(self.B1) + len(self.bonds), 3))
        #self.X[n:] = np.dot(self.B1, self.images.A[frame])
        #self.B = np.dot(self.B2, self.images.A[frame])
        self.set_frame(frame, focus=focus, init=True)
    def set_frame(self, frame=None, focus=False, init=False):
        """Switch the view to image *frame* and refresh derived geometry.

        Cell/bond coordinates are only recomputed when *init* is set or the
        frame changed; atom positions are always copied into self.X[:n].

        :param frame: image index; defaults to the current frame
        :param focus: when True re-centre/zoom on the structure, else redraw
        :param init: force a full rebuild of cell and bond arrays
        """
        if frame is None:
            frame = self.frame
        n = self.images.natoms
        # Clamp a stale frame pointer (e.g. after images were removed).
        # NOTE(review): '>' looks like it should be '>=' since valid indices
        # are 0..nimages-1 -- confirm against callers before changing.
        if self.frame > self.images.nimages:
            self.frame = self.images.nimages - 1
        if init or frame != self.frame:
            A = self.images.A
            Disp = self.images.D  # NOTE(review): unused in this method
            nc = len(self.B1)
            nb = len(self.bonds)
            if init or (A[frame] != A[self.frame]).any():
                # Unit cell changed: recompute dash start points (tail of
                # self.X) and dash end points (self.B[:nc]).
                self.X[n:n + nc] = np.dot(self.B1, A[frame])
                self.B = np.empty((nc + nb, 3))
                self.B[:nc] = np.dot(self.B2, A[frame])
            if nb > 0:
                P = self.images.P[frame]
                Af = self.images.repeat[:, np.newaxis] * A[frame]
                a = P[self.bonds[:, 0]]
                b = P[self.bonds[:, 1]] + np.dot(self.bonds[:, 2:], Af) - a
                d = (b**2).sum(1)**0.5
                # Shrink bonds so they start/end at the atom surfaces
                # (0.65 of the covalent radius).
                r = 0.65 * self.images.r
                x0 = (r[self.bonds[:, 0]] / d).reshape((-1, 1))
                x1 = (r[self.bonds[:, 1]] / d).reshape((-1, 1))
                self.X[n + nc:] = a + b * x0
                b *= 1.0 - x0 - x1
                # Bonds crossing a periodic boundary are drawn as half bonds.
                b[self.bonds[:, 2:].any(1)] *= 0.5
                self.B[nc:] = self.X[n + nc:] + b
            filenames = self.images.filenames
            filename = filenames[frame]
            if self.frame is None or filename != filenames[self.frame] or filename is None:
                if filename is None:
                    filename = 'ase.gui'
                filename = basename(filename)
                self.window.set_title(filename)
        self.frame = frame
        self.X[:n] = self.images.P[frame]
        self.R = self.X[:n]
        if focus:
            self.focus()
        else:
            self.draw()
    def set_colors(self):
        """Reset the colouring scheme to the default jmol atomic colours."""
        self.colormode = 'jmol'
        self.set_jmol_colors()
    def set_jmol_colors(self):
        """Allocate one gtk graphics context per atomic number present.

        self.colors is indexed by atomic number Z; self.colordata collects
        [Z, rgb] pairs for each distinct element (used e.g. by the color
        window).
        """
        self.colors = [None] * (len(jmol_colors) + 1)
        self.colordata = []
        new = self.drawing_area.window.new_gc
        alloc = self.colormap.alloc_color
        for z in self.images.Z:
            if self.colors[z] is None:
                # jmol_colors rows are RGB floats in [0, 1]; gtk wants
                # 16-bit channel values.
                c, p, k = jmol_colors[z]
                self.colors[z] = new(alloc(int(65535 * c),
                                           int(65535 * p),
                                           int(65535 * k)))
        # Record each distinct element once, in order of first appearance.
        hasfound = {}
        for z in self.images.Z:
            if z not in hasfound:
                hasfound[z] = True
                self.colordata.append([z, jmol_colors[z]])
def plot_cell(self):
V = self.images.A[0]
R1 = []
R2 = []
for c in range(3):
v = V[c]
d = sqrt(np.dot(v, v))
n = max(2, int(d / 0.3))
h = v / (2 * n - 1)
R = np.arange(n)[:, None] * (2 * h)
for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
R1.append(R + i * V[(c + 1) % 3] + j * V[(c + 2) % 3])
R2.append(R1[-1] + h)
return np.concatenate(R1), np.concatenate(R2)
    def make_box(self):
        """Precompute unit-cell dash geometry in fractional coordinates.

        Fills self.B1 (dash starts) and self.B2 (dash ends), both (m, 3)
        arrays in scaled coordinates; set_frame later maps them into
        Cartesian space with the current cell.  Empty arrays are used when
        'Show unit cell' is switched off.
        """
        if not self.ui.get_widget('/MenuBar/ViewMenu/ShowUnitCell'
                                  ).get_active():
            self.B1 = self.B2 = np.zeros((0, 3))
            return
        V = self.images.A[0]
        # Number of dashes per axis: at least two, ~one per 0.3 length units.
        nn = []
        for c in range(3):
            v = V[c]
            d = sqrt(np.dot(v, v))
            n = max(2, int(d / 0.3))
            nn.append(n)
        # First two axes index the four parallel edges (i, j offsets below).
        self.B1 = np.zeros((2, 2, sum(nn), 3))
        self.B2 = np.zeros((2, 2, sum(nn), 3))
        n1 = 0
        for c, n in enumerate(nn):
            n2 = n1 + n
            h = 1.0 / (2 * n - 1)
            R = np.arange(n) * (2 * h)
            for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
                # Dash positions along axis c, offset to edge (i, j) in the
                # two perpendicular directions.
                self.B1[i, j, n1:n2, c] = R
                self.B1[i, j, n1:n2, (c + 1) % 3] = i
                self.B1[i, j, n1:n2, (c + 2) % 3] = j
            self.B2[:, :, n1:n2] = self.B1[:, :, n1:n2]
            self.B2[:, :, n1:n2, c] += h
            n1 = n2
        self.B1.shape = (-1, 3)
        self.B2.shape = (-1, 3)
    def bind(self, frame):
        """Compute the bond list for image *frame*.

        self.bonds becomes an (nb, 5) int array of rows
        [atom_a, atom_b, offset_x, offset_y, offset_z], where the offset is
        the periodic cell displacement of atom_b.  Empty when 'Show bonds'
        is off.
        """
        if not self.ui.get_widget('/MenuBar/ViewMenu/ShowBonds'
                                  ).get_active():
            self.bonds = np.empty((0, 5), int)
            return
        from ase.atoms import Atoms
        from ase.calculators.neighborlist import NeighborList
        # 1.5 * covalent radii as the bonding cutoff.
        nl = NeighborList(self.images.r * 1.5, skin=0, self_interaction=False)
        nl.update(Atoms(positions=self.images.P[frame],
                        cell=(self.images.repeat[:, np.newaxis] *
                              self.images.A[frame]),
                        pbc=self.images.pbc))
        nb = nl.nneighbors + nl.npbcneighbors
        self.bonds = np.empty((nb, 5), int)
        if nb == 0:
            return
        # Fill the first n2 rows with the neighbor-list pairs.
        n1 = 0
        for a in range(self.images.natoms):
            indices, offsets = nl.get_neighbors(a)
            n2 = n1 + len(indices)
            self.bonds[n1:n2, 0] = a
            self.bonds[n1:n2, 1] = indices
            self.bonds[n1:n2, 2:] = offsets
            n1 = n2
        # Mirror every periodic bond (nonzero offset) into the remaining
        # rows with swapped endpoints and negated offset, so both half-bonds
        # get drawn.
        i = self.bonds[:n2, 2:].any(1)
        self.bonds[n2:, 0] = self.bonds[i, 1]
        self.bonds[n2:, 1] = self.bonds[i, 0]
        self.bonds[n2:, 2:] = -self.bonds[i, 2:]
def toggle_show_unit_cell(self, action):
self.set_coordinates()
def reset_tools_modes(self):
    """Deactivate the Rotate/Orient/Move tool modes and redraw.

    Clears the pending rotation selection and the light-green
    markers highlighting atoms picked for rotation.
    """
    # Guard so the menu toggle callbacks ignore the programmatic
    # set_active(False) calls below.  (The previous version also kept
    # an unused copy of menu_change in a local 'dummy'.)
    self.menu_change = 1
    self.atoms_to_rotate = None
    for c_mode in ['Rotate', 'Orient', 'Move']:
        self.ui.get_widget('/MenuBar/ToolsMenu/%sAtoms' % c_mode).set_active(False)
    self.light_green_markings = 0
    self.menu_change = 0
    self.draw()
def toggle_mode(self, mode):
self.menu_change = 1
i_sum = 0
for c_mode in ['Rotate', 'Orient', 'Move']:
i_sum += self.ui.get_widget('/MenuBar/ToolsMenu/%sAtoms' % c_mode).get_active()
if i_sum == 0 or (i_sum == 1 and sum(self.images.selected) == 0):
self.reset_tools_modes()
return()
if i_sum == 2:
try:
self.images.selected = self.atoms_to_rotate_0.copy()
except:
self.atoms_to_rotate_0 = self.images.selected.copy()
if i_sum == 1:
self.atoms_to_rotate_0 = self.images.selected.copy()
for c_mode in ['Rotate', 'Orient', 'Move']:
if c_mode != mode:
self.ui.get_widget('/MenuBar/ToolsMenu/%sAtoms' % c_mode).set_active(False)
if self.ui.get_widget('/MenuBar/ToolsMenu/%sAtoms' % mode).get_active():
self.atoms_to_rotate_0 = self.images.selected.copy()
for i in range(len(self.images.selected)):
self.images.selected[i] = False
self.light_green_markings = 1
else:
try:
atr = self.atoms_to_rotate_0
for i in range(len(self.images.selected)):
self.images.selected[i] = atr[i]
except:
pass
self.menu_change = 0
self.draw()
def toggle_move_mode(self, action):
"""
Toggles the move mode, where the selected atoms can be moved with the arrow
keys and pg up/dn. If the shift key is pressed, the movement will be reduced.
The movement will be relative to the current rotation of the coordinate system.
The implementation of the move mode is found in the gui.scroll
"""
if not (self.menu_change):
self.toggle_mode('Move')
def toggle_rotate_mode(self, action):
"""
Toggles the rotate mode, where the selected atoms can be rotated with the arrow keys
and pg up/dn. If the shift key is pressed, the rotation angle will be reduced.
The atoms to be rotated will be marked with light green - and the COM of the selected
atoms will be used as the COM of the rotation. This can be changed while rotating the
selected atoms.
If only two atoms are seleceted, and the number of atoms to be rotated is different from
two, the selected atoms will define the axis of rotation.
The implementation of the rotate mode is found in the gui.scroll
"""
if not (self.menu_change):
self.toggle_mode('Rotate')
def toggle_orient_mode(self, action):
"""
Toggle the orientation mode - the orientation of the atoms will be changed
according to the arrow keys selected.
If nothing is selected, standard directions are x, y and z
if two atoms are selected, the standard directions are along their displacement vector
if three atoms are selected, the orientation is changed according to the normal of these
three vectors.
"""
if not (self.menu_change):
self.toggle_mode('Orient')
self.orient_normal = np.array([1.0, 0.0, 0.0])
sel_pos = []
for i, j in enumerate(self.atoms_to_rotate_0):
if j:
sel_pos.append(self.R[i])
if len(sel_pos) == 2:
self.orient_normal = sel_pos[0] - sel_pos[1]
if len(sel_pos) == 3:
v1 = sel_pos[1] - sel_pos[0]
v2 = sel_pos[1] - sel_pos[2]
self.orient_normal = np.cross(v1, v2)
self.orient_normal /= sum(self.orient_normal ** 2) ** 0.5
def show_labels(self, action, active):
    """Radio-menu callback selecting the text drawn on each atom.

    active.get_name() is one of 'AtomIndex', 'NoLabel', 'MagMom' or
    'Element'; self.labels becomes a per-frame sequence of per-atom
    labels (or None for no labels), then the view is redrawn.
    """
    an = active.get_name()
    if an == "AtomIndex":
        self.labels = [range(self.images.natoms)] * self.images.nimages
    elif an == "NoLabel":
        self.labels = None
    elif an == "MagMom":
        # per-frame magnetic moments from the images object
        self.labels = self.images.M
    elif an == "Element":
        self.labels = [[chemical_symbols[x] for x in self.images.Z]] * self.images.nimages
    self.draw()
def toggle_show_axes(self, action):
self.draw()
def toggle_show_bonds(self, action):
self.set_coordinates()
def toggle_show_velocities(self, action):
self.show_vectors(10 * self.images.V) # XXX hard coded scale is ugly
self.draw()
def toggle_show_forces(self, action):
self.show_vectors(self.images.F)
self.draw()
def hide_selected(self, button):
self.images.visible[self.images.selected] = False
self.draw()
def show_selected(self, button):
self.images.visible[self.images.selected] = True
self.draw()
def repeat_window(self, menuitem):
self.reset_tools_modes()
Repeat(self)
def rotate_window(self, menuitem):
Rotate(self)
def colors_window(self, menuitem):
ColorWindow(self)
def focus(self, x=None):
    """Recentre and rescale the view so the whole scene fits the
    window, then redraw.

    The unused 'x' argument lets this method double as a GTK menu
    callback.
    """
    # Nothing to show: fall back to a neutral view.
    if (self.images.natoms == 0 and not
        self.ui.get_widget('/MenuBar/ViewMenu/ShowUnitCell').get_active()):
        self.scale = 1.0
        self.center = np.zeros(3)
        self.draw()
        return
    P = np.dot(self.X, self.axes)  # all points in view coordinates
    n = self.images.natoms
    # Grow the bounding box by the atomic radii on both sides.
    P[:n] -= self.images.r[:, None]
    P1 = P.min(0)
    P[:n] += 2 * self.images.r[:, None]
    P2 = P.max(0)
    self.center = np.dot(self.axes, (P1 + P2) / 2)
    S = 1.3 * (P2 - P1)  # padded extent of the scene
    # Fit whichever dimension is limiting into the window.
    if S[0] * self.height < S[1] * self.width:
        self.scale = self.height / S[1]
    else:
        self.scale = self.width / S[0]
    self.draw()
def reset_view(self, menuitem):
self.axes = rotate('0.0x,0.0y,0.0z')
self.set_coordinates()
self.focus(self)
def set_view(self, menuitem):
plane_rotation = menuitem.get_name()
if plane_rotation == 'xyPlane':
self.axes = rotate('0.0x,0.0y,0.0z')
elif plane_rotation == 'yzPlane':
self.axes = rotate('-90.0x,-90.0y,0.0z')
elif plane_rotation == 'zxPlane':
self.axes = rotate('90.0x,0.0y,90.0z')
elif plane_rotation == 'yxPlane':
self.axes = rotate('180.0x,0.0y,90.0z')
elif plane_rotation == 'zyPlane':
self.axes = rotate('0.0x,90.0y,0.0z')
elif plane_rotation == 'xzPlane':
self.axes = rotate('-90.0x,0.0y,0.0z')
else:
if plane_rotation == 'a1a2Plane':
i, j = 0, 1
elif plane_rotation == 'a2a3Plane':
i, j = 1, 2
elif plane_rotation == 'a3a1Plane':
i, j = 2, 0
elif plane_rotation == 'a2a1Plane':
i, j = 1, 0
elif plane_rotation == 'a3a2Plane':
i, j = 2, 1
elif plane_rotation == 'a1a3Plane':
i, j = 0, 2
x1 = self.images.A[self.frame, i]
x2 = self.images.A[self.frame, j]
norm = np.linalg.norm
x1 = x1 / norm(x1)
x2 = x2 - x1 * np.dot(x1, x2)
x2 /= norm(x2)
x3 = np.cross(x1, x2)
self.axes = np.array([x1, x2, x3]).T
self.set_coordinates()
def get_colors(self, rgb = False):
Z = self.images.Z
if rgb:
# Create a shape that is equivalent to self.colors,
# but contains rgb data instead gtk.gdk.GCX11 objects.
# The rgb data may be three numbers, or a named color. As
# the type of data is unknown, we cannot create an array
# beforehand with sensible default values, but need to
# create unused elements on the fly. The type of unused
# elements is important to prevent trouble when converting
# to numpy arrays.
colarray = []
for z, c in self.colordata:
if z >= len(colarray):
# Allocate unused elements as well.
colarray.extend([c,] * (1 + z - len(colarray)))
colarray[z] = c
else:
colarray = self.colors
if self.colormode == 'jmol' or self.colormode == 'atno':
colors = np.array(colarray)[Z]
elif self.colormode == 'tags':
colors = np.array(colarray)[self.images.T[self.frame]]
elif self.colormode == 'force':
F = self.images.F[self.frame]
F = np.sqrt(((F*self.images.dynamic[:,np.newaxis])**2).sum(axis=-1)) # The absolute force
nF = (F - self.colormode_force_data[0]) * self.colormode_force_data[1]
nF = np.clip(nF.astype(int), 0, len(self.colors)-1)
colors = np.array(colarray)[nF]
elif self.colormode == 'velocity':
V = self.images.V[self.frame]
V = np.sqrt((V*V).sum(axis=-1)) # The absolute velocity
nV = (V - self.colormode_velocity_data[0]) * self.colormode_velocity_data[1]
nV = np.clip(nV.astype(int), 0, len(self.colors)-1)
colors = np.array(colarray)[nV]
elif self.colormode == 'charge':
Q = self.images.q[self.frame]
nq = ((Q - self.colormode_charge_data[0]) *
self.colormode_charge_data[1] )
nq = np.clip(nq.astype(int), 0, len(self.colors)-1)
colors = np.array(colarray)[nq]
elif self.colormode == 'manual':
colors = colarray
elif self.colormode == 'same':
colors = [colarray[0]] * self.images.natoms
else:
raise RuntimeError('Unknown color mode: %s' % (self.colormode,))
return colors
def repeat_colors(self, repeat):
natoms = self.images.natoms
if self.colormode == 'manual':
a0 = 0
colors = self.colors
colordata = self.colordata
for i0 in range(repeat[0]):
for i1 in range(repeat[1]):
for i2 in range(repeat[2]):
a1 = a0 + natoms
colors[a0:a1] = self.colors[:natoms]
colordata[a0:a1] = self.colordata[:natoms]
a0 = a1
self.colors = colors
self.colordata = colordata
def my_arc(self, gc, fill, j, X, r, n, A, d):
if self.images.shapes is not None:
rx = (self.images.shapes[j, 0]).round().astype(int)
ry = (self.images.shapes[j, 1]).round().astype(int)
rz = (self.images.shapes[j, 2]).round().astype(int)
circle = rx == ry and ry == rz
if not circle:
Q = Quaternion(self.images.Q[self.frame][j])
X2d = np.array([X[j][0], X[j][1]])
Ellipsoid = np.array([[1. / (rx*rx), 0, 0],
[0, 1. / (ry*ry), 0],
[0, 0, 1. / (rz*rz)]
])
# Ellipsoid rotated by quaternion as Matrix X' = R X R_transpose
El_r = np.dot(Q.rotation_matrix(),
np.dot(Ellipsoid,
np.transpose(Q.rotation_matrix())))
# Ellipsoid rotated by quaternion and axes as
# Matrix X' = R_axes X' R_axes
El_v = np.dot(np.transpose(self.axes), np.dot(El_r, self.axes))
# Projection of rotated ellipsoid on xy plane
El_p = Ell = np.array([
[El_v[0][0] - El_v[0][2] * El_v[0][2] / El_v[2][2],
El_v[0][1] - El_v[0][2] * El_v[1][2] / El_v[2][2]],
[El_v[0][1] - El_v[0][2] * El_v[1][2] / El_v[2][2],
El_v[1][1] - El_v[1][2] * El_v[1][2] / El_v[2][2]]
])
# diagonal matrix der Ellipse gibt halbachsen
El_p_diag = np.linalg.eig(El_p)
# Winkel mit dem Ellipse in xy gedreht ist aus
# eigenvektor der diagonal matrix
phi = atan(El_p_diag[1][0][1] / El_p_diag[1][0][0])
tupl = []
alpha = np.array(range(16)) * 2 * np.pi / 16
El_xy = np.array([sqrt(1. / (El_p_diag[0][0])) *
np.cos(alpha)*np.cos(phi)
- sqrt(1./(El_p_diag[0][1])) *
np.sin(alpha) * np.sin(phi),
sqrt(1./(El_p_diag[0][0])) *
np.cos(alpha)*np.sin(phi)
+ sqrt(1./(El_p_diag[0][1])) *
np.sin(alpha) * np.cos(phi)])
tupl = (El_xy.transpose() * self.scale +
X[j][:2]).round().astype(int)
# XXX there must be a better way
tupl = [tuple(i) for i in tupl]
return self.pixmap.draw_polygon( gc, fill, tupl)
else:
return self.pixmap.draw_arc(gc, fill, A[j, 0], A[j, 1], d[j],
d[j], 0, 23040)
else:
return self.pixmap.draw_arc(gc, fill, A[j, 0], A[j, 1], d[j], d[j],
0, 23040)
def arrow(self, begin, end):
    """Draw a line from begin to end (screen coordinates) with an
    arrow head at the end point."""
    vec = end - begin
    length = np.sqrt((vec[:2]**2).sum())  # on-screen (projected) length
    length = min(length, 0.3 * self.scale)  # cap the head size
    line = self.pixmap.draw_line
    beg = begin.round().astype(int)
    en = end.round().astype(int)
    line(self.foreground_gc, beg[0], beg[1], en[0], en[1])
    # Two barbs of the head, +-0.3 rad off the reversed shaft
    # direction.
    angle = atan2(en[1] - beg[1], en[0] - beg[0]) + np.pi
    x1 = (end[0] + length * cos(angle - 0.3)).round().astype(int)
    y1 = (end[1] + length * sin(angle - 0.3)).round().astype(int)
    x2 = (end[0] + length * cos(angle + 0.3)).round().astype(int)
    y2 = (end[1] + length * sin(angle + 0.3)).round().astype(int)
    line(self.foreground_gc, x1, y1, en[0], en[1])
    line(self.foreground_gc, x2, y2, en[0], en[1])
def draw(self, status=True):
self.pixmap.draw_rectangle(self.background_gc, True, 0, 0,
self.width, self.height)
axes = self.scale * self.axes * (1, -1, 1)
offset = (np.dot(self.center, axes) -
(0.5 * self.width, 0.5 * self.height, 0))
X = np.dot(self.X, axes) - offset
n = self.images.natoms
self.indices = X[:, 2].argsort()
if self.ui.get_widget('/MenuBar/ViewMenu/ShowBonds').get_active():
r = self.images.r * (0.65 * self.scale)
else:
r = self.images.r * self.scale
P = self.P = X[:n, :2]
A = (P - r[:, None]).round().astype(int)
X1 = X[n:, :2].round().astype(int)
X2 = (np.dot(self.B, axes) - offset).round().astype(int)
disp = (np.dot(self.images.D[self.frame],axes)).round().astype(int)
d = (2 * r).round().astype(int)
vectors = (self.ui.get_widget('/MenuBar/ViewMenu/ShowVelocities'
).get_active() or
self.ui.get_widget('/MenuBar/ViewMenu/ShowForces'
).get_active())
if vectors:
V = np.dot(self.vectors[self.frame], axes)
selected_gc = self.selected_gc
colors = self.get_colors()
arc = self.pixmap.draw_arc
line = self.pixmap.draw_line
foreground_gc = self.foreground_gc
dynamic = self.images.dynamic
selected = self.images.selected
visible = self.images.visible
for a in self.indices:
if a < n:
ra = d[a]
if visible[a]:
# Draw the atoms
self.my_arc(colors[a], True, a, X, r, n, A, d)
# Draw labels on the atoms
if self.labels is not None:
# start labeling with atomic indexes
# to do: scale position and size with radius in some
# meaningful manner - pick a reference magnification
# where it "looks good" and then go from there ...
nlabel = str(self.labels[self.frame][a])
colorl = self.foreground_gc
layout = self.drawing_area.create_pango_layout(nlabel)
xlabel = int(A[a,0]+ra/2 - layout.get_size()[0]/2. / pango.SCALE)
ylabel = int(A[a,1]+ra/2 - layout.get_size()[1]/2. / pango.SCALE)
self.pixmap.draw_layout(colorl, xlabel, ylabel, layout)
# Draw cross on constrained atoms
if not dynamic[a]:
R1 = int(0.14644 * ra)
R2 = int(0.85355 * ra)
line(foreground_gc,
A[a, 0] + R1, A[a, 1] + R1,
A[a, 0] + R2, A[a, 1] + R2)
line(foreground_gc,
A[a, 0] + R2, A[a, 1] + R1,
A[a, 0] + R1, A[a, 1] + R2)
# Draw velocities og forces
if vectors:
self.arrow(X[a], X[a] + V[a])
if self.light_green_markings and self.atoms_to_rotate_0[a]:
arc(self.green, False, A[a, 0] + 2, A[a, 1] + 2,
ra - 4, ra - 4, 0, 23040)
# Draw marking circles around the atoms
if selected[a]:
self.my_arc(selected_gc, False, a, X, r, n, A, d)
elif visible[a]:
self.my_arc(foreground_gc, False, a, X, r, n, A, d)
else:
# Draw unit cell
a -= n
line(foreground_gc, X1[a, 0] + disp[0], X1[a, 1] + disp[1], X2[a, 0] + disp[0], X2[a, 1] + disp[1])
if self.ui.get_widget('/MenuBar/ViewMenu/ShowAxes').get_active():
self.draw_axes()
if self.images.nimages > 1:
self.draw_frame_number()
self.drawing_area.window.draw_drawable(self.background_gc, self.pixmap,
0, 0, 0, 0,
self.width, self.height)
if status:
self.status()
def draw_axes(self):
axes_labels = [
"<span foreground=\"red\" weight=\"bold\">X</span>",
"<span foreground=\"green\" weight=\"bold\">Y</span>",
"<span foreground=\"blue\" weight=\"bold\">Z</span>"]
axes_length = 15
for i in self.axes[:,2].argsort():
a = 20
b = self.height - 20
c = int(self.axes[i][0] * axes_length + a)
d = int(-self.axes[i][1] * axes_length + b)
self.pixmap.draw_line(self.foreground_gc, a, b, c, d)
# The axes label
layout = self.drawing_area.create_pango_layout(axes_labels[i])
layout.set_markup(axes_labels[i])
lox = int(self.axes[i][0] * 20 + 20\
- layout.get_size()[0] / 2. / pango.SCALE)
loy = int(self.height - 20 - self.axes[i][1] * 20\
- layout.get_size()[1] / 2. / pango.SCALE)
self.pixmap.draw_layout(self.foreground_gc, lox, loy, layout)
def draw_frame_number(self):
    """Write 'Frame: <n>' in the lower-right corner of the pixmap.

    (Removed the unused 'color' and 'line' locals of the previous
    version; only draw_layout is needed.)
    """
    n = str(self.frame)
    layout = self.drawing_area.create_pango_layout("Frame: " + n)
    # Pango sizes are in pango units; divide by SCALE to get pixels.
    x = self.width - 3 - layout.get_size()[0] / pango.SCALE
    y = self.height - 5 - layout.get_size()[1] / pango.SCALE
    self.pixmap.draw_layout(self.foreground_gc, x, y, layout)
def release(self, drawing_area, event):
if event.button != 1:
return
selected = self.images.selected
selected_ordered = self.images.selected_ordered
if event.time < self.t0 + 200: # 200 ms
d = self.P - self.xy
hit = np.less((d**2).sum(1), (self.scale * self.images.r)**2)
for a in self.indices[::-1]:
if a < self.images.natoms and hit[a]:
if event.state & gtk.gdk.CONTROL_MASK:
selected[a] = not selected[a]
if selected[a]:
selected_ordered += [a]
elif len(selected_ordered) > 0:
if selected_ordered[-1] == a:
selected_ordered = selected_ordered[:-1]
else:
selected_ordered = []
else:
selected[:] = False
selected[a] = True
selected_ordered = [a]
break
else:
selected[:] = False
selected_ordered = []
self.draw()
else:
A = (event.x, event.y)
C1 = np.minimum(A, self.xy)
C2 = np.maximum(A, self.xy)
hit = np.logical_and(self.P > C1, self.P < C2)
indices = np.compress(hit.prod(1), np.arange(len(hit)))
if not (event.state & gtk.gdk.CONTROL_MASK):
selected[:] = False
selected[indices] = True
if len(indices) == 1 and indices[0] not in self.images.selected_ordered:
selected_ordered += [indices[0]]
elif len(indices) > 1:
selected_ordered = []
self.draw()
indices = np.arange(self.images.natoms)[self.images.selected]
if len(indices) != len(selected_ordered):
selected_ordered = []
self.images.selected_ordered = selected_ordered
def press(self, drawing_area, event):
    """Mouse-button-press handler: snapshot the press state so that
    move() and release() can interpret drags and clicks."""
    self.button = event.button
    self.xy = (event.x, event.y)  # pixel position of the press
    self.t0 = event.time  # later used to tell a click from a drag
    self.axes0 = self.axes  # view orientation at press time
    self.center0 = self.center  # view centre at press time
def move(self, drawing_area, event):
x, y, state = event.window.get_pointer()
x0, y0 = self.xy
if self.button == 1:
window = self.drawing_area.window
window.draw_drawable(self.background_gc, self.pixmap,
0, 0, 0, 0,
self.width, self.height)
x0 = int(round(x0))
y0 = int(round(y0))
window.draw_rectangle(self.selected_gc, False,
min(x, x0), min(y, y0),
abs(x - x0), abs(y - y0))
return
if self.button == 2:
return
if state & gtk.gdk.SHIFT_MASK:
self.center = (self.center0 -
np.dot(self.axes, (x - x0, y0 - y, 0)) / self.scale)
else:
# Snap mode: the a-b angle and t should multipla of 15 degrees ???
a = x - x0
b = y0 - y
t = sqrt(a * a + b * b)
if t > 0:
a /= t
b /= t
else:
a = 1.0
b = 0.0
c = cos(0.01 * t)
s = -sin(0.01 * t)
rotation = np.array([(c * a * a + b * b, (c - 1) * b * a, s * a),
((c - 1) * a * b, c * b * b + a * a, s * b),
(-s * a, -s * b, c)])
self.axes = np.dot(self.axes0, rotation)
if self.images.natoms > 0:
com = self.X[:self.images.natoms].mean(0)
else:
com = self.images.A[self.frame].mean(0)
self.center = com - np.dot(com - self.center0,
np.dot(self.axes0, self.axes.T))
self.draw(status=False)
# Create a new backing pixmap of the appropriate size
def configure_event(self, drawing_area, event):
if self.configured:
w = self.width
h = self.height
else:
self.config = read_defaults()
self.colormap = self.drawing_area.get_colormap()
self.foreground_gc = self.drawing_area.window.new_gc(
self.colormap.alloc_color(self.config['gui_foreground_color']))
self.background_gc = self.drawing_area.window.new_gc(
self.colormap.alloc_color(self.config['gui_background_color']))
self.red = self.drawing_area.window.new_gc(
self.colormap.alloc_color(62345, 0, 0), line_width=2)
self.green = self.drawing_area.window.new_gc(
self.colormap.alloc_color(0, 54456, 0), line_width=2)
self.blue = self.drawing_area.window.new_gc(
self.colormap.alloc_color(0, 0, 54456), line_width=2)
self.selected_gc = self.drawing_area.window.new_gc(
self.colormap.alloc_color(0, 16456, 0),
line_width=3)
x, y, self.width, self.height = drawing_area.get_allocation()
self.pixmap = gtk.gdk.Pixmap(drawing_area.window,
self.width, self.height)
if self.configured:
self.scale *= sqrt(1.0 * self.width * self.height / (w * h))
self.draw()
self.configured = True
# Redraw the screen from the backing pixmap
# Redraw the damaged area of the screen from the backing pixmap.
def expose_event(self, drawing_area, event):
    """GTK expose handler: copy the exposed rectangle from the
    off-screen pixmap back to the window."""
    x, y, width, height = event.area
    gc = self.background_gc
    drawing_area.window.draw_drawable(gc, self.pixmap,
                                      x, y, x, y, width, height)
def external_viewer(self, action):
    """Export the current images to a temporary xyz file and open it
    in the external viewer named by the menu action.

    A background shell job deletes the temporary file after 60 s.
    """
    name = action.get_name()
    command = {'Avogadro': 'avogadro',
               'XMakeMol': 'xmakemol -f',
               'RasMol': 'rasmol -xyz',
               'VMD': 'vmd'}[name]
    fd, filename = tempfile.mkstemp('.xyz', 'ase.gui-')
    os.close(fd)
    self.images.write(filename)
    # NOTE(review): shell command built by string interpolation.  The
    # filename comes from tempfile so it is safe here, but os.system
    # is fragile -- consider subprocess.
    os.system('(%s %s &); (sleep 60; rm %s) &' %
              (command, filename, filename))
def render_window(self, action):
Render(self)
def show_vectors(self, vectors):
    # Store the per-frame vector field (velocities or forces,
    # judging by the callers) that draw() renders as arrows when the
    # corresponding menu item is active.
    self.vectors = vectors
|
PHOTOX/fuase
|
ase/ase/gui/view.py
|
Python
|
gpl-2.0
| 34,755
|
[
"ASE",
"Avogadro",
"Jmol",
"RasMol",
"VMD"
] |
562df89544b9be8c14b9ed0c27a262a973d673258597628017d9776518603f4a
|
# -*- coding: utf-8 -*-
"""
Library of functions for simulated data creation
Created on 07/04/2016
@author: Anderson Brito da Silva & Jan Cimbalnik
"""
import numpy as np
import scipy.signal as sig
from scipy.stats import norm
# %% Noise types
def pinknoise(N):
    """
    Create a pink noise (1/f) with N points.

    White noise is shaped in the frequency domain (amplitude ~
    1/sqrt(f), i.e. power ~ 1/f) and normalised to zero mean and
    unit RMS.

    Parameters:
    ----------
    N - Number of samples to be returned

    Returns:
    --------
    y - numpy array of length N
    """
    M = N
    # The spectral construction needs an even length; generate one
    # extra sample and trim it at the end when N is odd.
    if N % 2:
        N += 1
    x = np.random.randn(N)  # white noise
    # Scale the positive-frequency half of the spectrum by 1/sqrt(n);
    # irfft supplies the conjugate-symmetric negative half that the
    # original code assembled by hand.
    X = np.fft.rfft(x)
    X /= np.sqrt(np.arange(1, X.size + 1))
    y = np.fft.irfft(X, n=N)
    # normalise to zero mean and unit RMS
    y -= np.mean(y)
    y /= np.sqrt(np.mean(y ** 2))
    # return exactly the requested number of samples
    if M % 2 == 1:
        y = y[:-1]
    return y
def brownnoise(N):
    """
    Create a brown noise (1/f**2) with N points.

    White noise is shaped in the frequency domain (amplitude ~ 1/f,
    i.e. power ~ 1/f**2) and normalised to zero mean and unit RMS.

    Parameters:
    ----------
    N - Number of samples to be returned

    Returns:
    --------
    y - numpy array of length N
    """
    M = N
    # The spectral construction needs an even length; generate one
    # extra sample and trim it at the end when N is odd.
    if N % 2:
        N += 1
    x = np.random.randn(N)  # white noise
    # Divide the positive-frequency half of the spectrum by n; irfft
    # supplies the conjugate-symmetric negative half that the original
    # code assembled by hand.
    X = np.fft.rfft(x)
    X /= np.arange(1, X.size + 1)
    y = np.fft.irfft(X, n=N)
    # normalise to zero mean and unit RMS
    y -= np.mean(y)
    y /= np.sqrt(np.mean(y ** 2))
    # return exactly the requested number of samples
    if M % 2 == 1:
        y = y[:-1]
    return y
# %% Artifacts
def delta(srate = 5000, decay_dur = None):
    """
    Delta function with exponential decay.

    Parameters:
    -----------
    srate - sampling rate (Hz)\n
    decay_dur - decay duration (s) before the tail returns to ~0;
                random in [0, 1) when None\n

    Returns:
    --------
    delta - numpy array: a leading 0, a jump to 1, then an
            exponential decay towards 0.001 over decay_dur seconds\n
    """
    if decay_dur is None:
        decay_dur = np.random.random()

    # At least one decay sample -- a zero-length decay would divide
    # by zero when computing the decay factor below.
    decay_N = max(int(srate * decay_dur), 1)
    return_value = 0.001  # amplitude at which the decay is truncated
    decay_factor = np.log(return_value) / -decay_N
    t = np.linspace(0, decay_N, decay_N, endpoint=False)
    decay = np.exp(-t * decay_factor)

    delta = np.concatenate([[0], decay])

    return delta
def line_noise(srate = 5000, freq = 50, numcycles = None):
    """
    Line noise artifact: a pure sinusoid at the mains frequency.

    Parameters:
    -----------
    srate = 5000 - sampling frequency\n
    freq = 50 (Default) - line noise frequency\n
    numcycles - number of cycles (random 3-49 when None)\n

    Returns:
    --------
    line_noise - numpy array\n
    """
    if numcycles is None:
        numcycles = np.random.randint(3, 50)

    n_samples = int((numcycles / freq) * srate)
    sample_idx = np.arange(n_samples)
    return np.sin(2 * np.pi * freq * sample_idx / srate)
def artifact_spike(srate = 5000, dur = None):
    """
    Artifact-like spike (sharp triangular ramp 0 -> 1 -> 0, not
    gaussian).

    Parameters:
    -----------
    srate = 5000 - sampling frequency\n
    dur - duration of the event (s); random below ~0.1 s when None\n

    Returns:
    --------
    artifact_spike - numpy array\n
    """
    if dur is None:
        dur = round(np.random.random() / 10, 3)

    n_samples = int(srate * dur)
    # Force an odd length so the peak sits on a single centre sample.
    if not n_samples % 2:
        n_samples -= 1

    # Build the triangle by concatenation.  The previous in-place
    # slice assignment crashed with a broadcast error for a
    # single-sample spike (y[-0:] selects the whole array); this form
    # handles that case and degenerates gracefully to an empty array
    # for non-positive lengths.
    half = n_samples // 2
    rising = np.linspace(0, 1, half + 1)
    falling = np.linspace(1, 0, half + 1)[1:]
    return np.concatenate([rising, falling])
# %% HFO
def _wavelet(numcycles, f, srate):
    """
    Create a complex wavelet (complex exponential under a gaussian
    envelope).

    Parameters:
    ----------
    numcycles - number of cycles (gaussian window)\n
    f - central frequency\n
    srate - signal sample rate\n

    Returns:
    ----------
    wave - numpy array with waveform.\n
    time - numpy array with the time vector.\n
    """
    # np.linspace requires an integer sample count -- passing the raw
    # float raised a TypeError on modern numpy.
    N = int(float(srate * numcycles) / (f))  # number of points
    time = np.linspace((-numcycles / 2) / float(f),
                       (numcycles / 2) / float(f), N)  # time vector
    std = numcycles / (2 * np.pi * f)  # gaussian envelope std (s)
    wave = np.exp(2 * 1j * np.pi * f * time) * np.exp(-(time ** 2) / (2 * (std ** 2)))
    return wave, time
def hfo(srate = 5000, f = None, numcycles = None):
    """
    Create an HFO (high-frequency oscillation).

    Parameters:
    ----------
    srate = 5000 (Defaults) - sampling rate\n
    f = None (Default) - Create a random HFO with central frequency between 60-600 Hz.\n
    numcycles = None (Default) - Create a random HFO with numcycles between 9 - 14.\n

    Returns:
    ----------
    wave - numpy array with waveform.\n
    time - numpy array with the time vector.\n
    """
    # Fill in randomised defaults where parameters were omitted.
    if numcycles is None:
        numcycles = np.random.randint(9, 15)
    if f is None:
        f = np.random.randint(60, 600)

    complex_wave, time = _wavelet(numcycles, f, srate)
    # Only the real part of the complex wavelet is the simulated HFO.
    return np.real(complex_wave), time
# %% Spike
def spike(srate = 5000, dur = None):
    """
    Create a simple gaussian spike.

    Parameters:
    -----------
    srate = 5000 (Default) - sampling rate.\n
    dur - spike duration (sec); random up to 0.5 s when None\n

    Returns:
    --------
    spike - numpy array with peak amplitude 1.
    """
    if dur is None:
        dur = round(np.random.random() * 0.5, 2)

    # Evaluate the standard normal pdf over +-4 standard deviations
    # and rescale so the peak amplitude is exactly 1.
    support = np.linspace(-4, 4, int(srate * dur))
    shape = norm.pdf(support, loc=0, scale=1)
    return shape / shape.max()
#def wavelet_spike(srate = 2000, f=None, numcycles = None):
# '''
# Create a wavelet spike.
#
# Parameters:
# ----------
# f = None (Default) - Create a random Spike with central frequency between 60-600 Hz.
# numcycles = None (Default) - Create a random Spike with numcycles between 1 - 2.
#
# Returns:
# ----------
# wave - numpy array with waveform.
# time - numpy array with the time vector.
# '''
# if numcycles is None:
# numcycles = np.random.randint(1,3)
# if f is None:
# f = np.random.randint(60,600)
# wave,time = wavelet(numcycles,f,srate)
# return -np.real(wave),time
# %% Combinations - just convenience functions
|
HFO-detect/HFO-detect-python
|
pyhfo_detect/sim/create_simulated.py
|
Python
|
bsd-3-clause
| 6,496
|
[
"Gaussian"
] |
fae9dfcd15754b40dc78346d3da7d804075928b4faa3dd94de279d81627cd01c
|
# Routines for reading and plotting to produce comparable figures to the SAGE paper
from pylab import *
from scipy import signal as ss
def galdtype():
    """Return the numpy dtype describing one galaxy record in a
    public-version SAGE output file.

    The field list mirrors the C struct written by SAGE; align=True
    keeps the binary layout compatible with the compiled code.
    (Replaced the python-2-only xrange comprehensions with a direct
    iteration over the field tuples.)
    """
    Galdesc_full = [
        ('SnapNum'                      , np.int32),
        ('Type'                         , np.int32),
        ('GalaxyIndex'                  , np.int64),
        ('CentralGalaxyIndex'           , np.int64),
        ('SAGEHaloIndex'                , np.int32),
        ('SAGETreeIndex'                , np.int32),
        ('SimulationHaloIndex'          , np.int64),
        ('mergeType'                    , np.int32),
        ('mergeIntoID'                  , np.int32),
        ('mergeIntoSnapNum'             , np.int32),
        ('dT'                           , np.float32),
        ('Pos'                          , (np.float32, 3)),
        ('Vel'                          , (np.float32, 3)),
        ('Spin'                         , (np.float32, 3)),
        ('Len'                          , np.int32),
        ('Mvir'                         , np.float32),
        ('CentralMvir'                  , np.float32),
        ('Rvir'                         , np.float32),
        ('Vvir'                         , np.float32),
        ('Vmax'                         , np.float32),
        ('VelDisp'                      , np.float32),
        ('ColdGas'                      , np.float32),
        ('StellarMass'                  , np.float32),
        ('BulgeMass'                    , np.float32),
        ('HotGas'                       , np.float32),
        ('EjectedMass'                  , np.float32),
        ('BlackHoleMass'                , np.float32),
        ('IntraClusterStars'            , np.float32),
        ('MetalsColdGas'                , np.float32),
        ('MetalsStellarMass'            , np.float32),
        ('MetalsBulgeMass'              , np.float32),
        ('MetalsHotGas'                 , np.float32),
        ('MetalsEjectedMass'            , np.float32),
        ('MetalsIntraClusterStars'      , np.float32),
        ('SfrDisk'                      , np.float32),
        ('SfrBulge'                     , np.float32),
        ('SfrDiskZ'                     , np.float32),
        ('SfrBulgeZ'                    , np.float32),
        ('DiskRadius'                   , np.float32),
        ('Cooling'                      , np.float32),
        ('Heating'                      , np.float32),
        ('QuasarModeBHaccretionMass'    , np.float32),
        ('TimeOfLastMajorMerger'        , np.float32),
        ('TimeOfLastMinorMerger'        , np.float32),
        ('OutflowRate'                  , np.float32),
        ('infallMvir'                   , np.float32),
        ('infallVvir'                   , np.float32),
        ('infallVmax'                   , np.float32)
        ]
    names = [field[0] for field in Galdesc_full]
    formats = [field[1] for field in Galdesc_full]
    Galdesc = np.dtype({'names': names, 'formats': formats}, align=True)
    return Galdesc
def sageoutsingle(fname):
    """Read a single SAGE output file.

    Intended only as a subroutine of read_sagesnap.  Returns
    (G, NtotGals): the raw galaxy record array and the number of
    galaxies in the file.
    """
    Galdesc = galdtype()
    # Context manager so the handle is always closed -- the previous
    # version opened the file and never closed it.
    with open(fname, 'rb') as fin:
        Ntrees = np.fromfile(fin, np.dtype(np.int32), 1)  # number of trees in file
        NtotGals = np.fromfile(fin, np.dtype(np.int32), 1)[0]  # number of gals in file
        # Per-tree galaxy counts (read to advance the file position;
        # not otherwise used here).
        GalsPerTree = np.fromfile(fin, np.dtype((np.int32, Ntrees)), 1)
        G = np.fromfile(fin, Galdesc, NtotGals)  # all the galaxy data
    return G, NtotGals
def read_sagesnap(fpre, firstfile=0, lastfile=7):
    """Read a full SAGE snapshot.

    Concatenates the per-file outputs fpre_<firstfile> ...
    fpre_<lastfile> into a single record array and returns it as a
    recarray.
    """
    Galdesc = galdtype()
    Glist = []
    Ngal = []
    for i in range(firstfile, lastfile + 1):
        G1, N1 = sageoutsingle(fpre + '_' + str(i))
        Glist.append(G1)
        # Keep counts as python ints: the previous version accumulated
        # them in a float array, and float offsets break array slicing
        # on python 3 / modern numpy.
        Ngal.append(int(N1))
    G = np.empty(sum(Ngal), dtype=Galdesc)
    offset = 0
    for G1, N1 in zip(Glist, Ngal):
        G[offset:offset + N1] = G1[0:N1].copy()
        offset += N1
    G = G.view(np.recarray)
    return G
def sphere2dk(R, Lbin, Nbin):
    """Make a square 2D kernel: a sphere of radius R collapsed
    (projected) along the line of sight, sampled on an Nbin x Nbin
    grid with pixel size Lbin, normalised to unit sum.

    Used to smooth 2D histograms before contouring.  (Replaced the
    python-2-only xrange with range.)
    """
    Nbin = int(Nbin)  # ensure an integer number of bins
    k = np.zeros((Nbin, Nbin))  # the convolution kernel to be output
    for i in range(Nbin):
        for j in range(Nbin):
            # distance of this pixel from the kernel centre
            r = Lbin * np.sqrt((i - (Nbin - 1) / 2) ** 2 + (j - (Nbin - 1) / 2) ** 2)
            if r < R:
                k[i, j] = np.sqrt(R ** 2 - r ** 2)
    k /= np.sum(k)  # normalise
    return k
def contour(x, y, Nbins=None, weights=None, range=None, Nlevels=25, c='k', ls='-', lw=2):
    """Plot smoothed 2D contours of the point distribution (x, y).

    Histograms the data, smooths the image with a collapsed-sphere
    kernel and draws Nlevels contour lines.

    NOTE: the 'range' parameter shadows the builtin of the same name;
    the name is kept for backward compatibility with callers.
    """
    # 'is None' instead of '== None': comparing a passed array with ==
    # would give an ambiguous element-wise result.
    if range is None:
        range = [[np.min(x), np.max(x)], [np.min(y), np.max(y)]]
    if Nbins is None:
        # Integer division -- histogram2d needs an integer bin count
        # (len(x)/10 is a float on python 3); at least one bin.
        Nbins = max(len(x) // 10, 1)
    im, xedges, yedges = np.histogram2d(x, y, bins=Nbins, weights=weights, range=range)
    xd, yd = xedges[1] - xedges[0], yedges[1] - yedges[0]
    # NOTE(review): these are the lower bin edges, not bin centres --
    # presumably intended; confirm before changing.
    xp, yp = xedges[1:] - xd, yedges[1:] - yd
    k = sphere2dk(3, 1, 7)
    im = ss.convolve2d(im, k, mode='same')  # smooth for cleaner contours
    plt.contour(xp, yp, im.transpose(), Nlevels, colors=c, linestyles=ls, linewidths=lw)
def massfunction(mass, Lbox, Nbins=50, c='k', lw=2, ls='-', label=r'\textsc{sage}'):
    """Plot the galaxy mass function Phi(log10 M) for the given masses
    in a periodic box of side Lbox (volume Lbox**3)."""
    logm = np.log10(mass[mass > 0])  # ignore massless entries
    lo = max(8, np.min(logm))
    hi = min(12.5, np.max(logm))
    counts, edges = np.histogram(logm, bins=Nbins, range=[lo, hi])
    width = edges[1] - edges[0]
    centres = edges[:-1] + width / 2
    phi = counts / (width * Lbox ** 3)  # number density per dex
    plt.plot(centres, phi, c + ls, linewidth=lw, label=label)
def massfunction_obsdata(h=0.678):
    """Overplot the observed galaxy stellar mass function as a shaded
    band.

    B columns are [log10(mass), number density, uncertainty];
    attributed to Baldry et al. (2008) in the legend and tabulated
    for h=0.7, then rescaled to the Hubble parameter h given here.
    """
    B = np.array([
        [7.05, 1.3531e-01, 6.0741e-02],
        [7.15, 1.3474e-01, 6.0109e-02],
        [7.25, 2.0971e-01, 7.7965e-02],
        [7.35, 1.7161e-01, 3.1841e-02],
        [7.45, 2.1648e-01, 5.7832e-02],
        [7.55, 2.1645e-01, 3.9988e-02],
        [7.65, 2.0837e-01, 4.8713e-02],
        [7.75, 2.0402e-01, 7.0061e-02],
        [7.85, 1.5536e-01, 3.9182e-02],
        [7.95, 1.5232e-01, 2.6824e-02],
        [8.05, 1.5067e-01, 4.8824e-02],
        [8.15, 1.3032e-01, 2.1892e-02],
        [8.25, 1.2545e-01, 3.5526e-02],
        [8.35, 9.8472e-02, 2.7181e-02],
        [8.45, 8.7194e-02, 2.8345e-02],
        [8.55, 7.0758e-02, 2.0808e-02],
        [8.65, 5.8190e-02, 1.3359e-02],
        [8.75, 5.6057e-02, 1.3512e-02],
        [8.85, 5.1380e-02, 1.2815e-02],
        [8.95, 4.4206e-02, 9.6866e-03],
        [9.05, 4.1149e-02, 1.0169e-02],
        [9.15, 3.4959e-02, 6.7898e-03],
        [9.25, 3.3111e-02, 8.3704e-03],
        [9.35, 3.0138e-02, 4.7741e-03],
        [9.45, 2.6692e-02, 5.5029e-03],
        [9.55, 2.4656e-02, 4.4359e-03],
        [9.65, 2.2885e-02, 3.7915e-03],
        [9.75, 2.1849e-02, 3.9812e-03],
        [9.85, 2.0383e-02, 3.2930e-03],
        [9.95, 1.9929e-02, 2.9370e-03],
        [10.05, 1.8865e-02, 2.4624e-03],
        [10.15, 1.8136e-02, 2.5208e-03],
        [10.25, 1.7657e-02, 2.4217e-03],
        [10.35, 1.6616e-02, 2.2784e-03],
        [10.45, 1.6114e-02, 2.1783e-03],
        [10.55, 1.4366e-02, 1.8819e-03],
        [10.65, 1.2588e-02, 1.8249e-03],
        [10.75, 1.1372e-02, 1.4436e-03],
        [10.85, 9.1213e-03, 1.5816e-03],
        [10.95, 6.1125e-03, 9.6735e-04],
        [11.05, 4.3923e-03, 9.6254e-04],
        [11.15, 2.5463e-03, 5.0038e-04],
        [11.25, 1.4298e-03, 4.2816e-04],
        [11.35, 6.4867e-04, 1.6439e-04],
        [11.45, 2.8294e-04, 9.9799e-05],
        [11.55, 1.0617e-04, 4.9085e-05],
        [11.65, 3.2702e-05, 2.4546e-05],
        [11.75, 1.2571e-05, 1.2571e-05],
        [11.85, 8.4589e-06, 8.4589e-06],
        [11.95, 7.4764e-06, 7.4764e-06]
        ], dtype=np.float32)
    # Rescale masses and densities from the tabulated h=0.7 to the
    # requested h, then shade the +/- uncertainty band.
    plt.fill_between(B[:,0]+np.log10(0.7**2)-np.log10(h**2), (B[:,1]+B[:,2])*h**3, (B[:,1]-B[:,2])*h**3, facecolor='purple', alpha=0.2)
    plt.plot([1,1], [1,2], color='purple', linewidth=8, alpha=0.3, label=r'Baldry et al.~(2008)') # Just for the legend
def btf_obsdata(h=0.678):
    """Overplot the observed baryonic Tully-Fisher relation as a
    shaded band (attributed to Stark et al. in the legend; the source
    fits assumed h=0.75 and are rescaled to the given h)."""
    x_obs = np.linspace(1, 3, 100)
    # Four fit variants spanning random + systematic uncertainty.
    y_obs_arr = np.array([[4.09 * x_obs + 2.3],
                          [4.09 * x_obs + 1.28],
                          [3.79 * x_obs + 2.3],
                          [3.79 * x_obs + 1.28]])
    h_shift = 2 * np.log10(0.75 / h)  # h=0.75 used in the Stark+ paper
    y_obs_min = np.min(y_obs_arr, axis=0)[0] + h_shift
    y_obs_max = np.max(y_obs_arr, axis=0)[0] + h_shift
    plt.fill_between(x_obs, y_obs_max, y_obs_min, color='purple', alpha=0.2)
    # Off-plot line purely to create the legend entry.
    plt.plot([-1, -1], [-1, -2], color='purple', ls='-', lw=8, alpha=0.3,
             label=r'Stark et al.~(2009)')
def bhbulge_obsdata(h=0.678):
    """Overlay observed black-hole mass vs bulge (spheroid) mass data with error bars.

    Tabulated masses assume h=0.7 and are rescaled by (0.7/h)**2 to the
    requested `h`.  Galaxies flagged `core` and the remainder (Sersic) are
    plotted as two differently coloured samples.
    """
    # Black-hole masses [Msun] with their upper (hi) and lower (lo) uncertainties.
    M_BH_obs = (0.7/h)**2*1e8*np.array([39, 11, 0.45, 25, 24, 0.044, 1.4, 0.73, 9.0, 58, 0.10, 8.3, 0.39, 0.42, 0.084, 0.66, 0.73, 15, 4.7, 0.083, 0.14, 0.15, 0.4, 0.12, 1.7, 0.024, 8.8, 0.14, 2.0, 0.073, 0.77, 4.0, 0.17, 0.34, 2.4, 0.058, 3.1, 1.3, 2.0, 97, 8.1, 1.8, 0.65, 0.39, 5.0, 3.3, 4.5, 0.075, 0.68, 1.2, 0.13, 4.7, 0.59, 6.4, 0.79, 3.9, 47, 1.8, 0.06, 0.016, 210, 0.014, 7.4, 1.6, 6.8, 2.6, 11, 37, 5.9, 0.31, 0.10, 3.7, 0.55, 13, 0.11])
    M_BH_hi = (0.7/h)**2*1e8*np.array([4, 2, 0.17, 7, 10, 0.044, 0.9, 0.0, 0.9, 3.5, 0.10, 2.7, 0.26, 0.04, 0.003, 0.03, 0.69, 2, 0.6, 0.004, 0.02, 0.09, 0.04, 0.005, 0.2, 0.024, 10, 0.1, 0.5, 0.015, 0.04, 1.0, 0.01, 0.02, 0.3, 0.008, 1.4, 0.5, 1.1, 30, 2.0, 0.6, 0.07, 0.01, 1.0, 0.9, 2.3, 0.002, 0.13, 0.4, 0.08, 0.5, 0.03, 0.4, 0.38, 0.4, 10, 0.2, 0.014, 0.004, 160, 0.014, 4.7, 0.3, 0.7, 0.4, 1, 18, 2.0, 0.004, 0.001, 2.6, 0.26, 5, 0.005])
    M_BH_lo = (0.7/h)**2*1e8*np.array([5, 2, 0.10, 7, 10, 0.022, 0.3, 0.0, 0.8, 3.5, 0.05, 1.3, 0.09, 0.04, 0.003, 0.03, 0.35, 2, 0.6, 0.004, 0.13, 0.1, 0.05, 0.005, 0.2, 0.012, 2.7, 0.06, 0.5, 0.015, 0.06, 1.0, 0.02, 0.02, 0.3, 0.008, 0.6, 0.5, 0.6, 26, 1.9, 0.3, 0.07, 0.01, 1.0, 2.5, 1.5, 0.002, 0.13, 0.9, 0.08, 0.5, 0.09, 0.4, 0.33, 0.4, 10, 0.1, 0.014, 0.004, 160, 0.007, 3.0, 0.4, 0.7, 1.5, 1, 11, 2.0, 0.004, 0.001, 1.5, 0.19, 4, 0.005])
    # Spheroid (bulge) stellar masses [Msun] with their uncertainties.
    M_sph_obs = (0.7/h)**2*1e10*np.array([69, 37, 1.4, 55, 27, 2.4, 0.46, 1.0, 19, 23, 0.61, 4.6, 11, 1.9, 4.5, 1.4, 0.66, 4.7, 26, 2.0, 0.39, 0.35, 0.30, 3.5, 6.7, 0.88, 1.9, 0.93, 1.24, 0.86, 2.0, 5.4, 1.2, 4.9, 2.0, 0.66, 5.1, 2.6, 3.2, 100, 1.4, 0.88, 1.3, 0.56, 29, 6.1, 0.65, 3.3, 2.0, 6.9, 1.4, 7.7, 0.9, 3.9, 1.8, 8.4, 27, 6.0, 0.43, 1.0, 122, 0.30, 29, 11, 20, 2.8, 24, 78, 96, 3.6, 2.6, 55, 1.4, 64, 1.2])
    M_sph_hi = (0.7/h)**2*1e10*np.array([59, 32, 2.0, 80, 23, 3.5, 0.68, 1.5, 16, 19, 0.89, 6.6, 9, 2.7, 6.6, 2.1, 0.91, 6.9, 22, 2.9, 0.57, 0.52, 0.45, 5.1, 5.7, 1.28, 2.7, 1.37, 1.8, 1.26, 1.7, 4.7, 1.7, 7.1, 2.9, 0.97, 7.4, 3.8, 2.7, 86, 2.1, 1.30, 1.9, 0.82, 25, 5.2, 0.96, 4.9, 3.0, 5.9, 1.2, 6.6, 1.3, 5.7, 2.7, 7.2, 23, 5.2, 0.64, 1.5, 105, 0.45, 25, 10, 17, 2.4, 20, 67, 83, 5.2, 3.8, 48, 2.0, 55, 1.8])
    M_sph_lo = (0.7/h)**2*1e10*np.array([32, 17, 0.8, 33, 12, 1.4, 0.28, 0.6, 9, 10, 0.39, 2.7, 5, 1.1, 2.7, 0.8, 0.40, 2.8, 12, 1.2, 0.23, 0.21, 0.18, 2.1, 3.1, 0.52, 1.1, 0.56, 0.7, 0.51, 0.9, 2.5, 0.7, 2.9, 1.2, 0.40, 3.0, 1.5, 1.5, 46, 0.9, 0.53, 0.8, 0.34, 13, 2.8, 0.39, 2.0, 1.2, 3.2, 0.6, 3.6, 0.5, 2.3, 1.1, 3.9, 12, 2.8, 0.26, 0.6, 57, 0.18, 13, 5, 9, 1.3, 11, 36, 44, 2.1, 1.5, 26, 0.8, 30, 0.7])
    # 1 = "core" galaxy, 0 = Sersic galaxy; selects which sample each point belongs to.
    core = np.array([1,1,0,1,1,0,0,0,1,1,0,1,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,1,0,0,1,0,0,0,1,0,1,0,1,0,1,1,1,0,0,1,0,1,0])
    # Convert the asymmetric linear uncertainties into log-space error bars.
    yerr2, yerr1 = np.log10((M_BH_obs+M_BH_hi)/M_BH_obs), -np.log10((M_BH_obs-M_BH_lo)/M_BH_obs)
    xerr2, xerr1 = np.log10((M_sph_obs+M_sph_hi)/M_sph_obs), -np.log10((M_sph_obs-M_sph_lo)/M_sph_obs)
    plt.errorbar(np.log10(M_sph_obs[core==0]), np.log10(M_BH_obs[core==0]), yerr=[yerr1[core==0],yerr2[core==0]], xerr=[xerr1[core==0],xerr2[core==0]], color='purple', alpha=0.3, label=r'S13 core', ls='none', lw=2, ms=0)
    plt.errorbar(np.log10(M_sph_obs[core==1]), np.log10(M_BH_obs[core==1]), yerr=[yerr1[core==1],yerr2[core==1]], xerr=[xerr1[core==1],xerr2[core==1]], color='c', alpha=0.3, label=r'S13 S\`{e}rsic', ls='none', lw=2, ms=0)
def massmet_obsdata(h=0.673):
    """Overlay the Tremonti et al. (2004) mass-metallicity relation band.

    The tabulated stellar masses are shifted for the Kroupa-vs-Chabrier IMF
    difference (factor 1.5/1.8) and from the h=0.7 of the source paper to the
    requested `h`; the x-axis limits are pinned to the shifted data range.
    """
    logm = np.array([8.52, 8.57, 8.67, 8.76, 8.86, 8.96, 9.06, 9.16, 9.26, 9.36, 9.46, 9.57, 9.66, 9.76, 9.86, 9.96, 10.06, 10.16, 10.26, 10.36, 10.46, 10.56, 10.66, 10.76, 10.86, 10.95, 11.05, 11.15, 11.25, 11.30])
    z_lo = np.array([8.25, 8.25, 8.28, 8.32, 8.37, 8.46, 8.56, 8.59, 8.60, 8.63, 8.66, 8.69, 8.72, 8.76, 8.80, 8.83, 8.85, 8.88, 8.92, 8.94, 8.96, 8.98, 9.00, 9.01, 9.02, 9.03, 9.03, 9.04, 9.03, 9.03])
    z_hi = np.array([8.64, 8.64, 8.65, 8.70, 8.73, 8.75, 8.82, 8.82, 8.86, 8.88, 8.92, 8.94, 8.96, 8.99, 9.01, 9.05, 9.06, 9.09, 9.10, 9.11, 9.12, 9.14, 9.15, 9.15, 9.16, 9.17, 9.17, 9.18, 9.18, 9.18])
    # IMF (Kroupa -> Chabrier) and Hubble-parameter corrections to the masses.
    logm = logm + np.log10(1.5/1.8) + 2*np.log10(0.7/h)
    plt.fill_between(logm, z_hi, z_lo, color='purple', alpha=0.2)
    # Off-axes segment purely so the shaded band gets a legend entry.
    plt.plot([-1,-1], [-1,-2], color='purple', ls='-', lw=8, alpha=0.3, label=r'Tremonti et al.~(2004)')
    plt.xlim(logm.min(), logm.max())
def quiescent(M_star, SFR, sSFRcut=1e-11, c='k', ls='-', lw=2, h=0.678, Nbins=25):
    """Plot the quiescent (passive) fraction of galaxies vs stellar mass.

    Galaxies with specific star-formation rate SFR/M_star below `sSFRcut`
    count as quiescent.  Entries with M_star <= 0 are dropped first.
    `h` rescales the h=0.7 default mass range; `c`, `ls`, `lw` style the line;
    `Nbins` sets the number of mass bins.
    """
    M_star, SFR = M_star[M_star>0], SFR[M_star>0]  # drop galaxies with no stellar mass
    sSFR = SFR/M_star
    logM = np.log10(M_star)
    # Mass range defined at h=0.7, rescaled to the requested h.
    # (Renamed from `range` so the builtin is no longer shadowed.)
    mass_range = [9.0+2*np.log10(0.7/h), 11.5+2*np.log10(0.7/h)]
    Ntot, edge = np.histogram(logM, bins=Nbins, range=mass_range)
    Nred, edge = np.histogram(logM[sSFR<sSFRcut], bins=Nbins, range=mass_range)
    logMplot = (edge[1:]+edge[:-1])/2.
    # 1.0* forces float division (the module runs under Python 2).
    plt.plot(logMplot[Ntot>0], (1.0*Nred[Ntot>0])/Ntot[Ntot>0], c+ls, linewidth=lw, label=r'\textsc{sage}')
    plt.axis([mass_range[0], mass_range[1], 0, 1])
def quiescent_obs(h=0.678):
    """Overlay the observed quiescent-fraction vs stellar-mass band.

    Tabulated masses assume h=0.73 and are shifted to the requested `h`;
    axis limits are pinned to the shifted mass range and [0, 1] in fraction.
    """
    # Stellar-mass bin centres (log10 Msun), tabulated for h=0.73.
    xplot = np.array([9.01355036, 9.11355036, 9.21355036, 9.31355036,
                      9.41355036, 9.51355036, 9.61355036, 9.71355036,
                      9.81355036, 9.91355036, 10.01355036, 10.11355036,
                      10.21355036, 10.31355036, 10.41355036, 10.51355036,
                      10.61355036, 10.71355036, 10.81355036, 10.91355036,
                      11.01355036, 11.11355036, 11.21355036, 11.31355036, 11.41355036]) + 2*np.log10(0.73/h)
    # Upper and lower envelopes of the observed quiescent fraction per bin.
    ymax = np.array([0.01573072, 0.02670944, 0.03486613, 0.05387423, 0.06631411,
                     0.10754735, 0.14996351, 0.19991417, 0.24246808, 0.2786477 ,
                     0.34340491, 0.37078458, 0.42101923, 0.4627398 , 0.53563742,
                     0.58856477, 0.64181569, 0.71478586, 0.77162649, 0.81487748,
                     0.89033055, 0.92382114, 0.96371631, 1.03993788, 1.09048146])
    ymin = np.array([0.01193108, 0.02189671, 0.02953616, 0.04715393, 0.05877118,
                     0.09789483, 0.13853287, 0.1864227 , 0.2276533 , 0.26262365,
                     0.32536941, 0.35174427, 0.40059964, 0.44076193, 0.51158711,
                     0.5615463 , 0.61213584, 0.68116556, 0.73224148, 0.7676331 ,
                     0.8323257 , 0.84779387, 0.86470192, 0.89376378, 0.84981705])
    plt.fill_between(xplot, ymax, ymin, color='purple', alpha=0.3)
    # Off-axes segment purely so the shaded band gets a legend entry.
    plt.plot([-1,-1], [-1,-2], '-', color='purple', lw=8, alpha=0.3, label=r'Observations')
    plt.axis([np.min(xplot), np.max(xplot), 0, 1])
def percentiles(x, y, low=0.16, med=0.5, high=0.84, bins=20, xrange=None, yrange=None, Nmin=10):
    """Bin `y` along `x` and return per-bin percentile statistics.

    Parameters
    ----------
    x, y : array-like
        Paired samples; pairs with any non-finite member are dropped.
    low, med, high : float
        Fractional percentiles to report per bin (default 16th/50th/84th).
    bins : int or array-like
        An int requests that many equal-count bins (edges placed at evenly
        spaced order statistics of x); an array gives explicit bin edges.
    xrange, yrange : [min, max] or None
        Optional filters applied to the data before binning.
    Nmin : int
        With explicit edges, adjacent bins are merged until every bin holds
        at least `Nmin` points (set Nmin=0 to disable).

    Returns
    -------
    (x_mean, y_high, y_med, y_low) : four arrays, one entry per non-empty bin.
    """
    f = np.isfinite(x)*np.isfinite(y)
    if xrange is not None: f = (x>=xrange[0])*(x<=xrange[1])*f
    if yrange is not None: f = (y>=yrange[0])*(y<=yrange[1])*f
    x, y = x[f], y[f]
    if isinstance(bins, int):
        # Equal-count bins: edges at evenly spaced order statistics of x.
        indices = np.array(np.linspace(0,len(x)-1,bins+1), dtype=int)
        bins = np.sort(x)[indices]
    elif Nmin>0: # Ensure a minimum number of data in each bin
        Nhist, bins = np.histogram(x, bins)
        while len(Nhist[Nhist<Nmin])>0:
            ii = np.where(Nhist<Nmin)[0][0]
            # Merge the under-populated bin with its less-populated neighbour.
            if ii==0 or (ii!=len(Nhist)-1 and Nhist[ii+1]<Nhist[ii-1]):
                bins = np.delete(bins,ii+1)
            else:
                bins = np.delete(bins,ii)
            Nhist, bins = np.histogram(x, bins)
    Nbins = len(bins)-1
    y_low, y_med, y_high = np.zeros(Nbins), np.zeros(Nbins), np.zeros(Nbins)
    x_av, N = np.zeros(Nbins), np.zeros(Nbins)
    for i in range(Nbins):
        f = (x>=bins[i])*(x<bins[i+1])
        yy = np.sort(y[f])
        if len(yy)>2:
            # Linearly interpolate between the bracketing order statistics.
            i_low, i_med, i_high = int(low*len(yy))-1, int(med*len(yy))-1, int(high*len(yy))-1
            frac_low, frac_med, frac_high = low*len(yy)-i_low-1, med*len(yy)-i_med-1, high*len(yy)-i_high-1
            if i_high<=i_med or i_med<=i_low or i_high<=i_low:
                # Diagnostic only.  Was a Python-2-only `print` statement; the
                # function call below works under both Python 2 and 3.
                print('i_low, i_med, i_high = %d, %d, %d' % (i_low, i_med, i_high))
            y_low[i] = yy[i_low]*(1-frac_low) + yy[i_low+1]*frac_low if i_low>0 else yy[0]
            y_med[i] = yy[i_med]*(1-frac_med) + yy[i_med+1]*frac_med if i_med>0 else yy[0]
            y_high[i] = yy[i_high]*(1-frac_high) + yy[i_high+1]*frac_high if i_high<len(yy)-1 else yy[-1]
            x_av[i] = np.mean(x[f])
            N[i] = len(x[f])
    fN = (N>0)
    return x_av[fN], y_high[fN], y_med[fN], y_low[fN]
|
darrencroton/sage
|
output/plot_read_routines.py
|
Python
|
mit
| 16,817
|
[
"Galaxy"
] |
3f94e19663d856a31d5ab2cbe271928546abb4719c2d398f34b3323cb82013d3
|
#!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --doctests optimize
"""
from __future__ import print_function
import copy
import doctest
import glob
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
import docutils.core
import numpy as np
import sphinx
from docutils.parsers.rst import directives
from pkg_resources import parse_version
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Make docutils aware of Sphinx-specific directives (or strip the ones that
# cannot run outside a Sphinx build) so that rST validation of docstrings
# does not report them as unknown.
if parse_version(sphinx.__version__) >= parse_version('1.5'):
    # Enable specific Sphinx directives
    from sphinx.directives import SeeAlso, Only
    directives.register_directive('seealso', SeeAlso)
    directives.register_directive('only', Only)
else:
    # Remove sphinx directives that don't run without Sphinx environment.
    # Sphinx < 1.5 installs all directives on import...
    directives._directives.pop('versionadded', None)
    directives._directives.pop('versionchanged', None)
    directives._directives.pop('moduleauthor', None)
    directives._directives.pop('sectionauthor', None)
    directives._directives.pop('codeauthor', None)
    directives._directives.pop('toctree', None)
# Root package whose public submodules are validated.
BASE_MODULE = "scipy"

# Submodules whose refguide listings, docstrings and doctests are checked.
PUBLIC_SUBMODULES = [
    'cluster',
    'cluster.hierarchy',
    'cluster.vq',
    'constants',
    'fft',
    'fftpack',
    'fftpack.convolve',
    'integrate',
    'interpolate',
    'io',
    'io.arff',
    'io.wavfile',
    'linalg',
    'linalg.blas',
    'linalg.lapack',
    'linalg.interpolative',
    'misc',
    'ndimage',
    'odr',
    'optimize',
    'signal',
    'signal.windows',
    'sparse',
    'sparse.csgraph',
    'sparse.linalg',
    'spatial',
    'spatial.distance',
    'spatial.transform',
    'special',
    'stats',
    'stats.mstats',
    'stats.contingency',
]

# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
    'fftpack.convolve': 'fftpack',
    'io.wavfile': 'io',
    'io.arff': 'io',
}

# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
    'scipy.stats.kstwobign', # inaccurate cdf or ppf
    'scipy.stats.levy_stable',
    'scipy.special.sinc', # comes from numpy
    'scipy.misc.who', # comes from numpy
    'io.rst', # XXX: need to figure out how to deal w/ mat files
])

# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
    r'scipy\.sparse\.csgraph',
    r'scipy\.sparse\.linalg',
    r'scipy\.spatial\.distance',
    r'scipy\.linalg\.blas\.[sdczi].*',
    r'scipy\.linalg\.lapack\.[sdczi].*',
]

# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
    r'scipy\.special\..*_roots', # old aliases for scipy.special.*_roots
    r'scipy\.special\.jn', # alias for jv
    r'scipy\.linalg\.solve_lyapunov', # deprecated name
    r'scipy\.stats\.contingency\.chi2_contingency',
    r'scipy\.stats\.contingency\.expected_freq',
    r'scipy\.stats\.contingency\.margins',
    r'scipy\.stats\.reciprocal',
]

# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
             'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
             'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
             'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):
    REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)

# Set True by init_matplotlib(); gates plt cleanup after doctest runs.
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
    """Return `path` spelled relative to `cwd` or absolutely, whichever is shorter.

    Non-string inputs (e.g. None) are passed through unchanged.  `cwd`
    defaults to the current working directory.
    """
    if not isinstance(path, str):
        return path
    base = os.getcwd() if cwd is None else cwd
    absolute = os.path.abspath(path)
    relative = os.path.relpath(path, base)
    # Ties go to the absolute form, matching the original <= comparison.
    return absolute if len(absolute) <= len(relative) else relative
def find_names(module, names_dict):
    """Collect names listed in `module`'s refguide docstring into `names_dict`.

    Recognised entries are:
      * lines of three spaces, a name, and optionally dashes plus text
        (the autosummary-style listing; may yield some false positives);
      * explicit ``.. data::`` / ``.. function::`` directives;
      * (scipy.constants only) double-backquoted names.

    ``.. currentmodule::`` / ``.. module::`` lines switch which key of
    `names_dict` subsequent names are recorded under; each value is a set.
    """
    entry_patterns = [
        re.compile(r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$"),
        re.compile(r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"),
    ]
    if module.__name__ == 'scipy.constants':
        entry_patterns.append(re.compile("^``([a-z_0-9A-Z]+)``"))
    module_switch = re.compile(
        r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$")

    current_module = module.__name__
    for line in module.__doc__.splitlines():
        switch = module_switch.search(line)
        if switch:
            current_module = switch.group(1)
            continue
        for pattern in entry_patterns:
            hit = pattern.match(line)
            if hit is not None:
                names_dict.setdefault(current_module, set()).add(hit.group(1))
                break
def get_all_dict(module):
    """Return ``(not_deprecated, deprecated, others)`` name lists for `module`.

    Starts from ``module.__all__`` (falling back to ``dir(module)``), drops
    private names, ``__future__`` re-exports and submodules, then splits the
    remainder by whether calling the object raises DeprecationWarning (see
    is_deprecated).  `others` is every ``dir(module)`` name not classified.
    """
    source = module.__all__ if hasattr(module, "__all__") else dir(module)
    names = [n for n in copy.deepcopy(source) if not n.startswith("_")]
    for future_name in ['absolute_import', 'division', 'print_function']:
        try:
            names.remove(future_name)
        except ValueError:
            pass
    # Modules are almost always private; real submodules need a separate
    # run of refguide_check.
    names = [n for n in names if not inspect.ismodule(getattr(module, n, None))]

    deprecated, not_deprecated = [], []
    for n in names:
        obj = getattr(module, n, None)
        bucket = deprecated if callable(obj) and is_deprecated(obj) else not_deprecated
        bucket.append(n)

    others = set(dir(module)) - set(deprecated) - set(not_deprecated)
    return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
    """Return sets of objects only in __all__, only in refguide, or completely missing."""
    def skiplisted(patterns, name):
        # True when module_name.name matches any skiplist regex.
        return any(re.match(p, module_name + '.' + name) for p in patterns)

    only_all = set(
        name for name in all_dict
        if name not in names
        and not skiplisted(REFGUIDE_AUTOSUMMARY_SKIPLIST, name))

    only_ref, missing = set(), set()
    for name in names:
        if name in all_dict:
            continue
        if skiplisted(REFGUIDE_ALL_SKIPLIST, name):
            # Allowed to be absent from __all__ — but it must exist somewhere.
            if name not in others:
                missing.add(name)
        else:
            only_ref.add(name)

    return only_all, only_ref, missing
def is_deprecated(f):
    """Return True when calling `f` raises DeprecationWarning.

    Warnings are promoted to errors and `f` is invoked with a deliberately
    invalid keyword, so a normal callable fails with TypeError (ignored)
    instead of executing for real; a deprecated one warns first, which
    surfaces here as a DeprecationWarning exception.
    """
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("error")
        try:
            f(**{"not a kwarg": None})
        except DeprecationWarning:
            return True
        except Exception:
            pass
        return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
    """Cross-check `module_name`'s __all__ contents against its refguide listing.

    Parameters: `all_dict`/`deprecated`/`others` come from get_all_dict();
    `names` from find_names(); `dots` emits a '.'/'F' progress marker.
    Returns a one-element list ``[(None, success_flag, report_text)]`` so the
    result composes with the other check_* helpers.
    """
    num_all = len(all_dict)
    num_ref = len(names)
    output = ""
    output += "Non-deprecated objects in __all__: %i\n" % num_all
    output += "Objects in refguide: %i\n\n" % num_ref
    only_all, only_ref, missing = compare(all_dict, others, names, module_name)
    # Deprecated objects still listed in the refguide are reported separately
    # and excluded from the only-in-refguide error below.
    dep_in_ref = set(only_ref).intersection(deprecated)
    only_ref = set(only_ref).difference(deprecated)
    if len(dep_in_ref) > 0:
        output += "Deprecated objects in refguide::\n\n"
        for name in sorted(deprecated):
            output += " " + name + "\n"
    if len(only_all) == len(only_ref) == len(missing) == 0:
        if dots:
            output_dot('.')
        return [(None, True, output)]
    else:
        if len(only_all) > 0:
            output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
            for name in sorted(only_all):
                output += " " + name + "\n"
            output += "\nThis issue can be fixed by adding these objects to\n"
            output += "the function listing in __init__.py for this module\n"
        if len(only_ref) > 0:
            output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
            for name in sorted(only_ref):
                output += " " + name + "\n"
            output += "\nThis issue should likely be fixed by removing these objects\n"
            output += "from the function listing in __init__.py for this module\n"
            output += "or adding them to __all__.\n"
        if len(missing) > 0:
            output += "ERROR: missing objects::\n\n"
            for name in sorted(missing):
                output += " " + name + "\n"
        if dots:
            output_dot('F')
        return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
    """Validate `text` as reStructuredText; return ``(success, report_text)``.

    `name` labels the report; `dots` emits a '.'/'F'/'E' progress marker.
    Known Sphinx-only roles/directives are tolerated rather than reported.
    """
    if text is None:
        if dots:
            output_dot('E')
        return False, "ERROR: %s: no documentation" % (name,)

    # Roles/directives docutils doesn't know but Sphinx does — not errors.
    ok_unknown_items = set([
        'mod', 'currentmodule', 'autosummary', 'data',
        'obj', 'versionadded', 'versionchanged', 'module', 'class', 'meth',
        'ref', 'func', 'toctree', 'moduleauthor',
        'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
    ])

    # Run through docutils
    error_stream = io.StringIO()

    def resolve(name, is_label=False):
        # Dummy name resolver so unresolved references don't error out.
        return ("http://foo", name)

    # Unique marker used to split the accumulated warning stream per-error.
    token = '<RST-VALIDATE-SYNTAX-CHECK>'

    docutils.core.publish_doctree(
        text, token,
        settings_overrides = dict(halt_level=5,
                                  traceback=True,
                                  default_reference_context='title-reference',
                                  default_role='emphasis',
                                  link_base='',
                                  resolve_name=resolve,
                                  stylesheet_path='',
                                  raw_enabled=0,
                                  file_insertion_enabled=0,
                                  warning_stream=error_stream))

    # Print errors, disregarding unimportant ones
    error_msg = error_stream.getvalue()
    errors = error_msg.split(token)
    success = True
    output = ""

    for error in errors:
        lines = error.splitlines()
        if not lines:
            continue
        # Tolerate unknown roles/directives from the allow-list above.
        m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
        if m:
            if m.group(1) in ok_unknown_items:
                continue
        # Sphinx's math :label: option is not known to plain docutils.
        m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
        if m:
            continue
        output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
        success = False

    if not success:
        # Append the numbered source text to make the errors locatable.
        output += " " + "-"*72 + "\n"
        for lineno, line in enumerate(text.splitlines()):
            output += " %-4d %s\n" % (lineno+1, line)
        output += " " + "-"*72 + "\n\n"

    if dots:
        output_dot('.' if success else 'F')
    return success, output
def output_dot(msg='.', stream=sys.stderr):
    """Emit one progress-marker character to `stream`, flushing so it appears immediately."""
    stream.write(msg)
    stream.flush()
def check_rest(module, names, dots=True):
    """
    Check reStructuredText formatting of docstrings

    Returns: [(name, success_flag, output), ...]
    """
    try:
        # Probe for Python 2 (`unicode` exists) vs Python 3.
        skip_types = (dict, str, unicode, float, int)
    except NameError:
        # python 3
        skip_types = (dict, str, float, int)

    results = []

    # Modules whose docs live in a parent module are validated via the parent.
    if module.__name__[6:] not in OTHER_MODULE_DOCS:
        results += [(module.__name__,) +
                    validate_rst_syntax(inspect.getdoc(module),
                                        module.__name__, dots=dots)]

    for name in names:
        full_name = module.__name__ + '.' + name
        obj = getattr(module, name, None)

        if obj is None:
            results.append((full_name, False, "%s has no docstring" % (full_name,)))
            continue
        elif isinstance(obj, skip_types):
            # Plain data objects carry no docstring of their own.
            continue

        if inspect.ismodule(obj):
            text = inspect.getdoc(obj)
        else:
            try:
                # Render through numpydoc so sections are normalised first.
                text = str(get_doc_object(obj))
            except Exception:
                import traceback
                results.append((full_name, False,
                                "Error in docstring format!\n" +
                                traceback.format_exc()))
                continue

        # Control characters usually mean a docstring that needed r""" ... """.
        m = re.search("([\x00-\x09\x0b-\x1f])", text)
        if m:
            msg = ("Docstring contains a non-printable character %r! "
                   "Maybe forgot r\"\"\"?" % (m.group(1),))
            results.append((full_name, False, msg))
            continue

        try:
            src_file = short_path(inspect.getsourcefile(obj))
        except TypeError:
            src_file = None

        if src_file:
            file_full_name = src_file + ':' + full_name
        else:
            file_full_name = full_name

        results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))

    return results
### Doctest helpers ####

# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}

# the namespace to do checks in: numpy plus aliases so that expected-output
# strings appearing in docstrings can be eval()'d by Checker.check_output.
CHECK_NAMESPACE = {
    'np': np,
    'assert_allclose': np.testing.assert_allclose,
    'assert_equal': np.testing.assert_equal,
    # recognize numpy repr's
    'array': np.array,
    # NOTE(review): np.matrix is deprecated in recent NumPy; kept here only
    # so legacy matrix reprs in docstrings still evaluate — confirm.
    'matrix': np.matrix,
    'int64': np.int64,
    'uint64': np.uint64,
    'int8': np.int8,
    'int32': np.int32,
    'float32': np.float32,
    'float64': np.float64,
    'dtype': np.dtype,
    'nan': np.nan,
    'NaN': np.nan,
    'inf': np.inf,
    'Inf': np.inf,}
class DTRunner(doctest.DocTestRunner):
    """doctest runner tweaked for this script's reporting.

    Remembers the name of the item under test so report callbacks can flush
    it exactly once, and shares each example's source with the custom
    Checker (via ``self._checker._source``) so output checking can inspect it.
    """

    # Replaces doctest's default '****...' divider between failure reports.
    DIVIDER = "\n"

    def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
        # Name of the object whose doctests are being run.
        self._item_name = item_name
        doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
                                       optionflags=optionflags)

    def _report_item_name(self, out, new_line=False):
        # On the first report for this item, optionally emit a separating
        # newline, then clear the name so this happens only once.
        if self._item_name is not None:
            if new_line:
                out("\n")
            self._item_name = None

    def report_start(self, out, test, example):
        # Stash the example source where Checker.check_output can see it
        # (used for its stopword-based skipping of plotting examples).
        self._checker._source = example.source
        return doctest.DocTestRunner.report_start(self, out, test, example)

    def report_success(self, out, test, example, got):
        if self._verbose:
            self._report_item_name(out, new_line=True)
        return doctest.DocTestRunner.report_success(self, out, test, example, got)

    def report_unexpected_exception(self, out, test, example, exc_info):
        self._report_item_name(out)
        return doctest.DocTestRunner.report_unexpected_exception(
            self, out, test, example, exc_info)

    def report_failure(self, out, test, example, got):
        self._report_item_name(out)
        return doctest.DocTestRunner.report_failure(self, out, test,
                                                    example, got)
class Checker(doctest.OutputChecker):
    """Lenient doctest output checker.

    Extends the vanilla checker with: skipping of plotting-related examples
    (``stopwords`` matched against the example source), of outputs marked
    random (``rndm_markers``), of object-address reprs; numeric comparison
    via eval + np.allclose; and optional parsing of namedtuple-style reprs
    (e.g. ``MoodResult(statistic=..., pvalue=...)``) back into tuples.
    """

    # Matches memory-address tails of default object reprs, e.g. 'at 0x...>'.
    obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
    # Stock checker used as a first, strict attempt.
    vanilla = doctest.OutputChecker()
    # Comment markers that flag an example's output as non-deterministic.
    rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
    # Source fragments indicating a plotting example whose output we don't check.
    stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
                 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
                 '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
                 '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
                 '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}

    def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
        # parse_namedtuples: attempt tuple-ification of namedtuple reprs.
        # ns: eval namespace for expected/actual strings (CHECK_NAMESPACE default).
        # atol/rtol: tolerances handed to np.allclose in _do_check.
        self.parse_namedtuples = parse_namedtuples
        self.atol, self.rtol = atol, rtol
        if ns is None:
            self.ns = dict(CHECK_NAMESPACE)
        else:
            self.ns = ns

    def check_output(self, want, got, optionflags):
        # cut it short if they are equal
        if want == got:
            return True

        # skip stopwords in source
        # (self._source is set by DTRunner.report_start for each example)
        if any(word in self._source for word in self.stopwords):
            return True

        # skip random stuff
        if any(word in want for word in self.rndm_markers):
            return True

        # skip function/object addresses
        if self.obj_pattern.search(got):
            return True

        # ignore comments (e.g. signal.freqresp)
        if want.lstrip().startswith("#"):
            return True

        # try the standard doctest
        try:
            if self.vanilla.check_output(want, got, optionflags):
                return True
        except Exception:
            pass

        # OK then, convert strings to objects
        try:
            a_want = eval(want, dict(self.ns))
            a_got = eval(got, dict(self.ns))
        except Exception:
            # Maybe we're printing a numpy array? This produces invalid python
            # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
            # values. So, reinsert commas and retry.
            # TODO: handle (1) abberivation (`print(np.arange(10000))`), and
            # (2) n-dim arrays with n > 1
            s_want = want.strip()
            s_got = got.strip()
            cond = (s_want.startswith("[") and s_want.endswith("]") and
                    s_got.startswith("[") and s_got.endswith("]"))
            if cond:
                s_want = ", ".join(s_want[1:-1].split())
                s_got = ", ".join(s_got[1:-1].split())
                return self.check_output(s_want, s_got, optionflags)

            if not self.parse_namedtuples:
                return False
            # suppose that "want" is a tuple, and "got" is smth like
            # MoodResult(statistic=10, pvalue=0.1).
            # Then convert the latter to the tuple (10, 0.1),
            # and then compare the tuples.
            # (a_want may be bound here: eval(want) can succeed while
            # eval(got) fails on the namedtuple repr.)
            try:
                num = len(a_want)
                regex = ('[\w\d_]+\(' +
                         ', '.join(['[\w\d_]+=(.+)']*num) +
                         '\)')
                grp = re.findall(regex, got.replace('\n', ' '))
                if len(grp) > 1:  # no more than one for now
                    return False
                # fold it back to a tuple
                got_again = '(' + ', '.join(grp[0]) + ')'
                return self.check_output(want, got_again, optionflags)
            except Exception:
                return False

        # ... and defer to numpy
        try:
            return self._do_check(a_want, a_got)
        except Exception:
            # heterog tuple, eg (1, np.array([1., 2.]))
            try:
                return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
            except (TypeError, ValueError):
                return False

    def _do_check(self, want, got):
        # This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
        try:
            if want == got:
                return True
        except Exception:
            pass
        return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
    """Run modified doctests for the set of `tests`.

    Runs inside a temporary working directory with numpy error/print state
    saved, so examples cannot leave files behind or leak global settings.
    `doctest_warnings` routes the examples' stderr to stdout instead of the
    captured report.

    Returns: list of [(success_flag, output), ...]
    """
    flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
    runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
                      verbose=verbose)

    output = io.StringIO(newline='')
    success = True
    # Redirect stderr to the stdout or output
    tmp_stderr = sys.stdout if doctest_warnings else output

    @contextmanager
    def temp_cwd():
        # Run examples in a throwaway directory; always restore and clean up.
        cwd = os.getcwd()
        tmpdir = tempfile.mkdtemp()
        try:
            os.chdir(tmpdir)
            yield tmpdir
        finally:
            os.chdir(cwd)
            shutil.rmtree(tmpdir)

    # Run tests, trying to restore global state afterward
    cwd = os.getcwd()
    with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \
            redirect_stderr(tmp_stderr):
        # try to ensure random seed is NOT reproducible
        np.random.seed(None)

        for t in tests:
            # Shorten file names in reports (relative to the original cwd).
            t.filename = short_path(t.filename, cwd)
            fails, successes = runner.run(t, out=output.write)
            if fails > 0:
                success = False

    output.seek(0)
    return success, output.read()
def check_doctests(module, verbose, ns=None,
                   dots=True, doctest_warnings=False):
    """Check code in docstrings of the module's public symbols.

    Returns: list of [(item_name, success_flag, output), ...]
    """
    if ns is None:
        ns = dict(DEFAULT_NAMESPACE)

    # Loop over non-deprecated items
    results = []

    for name in get_all_dict(module)[0]:
        full_name = module.__name__ + '.' + name

        if full_name in DOCTEST_SKIPLIST:
            continue

        try:
            obj = getattr(module, name)
        except AttributeError:
            import traceback
            results.append((full_name, False,
                            "Missing item!\n" +
                            traceback.format_exc()))
            continue

        finder = doctest.DocTestFinder()
        try:
            tests = finder.find(obj, name, globs=dict(ns))
        except Exception:
            import traceback
            results.append((full_name, False,
                            "Failed to get doctests!\n" +
                            traceback.format_exc()))
            continue

        success, output = _run_doctests(tests, full_name, verbose,
                                        doctest_warnings)

        if dots:
            output_dot('.' if success else 'F')

        results.append((full_name, success, output))

    # Close any figures that plotting examples may have opened.
    if HAVE_MATPLOTLIB:
        import matplotlib.pyplot as plt
        plt.close('all')

    return results
def check_doctests_testfile(fname, verbose, ns=None,
                            dots=True, doctest_warnings=False):
    """Check code in a text file.

    Mimic `check_doctests` above, differing mostly in test discovery.
    (which is borrowed from stdlib's doctest.testfile here,
    https://github.com/python-git/python/blob/master/Lib/doctest.py)

    Returns: list of [(item_name, success_flag, output), ...]

    Notes
    -----
    refguide can be signalled to skip testing code by adding
    ``#doctest: +SKIP`` to the end of the line. If the output varies or is
    random, add ``# may vary`` or ``# random`` to the comment. for example

    >>> plt.plot(...)  # doctest: +SKIP
    >>> random.randint(0,10)
    5 # random

    We also try to weed out pseudocode:
    * We maintain a list of exceptions which signal pseudocode,
    * We split the text file into "blocks" of code separated by empty lines
      and/or intervening text.
    * If a block contains a marker, the whole block is then assumed to be
      pseudocode. It is then not being doctested.

    The rationale is that typically, the text looks like this:

        blah
        <BLANKLINE>
        >>> from numpy import some_module   # pseudocode!
        >>> func = some_module.some_function
        >>> func(42)                  # still pseudocode
        146
        <BLANKLINE>
        blah
        <BLANKLINE>
        >>> 2 + 3        # real code, doctest it
        5
    """
    results = []

    if ns is None:
        ns = dict(DEFAULT_NAMESPACE)

    _, short_name = os.path.split(fname)
    if short_name in DOCTEST_SKIPLIST:
        return results

    full_name = fname
    with open(fname, encoding='utf-8') as f:
        text = f.read()

    # Source fragments whose presence marks a whole block as pseudocode.
    PSEUDOCODE = set(['some_function', 'some_module', 'import example',
                      'ctypes.CDLL',  # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutotial
                      ])

    # split the text into "blocks" and try to detect and omit pseudocode blocks.
    parser = doctest.DocTestParser()
    good_parts = []
    for part in text.split('\n\n'):
        tests = parser.get_doctest(part, ns, fname, fname, 0)
        if any(word in ex.source for word in PSEUDOCODE
               for ex in tests.examples):
            # omit it
            pass
        else:
            # `part` looks like a good code, let's doctest it
            good_parts += [part]

    # Reassemble the good bits and doctest them:
    good_text = '\n\n'.join(good_parts)
    tests = parser.get_doctest(good_text, ns, fname, fname, 0)
    success, output = _run_doctests([tests], full_name, verbose,
                                    doctest_warnings)

    if dots:
        output_dot('.' if success else 'F')

    results.append((full_name, success, output))

    # Close any figures that plotting examples may have opened.
    if HAVE_MATPLOTLIB:
        import matplotlib.pyplot as plt
        plt.close('all')

    return results
def init_matplotlib():
    """Probe for matplotlib and set the module-wide HAVE_MATPLOTLIB flag.

    When matplotlib is importable, the non-interactive Agg backend is
    selected so plotting doctests can run headless; otherwise the flag is
    simply left False and plot cleanup is skipped elsewhere.
    """
    global HAVE_MATPLOTLIB

    try:
        import matplotlib
        matplotlib.use('Agg')
    except ImportError:
        HAVE_MATPLOTLIB = False
    else:
        HAVE_MATPLOTLIB = True
def main(argv):
    """CLI entry point: parse args, run the requested checks, report, and exit.

    Exits 0 when every refguide/doctest check passed, 1 otherwise.
    """
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("module_names", metavar="SUBMODULES", default=[],
                        nargs='*', help="Submodules to check (default: all public)")
    parser.add_argument("--doctests", action="store_true", help="Run also doctests")
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument("--doctest-warnings", action="store_true",
                        help="Enforce warning checking for doctests")
    parser.add_argument("--skip-tutorial", action="store_true",
                        help="Skip running doctests in the tutorial.")
    args = parser.parse_args(argv)

    modules = []
    names_dict = {}

    # An explicit submodule selection implies a focused run: skip the tutorial.
    if args.module_names:
        args.skip_tutorial = True
    else:
        args.module_names = list(PUBLIC_SUBMODULES)

    os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'

    # Modules whose docs live in a parent module pull the parent in too.
    module_names = list(args.module_names)
    for name in list(module_names):
        if name in OTHER_MODULE_DOCS:
            name = OTHER_MODULE_DOCS[name]
            if name not in module_names:
                module_names.append(name)

    for submodule_name in module_names:
        module_name = BASE_MODULE + '.' + submodule_name
        __import__(module_name)
        module = sys.modules[module_name]

        if submodule_name not in OTHER_MODULE_DOCS:
            find_names(module, names_dict)

        if submodule_name in args.module_names:
            modules.append(module)

    dots = True
    success = True
    results = []

    print("Running checks for %d modules:" % (len(modules),))

    if args.doctests or not args.skip_tutorial:
        init_matplotlib()

    for module in modules:
        if dots:
            if module is not modules[0]:
                sys.stderr.write(' ')
            sys.stderr.write(module.__name__ + ' ')
            sys.stderr.flush()

        all_dict, deprecated, others = get_all_dict(module)
        names = names_dict.get(module.__name__, set())

        mod_results = []
        mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
        mod_results += check_rest(module, set(names).difference(deprecated),
                                  dots=dots)
        if args.doctests:
            mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
                                          doctest_warnings=args.doctest_warnings)

        for v in mod_results:
            assert isinstance(v, tuple), v

        results.append((module, mod_results))

    if dots:
        sys.stderr.write("\n")
        sys.stderr.flush()

    if not args.skip_tutorial:
        base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
        tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')
        print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))
        for filename in sorted(glob.glob(tut_path)):
            if dots:
                sys.stderr.write('\n')
                sys.stderr.write(os.path.split(filename)[1] + ' ')
                sys.stderr.flush()

            tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
                                                  dots=dots, doctest_warnings=args.doctest_warnings)

            def scratch(): pass # stub out a "module", see below
            scratch.__name__ = filename
            results.append((scratch, tut_results))

            if dots:
                sys.stderr.write("\n")
                sys.stderr.flush()

    # Report results
    all_success = True

    for module, mod_results in results:
        success = all(x[1] for x in mod_results)
        all_success = all_success and success

        if success and args.verbose == 0:
            continue

        print("")
        print("=" * len(module.__name__))
        print(module.__name__)
        print("=" * len(module.__name__))
        print("")

        for name, success, output in mod_results:
            if name is None:
                # Module-level check (check_items) has no item name.
                if not success or args.verbose >= 1:
                    print(output.strip())
                    print("")
            elif not success or (args.verbose >= 2 and output.strip()):
                print(name)
                print("-"*len(name))
                print("")
                print(output.strip())
                print("")

    if all_success:
        print("\nOK: refguide and doctests checks passed!")
        sys.exit(0)
    else:
        print("\nERROR: refguide or doctests have errors")
        sys.exit(1)
# Script entry point: run the refguide checks with the CLI arguments
# (program name stripped) and exit with the status code set by main().
if __name__ == '__main__':
    main(argv=sys.argv[1:])
|
arokem/scipy
|
tools/refguide_check.py
|
Python
|
bsd-3-clause
| 31,105
|
[
"Gaussian"
] |
1791233b9c16cb791859f0258c7990312d1ed13061920df7026c10d4cb14d33b
|
# -*- coding: iso-8859-15 -*-
import unittest
from mycroft.util.parse import normalize
from mycroft.util.parse import get_gender
from mycroft.util.parse import extractnumber
from mycroft.util.parse import extract_datetime
from datetime import datetime
class TestNormalize(unittest.TestCase):
    """Tests for mycroft.util.parse helpers: normalize(), extractnumber(),
    extract_datetime() and get_gender(), in English, Portuguese and Spanish.

    NOTE(review): the ``datetime(2017, 06, 27, 00, 00)`` literals below use
    leading zeros, which is valid syntax only on Python 2.
    """

    def test_articles(self):
        """English article removal controlled by remove_articles."""
        self.assertEqual(normalize("this is a test", remove_articles=True),
                         "this is test")
        self.assertEqual(normalize("this is the test", remove_articles=True),
                         "this is test")
        self.assertEqual(normalize("and another test", remove_articles=True),
                         "and another test")
        self.assertEqual(normalize("this is an extra test",
                                   remove_articles=False),
                         "this is an extra test")

    def test_extractnumber(self):
        """English cardinal, ordinal and fraction extraction."""
        self.assertEqual(extractnumber("this is the first test"), 1)
        self.assertEqual(extractnumber("this is 2 test"), 2)
        self.assertEqual(extractnumber("this is second test"), 2)
        # NOTE(review): "third" is parsed as the fraction 1/3 here, not the
        # ordinal 3 -- this assertion pins the parser's historical behaviour.
        self.assertEqual(extractnumber("this is the third test"), 1.0/3.0)
        self.assertEqual(extractnumber("this is test number 4"), 4)
        self.assertEqual(extractnumber("one third of a cup"), 1.0/3.0)
        self.assertEqual(extractnumber("three cups"), 3)
        self.assertEqual(extractnumber("1/3 cups"), 1.0/3.0)
        self.assertEqual(extractnumber("quarter cup"), 0.25)
        self.assertEqual(extractnumber("1/4 cup"), 0.25)
        self.assertEqual(extractnumber("one fourth cup"), 0.25)
        self.assertEqual(extractnumber("2/3 cups"), 2.0/3.0)
        self.assertEqual(extractnumber("3/4 cups"), 3.0/4.0)
        self.assertEqual(extractnumber("1 and 3/4 cups"), 1.75)
        self.assertEqual(extractnumber("1 cup and a half"), 1.5)
        self.assertEqual(extractnumber("one cup and a half"), 1.5)
        self.assertEqual(extractnumber("one and a half cups"), 1.5)
        self.assertEqual(extractnumber("one and one half cups"), 1.5)
        self.assertEqual(extractnumber("three quarter cups"), 3.0/4.0)
        self.assertEqual(extractnumber("three quarters cups"), 3.0/4.0)

    def test_extractdatetime_en(self):
        """English date/time extraction, anchored at 2017-06-27 00:00."""
        def extractWithFormat(text):
            # Fixed anchor date so relative phrases resolve deterministically.
            date = datetime(2017, 06, 27, 00, 00)
            [extractedDate, leftover] = extract_datetime(text, date)
            extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
            return [extractedDate, leftover]

        def testExtract(text, expected_date, expected_leftover):
            res = extractWithFormat(text)
            self.assertEqual(res[0], expected_date)
            self.assertEqual(res[1], expected_leftover)

        testExtract("Set the ambush for 5 days from today",
                    "2017-07-02 00:00:00", "set ambush")
        testExtract("What is the day after tomorrow's weather?",
                    "2017-06-29 00:00:00", "what is weather")
        testExtract("Remind me at 10:45 pm",
                    "2017-06-27 22:45:00", "remind me")
        testExtract("what is the weather on friday morning",
                    "2017-06-30 08:00:00", "what is weather")
        testExtract("what is tomorrow's weather",
                    "2017-06-28 00:00:00", "what is weather")
        testExtract("remind me to call mom in 8 weeks and 2 days",
                    "2017-08-24 00:00:00", "remind me to call mom")
        testExtract("Play Rick Astley music 2 days from Friday",
                    "2017-07-02 00:00:00", "play rick astley music")
        testExtract("Begin the invasion at 3:45 pm on Thursday",
                    "2017-06-29 15:45:00", "begin invasion")
        testExtract("On Monday, order pie from the bakery",
                    "2017-07-03 00:00:00", "order pie from bakery")
        testExtract("Play Happy Birthday music 5 years from today",
                    "2022-06-27 00:00:00", "play happy birthday music")
        testExtract("Skype Mom at 12:45 pm next Thursday",
                    "2017-07-06 12:45:00", "skype mom")
        testExtract("What's the weather next Thursday?",
                    "2017-07-06 00:00:00", "what weather")
        testExtract("what is the weather next friday morning",
                    "2017-07-07 08:00:00", "what is weather")
        testExtract("what is the weather next friday evening",
                    "2017-07-07 19:00:00", "what is weather")
        testExtract("what is the weather next friday afternoon",
                    "2017-07-07 15:00:00", "what is weather")
        testExtract("remind me to call mom on august 3rd",
                    "2017-08-03 00:00:00", "remind me to call mom")
        testExtract("Buy fireworks on the 4th of July",
                    "2017-07-04 00:00:00", "buy fireworks")
        testExtract("what is the weather 2 weeks from next friday",
                    "2017-07-21 00:00:00", "what is weather")
        testExtract("what is the weather wednesday at 0700 hours",
                    "2017-06-28 07:00:00", "what is weather")
        testExtract("what is the weather wednesday at 7 o'clock",
                    "2017-06-28 07:00:00", "what is weather")
        testExtract("Set up an appointment at 12:45 pm next Thursday",
                    "2017-07-06 12:45:00", "set up appointment")
        testExtract("What's the weather this Thursday?",
                    "2017-06-29 00:00:00", "what weather")
        testExtract("set up the visit for 2 weeks and 6 days from Saturday",
                    "2017-07-21 00:00:00", "set up visit")
        testExtract("Begin the invasion at 03 45 on Thursday",
                    "2017-06-29 03:45:00", "begin invasion")
        testExtract("Begin the invasion at o 800 hours on Thursday",
                    "2017-06-29 08:00:00", "begin invasion")
        testExtract("Begin the party at 8 o'clock in the evening on Thursday",
                    "2017-06-29 20:00:00", "begin party")
        testExtract("Begin the invasion at 8 in the evening on Thursday",
                    "2017-06-29 20:00:00", "begin invasion")
        testExtract("Begin the invasion on Thursday at noon",
                    "2017-06-29 12:00:00", "begin invasion")
        testExtract("Begin the invasion on Thursday at midnight",
                    "2017-06-29 00:00:00", "begin invasion")
        testExtract("Begin the invasion on Thursday at 0500",
                    "2017-06-29 05:00:00", "begin invasion")
        testExtract("remind me to wake up in 4 years",
                    "2021-06-27 00:00:00", "remind me to wake up")
        testExtract("remind me to wake up in 4 years and 4 days",
                    "2021-07-01 00:00:00", "remind me to wake up")
        testExtract("What is the weather 3 days after tomorrow?",
                    "2017-07-01 00:00:00", "what is weather")

    def test_spaces(self):
        """Leading/trailing whitespace is stripped by normalize()."""
        self.assertEqual(normalize(" this is a test"),
                         "this is test")
        self.assertEqual(normalize(" this is a test "),
                         "this is test")
        self.assertEqual(normalize(" this is one test"),
                         "this is 1 test")

    def test_numbers(self):
        """English number words are replaced with digits."""
        self.assertEqual(normalize("this is a one two three  test"),
                         "this is 1 2 3 test")
        self.assertEqual(normalize("  it's  a four five six  test"),
                         "it is 4 5 6 test")
        self.assertEqual(normalize("it's  a seven eight nine test"),
                         "it is 7 8 9 test")
        # NOTE(review): duplicated assertion kept from the original.
        self.assertEqual(normalize("it's a seven eight nine  test"),
                         "it is 7 8 9 test")
        self.assertEqual(normalize("that's a ten eleven twelve test"),
                         "that is 10 11 12 test")
        self.assertEqual(normalize("that's a thirteen fourteen test"),
                         "that is 13 14 test")
        self.assertEqual(normalize("that's fifteen sixteen seventeen"),
                         "that is 15 16 17")
        self.assertEqual(normalize("that's eighteen nineteen twenty"),
                         "that is 18 19 20")

    def test_contractions(self):
        """Contractions expand to full phrases; TODOs mark ambiguous cases."""
        self.assertEqual(normalize("ain't"), "is not")
        self.assertEqual(normalize("aren't"), "are not")
        self.assertEqual(normalize("can't"), "can not")
        self.assertEqual(normalize("could've"), "could have")
        self.assertEqual(normalize("couldn't"), "could not")
        self.assertEqual(normalize("didn't"), "did not")
        self.assertEqual(normalize("doesn't"), "does not")
        self.assertEqual(normalize("don't"), "do not")
        self.assertEqual(normalize("gonna"), "going to")
        self.assertEqual(normalize("gotta"), "got to")
        self.assertEqual(normalize("hadn't"), "had not")
        self.assertEqual(normalize("hadn't have"), "had not have")
        self.assertEqual(normalize("hasn't"), "has not")
        self.assertEqual(normalize("haven't"), "have not")
        # TODO: Ambiguous with "he had"
        self.assertEqual(normalize("he'd"), "he would")
        self.assertEqual(normalize("he'll"), "he will")
        # TODO: Ambiguous with "he has"
        self.assertEqual(normalize("he's"), "he is")
        # TODO: Ambiguous with "how would"
        self.assertEqual(normalize("how'd"), "how did")
        self.assertEqual(normalize("how'll"), "how will")
        # TODO: Ambiguous with "how has" and "how does"
        self.assertEqual(normalize("how's"), "how is")
        # TODO: Ambiguous with "I had"
        self.assertEqual(normalize("I'd"), "I would")
        self.assertEqual(normalize("I'll"), "I will")
        self.assertEqual(normalize("I'm"), "I am")
        self.assertEqual(normalize("I've"), "I have")
        self.assertEqual(normalize("I haven't"), "I have not")
        self.assertEqual(normalize("isn't"), "is not")
        self.assertEqual(normalize("it'd"), "it would")
        self.assertEqual(normalize("it'll"), "it will")
        # TODO: Ambiguous with "it has"
        self.assertEqual(normalize("it's"), "it is")
        self.assertEqual(normalize("it isn't"), "it is not")
        self.assertEqual(normalize("mightn't"), "might not")
        self.assertEqual(normalize("might've"), "might have")
        self.assertEqual(normalize("mustn't"), "must not")
        self.assertEqual(normalize("mustn't have"), "must not have")
        self.assertEqual(normalize("must've"), "must have")
        self.assertEqual(normalize("needn't"), "need not")
        self.assertEqual(normalize("oughtn't"), "ought not")
        self.assertEqual(normalize("shan't"), "shall not")
        # TODO: Ambiguous with "she had"
        self.assertEqual(normalize("she'd"), "she would")
        self.assertEqual(normalize("she hadn't"), "she had not")
        self.assertEqual(normalize("she'll"), "she will")
        self.assertEqual(normalize("she's"), "she is")
        self.assertEqual(normalize("she isn't"), "she is not")
        self.assertEqual(normalize("should've"), "should have")
        self.assertEqual(normalize("shouldn't"), "should not")
        self.assertEqual(normalize("shouldn't have"), "should not have")
        self.assertEqual(normalize("somebody's"), "somebody is")
        # TODO: Ambiguous with "someone had"
        self.assertEqual(normalize("someone'd"), "someone would")
        self.assertEqual(normalize("someone hadn't"), "someone had not")
        self.assertEqual(normalize("someone'll"), "someone will")
        # TODO: Ambiguous with "someone has"
        self.assertEqual(normalize("someone's"), "someone is")
        self.assertEqual(normalize("that'll"), "that will")
        # TODO: Ambiguous with "that has"
        self.assertEqual(normalize("that's"), "that is")
        # TODO: Ambiguous with "that had"
        self.assertEqual(normalize("that'd"), "that would")
        # TODO: Ambiguous with "there had"
        self.assertEqual(normalize("there'd"), "there would")
        self.assertEqual(normalize("there're"), "there are")
        # TODO: Ambiguous with "there has"
        self.assertEqual(normalize("there's"), "there is")
        # TODO: Ambiguous with "they had"
        self.assertEqual(normalize("they'd"), "they would")
        self.assertEqual(normalize("they'll"), "they will")
        self.assertEqual(normalize("they won't have"), "they will not have")
        self.assertEqual(normalize("they're"), "they are")
        self.assertEqual(normalize("they've"), "they have")
        self.assertEqual(normalize("they haven't"), "they have not")
        self.assertEqual(normalize("wasn't"), "was not")
        # TODO: Ambiguous with "we had"
        self.assertEqual(normalize("we'd"), "we would")
        self.assertEqual(normalize("we would've"), "we would have")
        self.assertEqual(normalize("we wouldn't"), "we would not")
        self.assertEqual(normalize("we wouldn't have"), "we would not have")
        self.assertEqual(normalize("we'll"), "we will")
        self.assertEqual(normalize("we won't have"), "we will not have")
        self.assertEqual(normalize("we're"), "we are")
        self.assertEqual(normalize("we've"), "we have")
        self.assertEqual(normalize("weren't"), "were not")
        self.assertEqual(normalize("what'd"), "what did")
        self.assertEqual(normalize("what'll"), "what will")
        self.assertEqual(normalize("what're"), "what are")
        # TODO: Ambiguous with "what has" / "what does"
        self.assertEqual(normalize("whats"), "what is")
        self.assertEqual(normalize("what's"), "what is")
        self.assertEqual(normalize("what've"), "what have")
        # TODO: Ambiguous with "when has"
        self.assertEqual(normalize("when's"), "when is")
        self.assertEqual(normalize("where'd"), "where did")
        # TODO: Ambiguous with "where has" / "where does"
        self.assertEqual(normalize("where's"), "where is")
        self.assertEqual(normalize("where've"), "where have")
        # TODO: Ambiguous with "who had" / "who did"
        self.assertEqual(normalize("who'd"), "who would")
        self.assertEqual(normalize("who'd've"), "who would have")
        self.assertEqual(normalize("who'll"), "who will")
        self.assertEqual(normalize("who're"), "who are")
        # TODO: Ambiguous with "who has" / "who does"
        self.assertEqual(normalize("who's"), "who is")
        self.assertEqual(normalize("who've"), "who have")
        self.assertEqual(normalize("why'd"), "why did")
        self.assertEqual(normalize("why're"), "why are")
        # TODO: Ambiguous with "why has" / "why does"
        self.assertEqual(normalize("why's"), "why is")
        self.assertEqual(normalize("won't"), "will not")
        self.assertEqual(normalize("won't've"), "will not have")
        self.assertEqual(normalize("would've"), "would have")
        self.assertEqual(normalize("wouldn't"), "would not")
        self.assertEqual(normalize("wouldn't've"), "would not have")
        self.assertEqual(normalize("ya'll"), "you all")
        self.assertEqual(normalize("y'all"), "you all")
        self.assertEqual(normalize("y'ain't"), "you are not")
        # TODO: Ambiguous with "you had"
        self.assertEqual(normalize("you'd"), "you would")
        self.assertEqual(normalize("you'd've"), "you would have")
        self.assertEqual(normalize("you'll"), "you will")
        self.assertEqual(normalize("you're"), "you are")
        self.assertEqual(normalize("you aren't"), "you are not")
        self.assertEqual(normalize("you've"), "you have")
        self.assertEqual(normalize("you haven't"), "you have not")

    def test_combinations(self):
        """Contraction expansion and number substitution combined."""
        self.assertEqual(normalize("I couldn't have guessed there'd be two"),
                         "I could not have guessed there would be 2")
        self.assertEqual(normalize("I wouldn't have"), "I would not have")
        self.assertEqual(normalize("I hadn't been there"),
                         "I had not been there")
        self.assertEqual(normalize("I would've"), "I would have")
        self.assertEqual(normalize("it hadn't"), "it had not")
        self.assertEqual(normalize("it hadn't have"), "it had not have")
        self.assertEqual(normalize("it would've"), "it would have")
        self.assertEqual(normalize("she wouldn't have"), "she would not have")
        self.assertEqual(normalize("she would've"), "she would have")
        self.assertEqual(normalize("someone wouldn't have"),
                         "someone would not have")
        self.assertEqual(normalize("someone would've"), "someone would have")
        self.assertEqual(normalize("what's the weather like"),
                         "what is weather like")
        self.assertEqual(normalize("that's what I told you"),
                         "that is what I told you")
        self.assertEqual(normalize("whats 8 + 4"), "what is 8 + 4")

    def test_gender(self):
        """English has no grammatical gender; get_gender returns False."""
        self.assertEqual(get_gender("person"),
                         False)

    # Portuguese (pt-PT) tests
    def test_articles_pt(self):
        """Portuguese article removal."""
        self.assertEqual(normalize(u"isto é o teste",
                                   lang="pt", remove_articles=True),
                         u"isto teste")
        self.assertEqual(
            normalize(u"isto é a frase", lang="pt", remove_articles=True),
            u"isto frase")
        self.assertEqual(
            normalize("e outro teste", lang="pt", remove_articles=True),
            "outro teste")
        self.assertEqual(normalize(u"isto é o teste extra", lang="pt",
                                   remove_articles=False),
                         u"isto e o teste extra")

    def test_extractnumber_pt(self):
        """Portuguese cardinal, ordinal, fraction and decimal extraction."""
        self.assertEqual(extractnumber("isto e o primeiro teste", lang="pt"), 1)
        self.assertEqual(extractnumber("isto e o 2 teste", lang="pt"), 2)
        self.assertEqual(extractnumber("isto e o segundo teste", lang="pt"), 2)
        self.assertEqual(extractnumber(u"isto e um terço de teste", lang="pt"),
                         1.0 / 3.0)
        self.assertEqual(extractnumber("isto e o teste numero quatro",
                                       lang="pt"), 4)
        self.assertEqual(extractnumber(u"um terço de chavena", lang="pt"),
                         1.0 / 3.0)
        self.assertEqual(extractnumber("3 canecos", lang="pt"), 3)
        self.assertEqual(extractnumber("1/3 canecos", lang="pt"), 1.0 / 3.0)
        self.assertEqual(extractnumber("quarto de hora", lang="pt"), 0.25)
        self.assertEqual(extractnumber("1/4 hora", lang="pt"), 0.25)
        self.assertEqual(extractnumber("um quarto hora", lang="pt"), 0.25)
        self.assertEqual(extractnumber("2/3 pinga", lang="pt"), 2.0 / 3.0)
        self.assertEqual(extractnumber("3/4 pinga", lang="pt"), 3.0 / 4.0)
        self.assertEqual(extractnumber("1 e 3/4 cafe", lang="pt"), 1.75)
        self.assertEqual(extractnumber("1 cafe e meio", lang="pt"), 1.5)
        self.assertEqual(extractnumber("um cafe e um meio", lang="pt"), 1.5)
        self.assertEqual(extractnumber("tres quartos de chocolate", lang="pt"),
                         3.0 / 4.0)
        self.assertEqual(extractnumber(u"três quarto de chocolate",
                                       lang="pt"), 3.0 / 4.0)
        self.assertEqual(extractnumber("sete ponto cinco", lang="pt"), 7.5)
        self.assertEqual(extractnumber("sete ponto 5", lang="pt"), 7.5)
        self.assertEqual(extractnumber("sete e meio", lang="pt"), 7.5)
        self.assertEqual(extractnumber("sete e oitenta", lang="pt"), 7.80)
        self.assertEqual(extractnumber("sete e oito", lang="pt"), 7.8)
        self.assertEqual(extractnumber("sete e zero oito",
                                       lang="pt"), 7.08)
        self.assertEqual(extractnumber("sete e zero zero oito",
                                       lang="pt"), 7.008)
        self.assertEqual(extractnumber("vinte treze avos", lang="pt"),
                         20.0 / 13.0)
        self.assertEqual(extractnumber("seis virgula seiscentos e sessenta",
                                       lang="pt"), 6.66)
        self.assertEqual(extractnumber("seiscentos e sessenta e seis",
                                       lang="pt"), 666)
        self.assertEqual(extractnumber("seiscentos ponto zero seis",
                                       lang="pt"), 600.06)
        self.assertEqual(extractnumber("seiscentos ponto zero zero seis",
                                       lang="pt"), 600.006)
        self.assertEqual(extractnumber("seiscentos ponto zero zero zero seis",
                                       lang="pt"), 600.0006)

    def test_agressive_pruning_pt(self):
        """Portuguese normalize prunes articles, clitics and filler words."""
        self.assertEqual(normalize("uma palavra", lang="pt"),
                         "1 palavra")
        self.assertEqual(normalize("esta palavra um", lang="pt"),
                         "palavra 1")
        self.assertEqual(normalize("o homem batia-lhe", lang="pt"),
                         "homem batia")
        self.assertEqual(normalize("quem disse asneira nesse dia", lang="pt"),
                         "quem disse asneira dia")

    def test_spaces_pt(self):
        """Portuguese whitespace stripping."""
        self.assertEqual(normalize(" isto e o teste", lang="pt"),
                         "isto teste")
        self.assertEqual(normalize(" isto sao os testes ", lang="pt"),
                         "isto sao testes")
        self.assertEqual(normalize(" isto e um teste", lang="pt",
                                   remove_articles=False),
                         "isto e 1 teste")

    def test_numbers_pt(self):
        """Portuguese number words are replaced with digits."""
        self.assertEqual(normalize(u"isto e o um dois três teste", lang="pt"),
                         u"isto 1 2 3 teste")
        self.assertEqual(normalize(u"é a sete oito nove test", lang="pt"),
                         u"7 8 9 test")
        self.assertEqual(
            normalize("teste zero dez onze doze treze", lang="pt"),
            "teste 0 10 11 12 13")
        self.assertEqual(
            normalize("teste mil seiscentos e sessenta e seis", lang="pt",
                      remove_articles=False),
            "teste 1000 600 e 66")
        self.assertEqual(
            normalize("teste sete e meio", lang="pt",
                      remove_articles=False),
            "teste 7 e meio")
        self.assertEqual(
            normalize("teste dois ponto nove", lang="pt"),
            "teste 2 ponto 9")
        self.assertEqual(
            normalize("teste cento e nove", lang="pt",
                      remove_articles=False),
            "teste 100 e 9")
        self.assertEqual(
            normalize("teste vinte e 1", lang="pt"),
            "teste 20 1")

    def test_extractdatetime_pt(self):
        """Portuguese date/time extraction, anchored at 2017-06-27 00:00."""
        def extractWithFormat(text):
            # Fixed anchor date so relative phrases resolve deterministically.
            date = datetime(2017, 06, 27, 00, 00)
            [extractedDate, leftover] = extract_datetime(text, date, lang="pt")
            extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
            return [extractedDate, leftover]

        def testExtract(text, expected_date, expected_leftover):
            res = extractWithFormat(text)
            self.assertEqual(res[0], expected_date)
            self.assertEqual(res[1], expected_leftover)

        testExtract(u"que dia é hoje",
                    "2017-06-27 00:00:00", u"dia")
        testExtract(u"que dia é amanha",
                    "2017-06-28 00:00:00", u"dia")
        testExtract(u"que dia foi ontem",
                    "2017-06-26 00:00:00", u"dia")
        testExtract(u"que dia foi antes de ontem",
                    "2017-06-25 00:00:00", u"dia")
        testExtract(u"que dia foi ante ontem",
                    "2017-06-25 00:00:00", u"dia")
        testExtract(u"que dia foi ante ante ontem",
                    "2017-06-24 00:00:00", u"dia")
        testExtract("marca o jantar em 5 dias",
                    "2017-07-02 00:00:00", "marca jantar")
        testExtract("como esta o tempo para o dia depois de amanha?",
                    "2017-06-29 00:00:00", "como tempo")
        testExtract(u"lembra me ás 10:45 pm",
                    "2017-06-27 22:45:00", u"lembra")
        testExtract("como esta o tempo na sexta de manha",
                    "2017-06-30 08:00:00", "como tempo")
        testExtract(u"lembra me para ligar a mãe daqui a 8 semanas e 2 dias",
                    "2017-08-24 00:00:00", u"lembra ligar mae")
        testExtract("Toca black metal 2 dias a seguir a sexta",
                    "2017-07-02 00:00:00", "toca black metal")
        testExtract("Toca satanic black metal 2 dias para esta sexta",
                    "2017-07-02 00:00:00", "toca satanic black metal")
        testExtract("Toca super black metal 2 dias a partir desta sexta",
                    "2017-07-02 00:00:00", "toca super black metal")
        testExtract(u"Começa a invasão ás 3:45 pm de quinta feira",
                    "2017-06-29 15:45:00", "comeca invasao")
        testExtract("na segunda, compra queijo",
                    "2017-07-03 00:00:00", "compra queijo")
        testExtract(u"Toca os parabéns daqui a 5 anos",
                    "2022-06-27 00:00:00", "toca parabens")
        testExtract(u"manda Skype a Mãe ás 12:45 pm próxima quinta",
                    "2017-06-29 12:45:00", "manda skype mae")
        testExtract(u"como está o tempo esta sexta?",
                    "2017-06-30 00:00:00", "como tempo")
        testExtract(u"como está o tempo esta sexta de tarde?",
                    "2017-06-30 15:00:00", "como tempo")
        testExtract(u"como está o tempo esta sexta as tantas da manha?",
                    "2017-06-30 04:00:00", "como tempo")
        testExtract(u"como está o tempo esta sexta a meia noite?",
                    "2017-06-30 00:00:00", "como tempo")
        testExtract(u"como está o tempo esta sexta ao meio dia?",
                    "2017-06-30 12:00:00", "como tempo")
        testExtract(u"como está o tempo esta sexta ao fim da tarde?",
                    "2017-06-30 19:00:00", "como tempo")
        testExtract(u"como está o tempo esta sexta ao meio da manha?",
                    "2017-06-30 10:00:00", "como tempo")
        testExtract("lembra me para ligar a mae no dia 3 de agosto",
                    "2017-08-03 00:00:00", "lembra ligar mae")
        testExtract(u"compra facas no 13º dia de maio",
                    "2018-05-13 00:00:00", "compra facas")
        testExtract(u"gasta dinheiro no maio dia 13",
                    "2018-05-13 00:00:00", "gasta dinheiro")
        testExtract(u"compra velas a maio 13",
                    "2018-05-13 00:00:00", "compra velas")
        testExtract(u"bebe cerveja a 13 maio",
                    "2018-05-13 00:00:00", "bebe cerveja")
        testExtract("como esta o tempo 1 dia a seguir a amanha",
                    "2017-06-29 00:00:00", "como tempo")
        testExtract(u"como esta o tempo ás 0700 horas",
                    "2017-06-27 07:00:00", "como tempo")
        testExtract(u"como esta o tempo amanha ás 7 em ponto",
                    "2017-06-28 07:00:00", "como tempo")
        testExtract(u"como esta o tempo amanha pelas 2 da tarde",
                    "2017-06-28 14:00:00", "como tempo")
        testExtract(u"como esta o tempo amanha pelas 2",
                    "2017-06-28 02:00:00", "como tempo")
        testExtract(u"como esta o tempo pelas 2 da tarde da proxima sexta",
                    "2017-06-30 14:00:00", "como tempo")
        testExtract("lembra-me de acordar em 4 anos",
                    "2021-06-27 00:00:00", "lembra acordar")
        testExtract("lembra-me de acordar em 4 anos e 4 dias",
                    "2021-07-01 00:00:00", "lembra acordar")
        testExtract("dorme 3 dias depois de amanha",
                    "2017-07-02 00:00:00", "dorme")
        testExtract("marca consulta para 2 semanas e 6 dias depois de Sabado",
                    "2017-07-21 00:00:00", "marca consulta")
        testExtract(u"começa a festa ás 8 em ponto da noite de quinta",
                    "2017-06-29 20:00:00", "comeca festa")

    def test_gender_pt(self):
        """Portuguese grammatical gender; a context sentence can refine it."""
        self.assertEqual(get_gender("vaca", lang="pt"),
                         "f")
        self.assertEqual(get_gender("cavalo", lang="pt"),
                         "m")
        self.assertEqual(get_gender("vacas", lang="pt"),
                         "f")
        self.assertEqual(get_gender("boi", "o boi come erva", lang="pt"),
                         "m")
        self.assertEqual(get_gender("boi", lang="pt"),
                         False)
        self.assertEqual(get_gender("homem", "estes homem come merda",
                                    lang="pt"),
                         "m")
        self.assertEqual(get_gender("ponte", lang="pt"),
                         "m")
        self.assertEqual(get_gender("ponte", "essa ponte caiu",
                                    lang="pt"),
                         "f")

    #
    # Spanish
    #
    def test_articles_es(self):
        """Spanish article removal."""
        self.assertEqual(normalize("esta es la prueba", lang="es",
                                   remove_articles=True),
                         "esta es prueba")
        self.assertEqual(normalize("y otra prueba", lang="es",
                                   remove_articles=True),
                         "y otra prueba")

    def test_numbers_es(self):
        """Spanish number words up to six digits are replaced with digits."""
        self.assertEqual(normalize("esto es un uno una", lang="es"),
                         "esto es 1 1 1")
        self.assertEqual(normalize("esto es dos tres prueba", lang="es"),
                         "esto es 2 3 prueba")
        self.assertEqual(normalize("esto es cuatro cinco seis prueba",
                                   lang="es"),
                         "esto es 4 5 6 prueba")
        self.assertEqual(normalize("siete más ocho más nueve", lang="es"),
                         "7 más 8 más 9")
        self.assertEqual(normalize("diez once doce trece catorce quince",
                                   lang="es"),
                         "10 11 12 13 14 15")
        self.assertEqual(normalize(u"dieciséis diecisiete", lang="es"),
                         "16 17")
        self.assertEqual(normalize(u"dieciocho diecinueve", lang="es"),
                         "18 19")
        self.assertEqual(normalize(u"veinte treinta cuarenta", lang="es"),
                         "20 30 40")
        self.assertEqual(normalize(u"treinta y dos caballos", lang="es"),
                         "32 caballos")
        self.assertEqual(normalize(u"cien caballos", lang="es"),
                         "100 caballos")
        self.assertEqual(normalize(u"ciento once caballos", lang="es"),
                         "111 caballos")
        # NOTE(review): "habÃa" appears mojibake-encoded in the original
        # source; preserved byte-for-byte so the test data is unchanged.
        self.assertEqual(normalize(u"habÃa cuatrocientas una vacas",
                                   lang="es"),
                         u"habÃa 401 vacas")
        self.assertEqual(normalize(u"dos mil", lang="es"),
                         "2000")
        self.assertEqual(normalize(u"dos mil trescientas cuarenta y cinco",
                                   lang="es"),
                         "2345")
        self.assertEqual(normalize(
            u"ciento veintitrés mil cuatrocientas cincuenta y seis",
            lang="es"),
            "123456")
        self.assertEqual(normalize(
            u"quinientas veinticinco mil", lang="es"),
            "525000")
        self.assertEqual(normalize(
            u"novecientos noventa y nueve mil novecientos noventa y nueve",
            lang="es"),
            "999999")
# Script entry point: discover and run all TestNormalize cases via unittest.
if __name__ == "__main__":
    unittest.main()
|
JarbasAI/JarbasAI
|
test/unittests/util/test_parse.py
|
Python
|
gpl-3.0
| 31,587
|
[
"VisIt"
] |
a7957734f114462809d704a554a0a46cb24e6f129a56557f34abb57214697f66
|
"""Script for building StarFusion indices using FusionFilter."""
import argparse
import gzip
import logging
from pathlib import Path
import shutil
import subprocess
FORMAT = "[%(asctime)-15s] %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
def main():
    """Build a STAR-Fusion reference library using FusionFilter scripts.

    Pipeline (paths/options come from ``parse_args``): filter the GTF,
    extract cDNA sequences, mask repeats with RepeatMasker, compute blast
    pairs, then run FusionFilter's ``prep_genome_lib.pl``. A ``_tmp``
    working directory under ``output_dir`` holds the intermediates and is
    removed on success.

    Raises
    ------
    subprocess.CalledProcessError
        If any of the invoked external tools exits non-zero.
    FileExistsError
        If the ``_tmp`` working directory already exists.
    """
    args = parse_args()

    # Create output and temp dirs.
    tmp_dir = args.output_dir / '_tmp'
    tmp_dir.mkdir(parents=True, exist_ok=False)

    # Filter the patch chromosomes from the gtf, as these are
    # likely not present in the fasta. Note we also filter rows without
    # a transcript_id, as these seem to be problematic for STAR-Fusion.
    logging.info('- Filtering GTF file')

    gtf_path = tmp_dir / 'ref.gtf'
    tmp_gtf_path = gtf_path.with_suffix('.gtf.tmp')

    with tmp_gtf_path.open('wb') as file_:
        check_call(
            ['grep', '-v', r'^\(MG\|JH\|GL\)', str(args.gtf)], stdout=file_)

    with gtf_path.open('wb') as file_:
        check_call(['grep', 'transcript_id', str(tmp_gtf_path)], stdout=file_)

    tmp_gtf_path.unlink()

    # Create cDNA_seqs file.
    logging.info('- Generating cDNA sequences')

    cdna_path = tmp_dir / 'cDNA_seqs.fa'
    with cdna_path.open('wb') as file_:
        script_path = args.ff_path / 'util' / 'gtf_file_to_cDNA_seqs.pl'
        check_call(
            ['perl', str(script_path), str(gtf_path), str(args.fasta)],
            stdout=file_)

    # Build masked cDNA_seqs file using RepeatMasker.
    # Note: requires library to be installed from http://www.girinst.org.
    logging.info('- Masking repeats')

    # RepeatMasker writes its output next to the input with a .masked suffix.
    masked_path = cdna_path.with_suffix('.fa.masked')
    check_call([
        str(args.rm_path / 'RepeatMasker'), '-pa', str(args.threads), '-s',
        '-species', 'mouse', '-xsmall', str(cdna_path)
    ])

    # Create blastpairs.
    logging.info('- Creating blast pairs')

    check_call(['makeblastdb', '-in', str(masked_path), '-dbtype', 'nucl'])

    pair_path = tmp_dir / 'blast_pairs.outfmt6'
    with pair_path.open('wb') as file_:
        check_call(
            [
                'blastn', '-query', str(cdna_path), '-db', str(masked_path),
                '-max_target_seqs', '10000', '-outfmt', '6', '-evalue', '1e-3',
                '-lcase_masking', '-num_threads', str(args.threads),
                '-word_size', '11'
            ],
            stdout=file_)

    pair_gz_path = pair_path.with_suffix('.gene_syms.outfmt6.gz')
    with gzip.open(str(pair_gz_path), 'wb') as file_:
        script_path = (args.ff_path / 'util' /
                       'blast_outfmt6_replace_trans_id_w_gene_symbol.pl')
        check_call(
            ['perl', str(script_path), str(cdna_path), str(pair_path)],
            stdout=file_)

    # Prepare library.
    logging.info('- Preparing library')

    script_path = args.ff_path / 'prep_genome_lib.pl'
    check_call([
        'perl', str(script_path), '--genome_fa', str(args.fasta), '--gtf',
        str(gtf_path), '--blast_pairs', str(pair_gz_path), '--cdna_fa',
        str(cdna_path), '--CPU', str(args.threads), '--max_readlength',
        str(args.read_length), '--output_dir', str(args.output_dir)
    ])

    # BUG FIX: the original called shutil.rmtree(str(args.tmp_dir)), but the
    # argument parser defines no --tmp_dir option, so cleanup always raised
    # AttributeError after the build finished. Remove the local temporary
    # directory created above instead.
    shutil.rmtree(str(tmp_dir))
def parse_args():
    """Parse command-line options for the reference-build script.

    Returns
    -------
    argparse.Namespace
        Parsed arguments: fasta, gtf, output_dir (required Paths);
        ff_path, rm_path (tool locations, default ''); threads (default 1)
        and read_length (default 100).
    """
    parser = argparse.ArgumentParser()

    # Required input/output locations.
    parser.add_argument('--fasta', type=Path, required=True)
    parser.add_argument('--gtf', type=Path, required=True)
    parser.add_argument('--output_dir', type=Path, required=True)

    # Optional tool directories (FusionFilter and RepeatMasker).
    parser.add_argument('--ff_path', type=Path, default='', required=False)
    parser.add_argument('--rm_path', type=Path, default='', required=False)

    # Tuning parameters.
    parser.add_argument('--threads', default=1, type=int)
    parser.add_argument('--read_length', default=100, type=int)

    return parser.parse_args()
def check_call(args, verbose=True, **kwargs):
    """Run *args* via ``subprocess.check_call``, optionally logging it first.

    Parameters
    ----------
    args : list of str
        Command and arguments to execute.
    verbose : bool
        When True, log the joined command line before running it.
    **kwargs
        Passed through unchanged to ``subprocess.check_call``.

    Raises
    ------
    subprocess.CalledProcessError
        If the command exits with a non-zero status.
    """
    if verbose:
        logging.info('Running command - "%s"', ' '.join(args))
    subprocess.check_call(args, **kwargs)
# Script entry point: run the full reference-build pipeline.
if __name__ == '__main__':
    main()
|
NKI-CCB/imfusion
|
scripts/starfusion_build_reference.py
|
Python
|
mit
| 4,048
|
[
"BLAST"
] |
c46c7785ad43d8e0e45c76f4f2c596c5f8c3868238b8149967e9ee06c355dc4d
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions for Psi4/Cfour interface. Portions that require
calls to Boost Python psi4 module are here, otherwise in qcdb module.
Also calls to qcdb module are here and not elsewhere in driver.
Organizationally, this module isolates qcdb code from psi4 code.
"""
from __future__ import print_function
from __future__ import absolute_import
import shutil
import os
import subprocess
import re
import inspect
import glob
import shelve
import datetime
import difflib
from psi4.driver.p4util.exceptions import *
def run_cfour_module(xmod):
    """Run the Cfour module executable *xmod* and return its stdout as text.

    The child process runs with a PATH assembled from PSIPATH, the current
    PATH, and the Psi4 basis-set directories, plus the current
    LD_LIBRARY_PATH; entries that resolve to None are dropped.

    Parameters
    ----------
    xmod : str
        Name of (or path to) the Cfour module executable to run.

    Returns
    -------
    str
        Everything the module wrote to stdout.

    Raises
    ------
    ValidationError
        If the executable cannot be found or fails to launch.
    """
    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
                ':' + os.environ.get('PATH') + \
                ':' + core.Process.environment["PSIDATADIR"] + '/basis' + \
                ':' + core.psi_top_srcdir() + '/share/basis',
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
    }
    #   Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Call executable xcfour, directing cfour output to the psi4 output file
    try:
        retcode = subprocess.Popen([xmod], bufsize=0, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        # BUG FIX: the original handler referenced the undefined name
        # `cfour_executable`, so a missing executable raised NameError
        # instead of the intended ValidationError.
        message = ('Program %s not found in path or execution failed: %s\n' % (xmod, e.strerror))
        sys.stderr.write(message)
        raise ValidationError(message)

    c4out = ''
    while True:
        data = retcode.stdout.readline()
        if not data:
            break
        if not isinstance(data, str):
            # A Popen pipe yields bytes on Python 3; decode before appending
            # to the str accumulator (a no-op on Python 2).
            data = data.decode('utf-8')
        c4out += data
    return c4out
def vpt2(name, **kwargs):
    """Perform vibrational second-order perturbation computation through
    Cfour to get anharmonic frequencies. This version uses c4 for the disp
    and pt2 but gets gradients from p4.

    :type c4full: :ref:`boolean <op_py_boolean>`
    :param c4full: ``'on'`` || |dl| ``'off'`` |dr|
        Indicates whether when *name* indicates a Cfour method and *mode*
        indicates a sow/reap approach, sown files are direct ZMAT files
        and FJOBARC files are expected to reap, so that Cfour only, not
        Cfour-through-Psi4, is needed for distributed jobs.

    .. caution:: Some features are not yet implemented. Buy a developer a coffee.

       - Presently uses all gradients. Could mix in analytic 2nd-derivs.
       - Collect resutls.
       - Manage scratch / subdir better.
       - Untangle CCSD(T) vs CCSD[T] and FJOBARC issue
       - Allow CFOUR_BASIS
       - Consider forcing some tighter convcrit, c4 and p4
       - sow/reap
         - mixed ang/bohr signals
         - error by converting to ang in psi?
       - Expand CURRENT DIPOLE XYZ beyond SCF
       - Remember additional FJOBARC record TOTENER2 if EXCITE .ne. NONE
       - S/R P4grad
       - S/R C4grad
       - C P4grad
       - C C4grad
       - switch C --> S/R with recovery using shelf
       - pure C mode where only need P4 for wrapper
    """
    # State machine persisted in a shelve file; `shelf['status']` advances:
    # initialized -> harm_jobs_sown -> harm_jobs_reaped ->
    # anharm_jobs_sown -> anharm_jobs_reaped -> vpt2_completed.
    # In sow/reap mode the function returns early after each "sown" stage.
    lowername = name.lower()
    kwargs = p4util.kwargs_lower(kwargs)

    optstash = p4util.OptionsState(
        ['BASIS'])

    # Option mode of operation- whether vpt2 run in one job or files farmed out
    if not('vpt2_mode' in kwargs):
        if ('mode' in kwargs):
            kwargs['vpt2_mode'] = kwargs['mode']
            del kwargs['mode']
        else:
            kwargs['vpt2_mode'] = 'continuous'

    # Switches for route through code- S/R or continuous & Psi4 or Cfour gradients
    isSowReap = True if kwargs['vpt2_mode'].lower() == 'sowreap' else False
    # NOTE(review): `yes` below looks like a module-level compiled regex
    # (e.g. from p4util) matching truthy strings -- not visible here; confirm.
    isC4notP4 = bool(re.match('cfour', lowername)) or bool(re.match('c4-', lowername))
    isC4fully = True if ('c4full' in kwargs and yes.match(str(kwargs['c4full'])) and isC4notP4 and isSowReap) else False

    # Save submission directory and basis set
    current_directory = os.getcwd()
    user_basis = core.get_global_option('BASIS')

    # Open data persistence shelf- vital for sowreap, checkpoint for continuouw
    shelf = shelve.open(current_directory + '/' + os.path.splitext(core.outfile_name())[0] + '.shelf', writeback=True)

    # Cfour keywords to request vpt2 analysis through findif gradients
    core.set_local_option('CFOUR', 'CFOUR_VIBRATION', 'FINDIF')
    core.set_local_option('CFOUR', 'CFOUR_FREQ_ALGORITHM', 'PARALLEL')
    core.set_local_option('CFOUR', 'CFOUR_ANH_ALGORITHM', 'PARALLEL')
    core.set_local_option('CFOUR', 'CFOUR_ANHARMONIC', 'VPT2')
    core.set_local_option('CFOUR', 'CFOUR_FD_PROJECT', 'OFF')

    # When a Psi4 method is requested for vpt2, a skeleton of
    #   computations in Cfour is still required to hang the gradients
    #   upon. The skeleton is as cheap as possible (integrals only
    #   & sto-3g) and set up here.
    if isC4notP4:
        skelname = lowername
    else:
        skelname = 'c4-scf'
        core.set_global_option('BASIS', 'STO-3G')
#    P4      'c4-scf'/'cfour'CALC_LEVEL      lowername  # temporary
#    C4      lowername                       cfour{}  # temporary

    if 'status' not in shelf:
        shelf['status'] = 'initialized'
        shelf['linkage'] = os.getpid()
        shelf['zmat'] = {}  # Cfour-generated ZMAT files with finite difference geometries
        shelf['fjobarc'] = {}  # Cfour- or Psi4-generated ascii files with packaged gradient results
        shelf.sync()
    else:
        pass
        # how decide whether to use. keep precedent of intco.dat in mind

    # Construct and move into directory job scratch / cfour scratch / harm
    psioh = core.IOManager.shared_object()
    psio = core.IO.shared_object()
    os.chdir(psioh.get_default_path())  # psi_scratch
    cfour_tmpdir = kwargs['path'] if 'path' in kwargs else \
        'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
        '.cfour.' + str(uuid.uuid4())[:8]
    if not os.path.exists(cfour_tmpdir):
        os.mkdir(cfour_tmpdir)
    os.chdir(cfour_tmpdir)  # psi_scratch/cfour
    if not os.path.exists('harm'):
        os.mkdir('harm')
    os.chdir('harm')  # psi_scratch/cfour/harm

    psioh.set_specific_retention(32, True)  # temporary, to track p4 scratch
    #shelf['status'] = 'anharm_jobs_sown'  # temporary to force backtrack
    print('STAT', shelf['status'])  # temporary

    # Generate the ZMAT input file in scratch
    with open('ZMAT', 'w') as handle:
        cfour_infile = write_zmat(skelname, 1)
        handle.write(cfour_infile)
    print('\n====== Begin ZMAT input for CFOUR ======')
    print(open('ZMAT', 'r').read())
    print('======= End ZMAT input for CFOUR =======\n')

    shelf['genbas'] = open('GENBAS', 'r').read()

    # Check existing shelf consistent with generated ZMAT, store
    if ('000-000' in shelf['zmat']) and (shelf['zmat']['000-000'] != cfour_infile):
        diff = difflib.Differ().compare(shelf['zmat']['000-000'].splitlines(), cfour_infile.splitlines())
        raise ValidationError("""Input file translated to Cfour ZMAT does not match ZMAT stored in shelf.\n\n""" +
                              '\n'.join(list(diff)))
    shelf['zmat']['000-000'] = cfour_infile
    shelf.sync()

    # Reset basis after Cfour skeleton seeded
    core.set_global_option('BASIS', user_basis)

    # Stage 1: generate harmonic finite-difference displacements (zmat files)
    if shelf['status'] == 'initialized':
        p4util.banner(' VPT2 Setup: Harmonic ')

        # Generate the displacements that will form the harmonic freq
        os.chdir(psioh.get_default_path() + cfour_tmpdir + '/harm')  # psi_scratch/cfour/harm
        with open('partial.out', 'w') as handle:
            handle.write(run_cfour_module('xjoda'))
            handle.write(run_cfour_module('xsymcor'))

        # Read the displacements that will form the harmonic freq
        zmats0N = ['000-' + item[-3:] for item in sorted(glob.glob('zmat*'))]
        for zm12 in zmats0N:
            zm1, zm2 = zm12.split('-')
            with open('zmat' + zm2, 'r') as handle:
                shelf['zmat'][zm12] = handle.read()
                shelf.sync()
            core.print_out(' CFOUR scratch file %s for %s-%s has been read\n' % ('zmat' + zm2, zm1, zm2))
            core.print_out('%s\n' % shelf['zmat'][zm12])

        # S/R: Write distributed input files for harmonic freq
        if isSowReap:
            os.chdir(current_directory)
            inputSansMol = p4util.format_currentstate_for_input(gradient, lowername, allButMol=True, **kwargs)
            for zm12 in zmats0N:
                zm1, zm2 = zm12.split('-')
                ifile = vpt2_sow_files(zm12, shelf['linkage'], isC4notP4, isC4fully,
                                       shelf['zmat'][zm12], inputSansMol, shelf['genbas'])
                with open('VPT2-' + zm12 + '.in', 'w') as handle:
                    handle.write(ifile)
            msg = vpt2_instructions('harmonic', current_directory, zmats0N)
            core.print_out(msg)
            print(msg)

        shelf['status'] = 'harm_jobs_sown'

        # S/R: Pause for distributed calculations
        if isSowReap:
            shelf.close()
            return 0.0

    # Stage 2: collect one gradient per harmonic displacement
    if shelf['status'] == 'harm_jobs_sown':
        zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]

        # S/R: Check that distributed calcs all completed correctly
        if isSowReap:
            msg = vpt2_instructions('harmonic', current_directory, zmats0N)
            core.print_out(msg)
            isOk, msg = sown_jobs_status(current_directory, 'VPT2', zmats0N, reap_job_validate,
                                         shelf['linkage'], ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
            core.print_out(msg)
            print(msg)
            if not isOk:
                shelf.close()
                return 0.0

        # Collect all results from gradients forming the harmonic freq
        for zm12 in zmats0N:
            zm1, zm2 = zm12.split('-')
            if zm12 not in shelf['fjobarc']:
                p4util.banner(' VPT2 Computation: %s ' % (zm12))
                print(' VPT2 Computation: %s ' % (zm12))
                fjobarc = vpt2_reaprun_files(zm12, shelf['linkage'], isSowReap, isC4notP4, isC4fully,
                                             shelf['zmat'][zm12], current_directory, psioh.get_default_path(), cfour_tmpdir,
                                             lowername, kwargs)
                shelf['fjobarc'][zm12] = fjobarc
                shelf.sync()

        shelf['status'] = 'harm_jobs_reaped'

    # Stage 3: assemble harmonic force field; sow anharmonic displacements
    if shelf['status'] == 'harm_jobs_reaped':
        zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]

        p4util.banner(' VPT2 Results: Harmonic ')

        # Process the gradients into harmonic freq
        os.chdir(psioh.get_default_path() + cfour_tmpdir + '/harm')  # psi_scratch/cfour/harm
        harmout = run_cfour_module('xjoda')
        harmout += run_cfour_module('xsymcor')
        for zm12 in zmats0N:
            zm1, zm2 = zm12.split('-')
            with open('FJOBARC', 'w') as handle:
                handle.write(shelf['fjobarc'][zm12])
            harmout += run_cfour_module('xja2fja')
            harmout += run_cfour_module('xsymcor')
            shutil.move('FJOBARC', 'fja.' + zm12)
            try:
                os.remove('zmat' + zm2)
            except OSError:
                pass
        harmout += run_cfour_module('xjoda')
        harmout += run_cfour_module('xcubic')
        core.print_out(harmout)
        with open('harm.out', 'w') as handle:
            handle.write(harmout)

        # Generate displacements along harmonic normal modes
        zmatsN0 = [item[-3:] for item in sorted(glob.glob('zmat*'))]
        os.chdir('..')  # psi_scratch/cfour
        for zm1 in zmatsN0:
            zm12 = zm1 + '-000'
            with open(psioh.get_default_path() + cfour_tmpdir + '/harm/zmat' + zm1, 'r') as handle:
                shelf['zmat'][zm12] = handle.read()
                shelf.sync()
            core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('zmat' + zm1, zm12))
            core.print_out('%s\n' % shelf['zmat'][zm12])

            # Collect displacements along the normal coordinates generated by the harmonic freq.
            #   Further harmonic freqs are to be run at each of these to produce quartic force field.
            #   To carry these out, generate displacements for findif by gradient at each displacement.
            if os.path.exists(zm1):
                shutil.rmtree(zm1)
            os.mkdir(zm1)
            os.chdir(zm1)  # psi_scratch/cfour/004
            with open('ZMAT', 'w') as handle:
                handle.write(shelf['zmat'][zm12])
            shutil.copy2('../harm/GENBAS', 'GENBAS')  # ln -s $ecpdir/ECPDATA $j/ECPDATA
            with open('partial.out', 'w') as handle:
                handle.write(run_cfour_module('xjoda'))
                handle.write(run_cfour_module('xsymcor'))

            # Read the displacements that will form the anharmonic freq
            zmatsNN = [item[-3:] for item in sorted(glob.glob('zmat*'))]
            for zm2 in zmatsNN:
                zm12 = zm1 + '-' + zm2
                with open(psioh.get_default_path() + cfour_tmpdir + '/' + zm1 + '/zmat' + zm2, 'r') as handle:
                    shelf['zmat'][zm12] = handle.read()
                    shelf.sync()
                core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('zmat' + zm2, zm12))
                core.print_out('%s\n' % shelf['zmat'][zm12])
            os.chdir('..')  # psi_scratch/cfour

        zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]

        # S/R: Write distributed input files for anharmonic freq
        if isSowReap:
            os.chdir(current_directory)
            inputSansMol = p4util.format_currentstate_for_input(gradient, lowername, allButMol=True, **kwargs)
            for zm12 in zmatsNN:
                zm1, zm2 = zm12.split('-')
                ifile = vpt2_sow_files(zm12, shelf['linkage'], isC4notP4, isC4fully,
                                       shelf['zmat'][zm12], inputSansMol, shelf['genbas'])
                # GENBAS needed here
                with open('VPT2-' + zm12 + '.in', 'w') as handle:
                    handle.write(ifile)
            msg = vpt2_instructions('anharmonic', current_directory, zmatsNN)
            core.print_out(msg)
            print(msg)

        shelf['status'] = 'anharm_jobs_sown'

        # S/R: Pause for distributed calculations
        if isSowReap:
            shelf.close()
            return 0.0

    # Stage 4: collect one gradient per anharmonic displacement
    if shelf['status'] == 'anharm_jobs_sown':
        zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]

        # S/R: Check that distributed calcs all completed correctly
        if isSowReap:
            msg = vpt2_instructions('anharmonic', current_directory, zmatsNN)
            core.print_out(msg)
            isOk, msg = sown_jobs_status(current_directory, 'VPT2', zmatsNN,
                                         reap_job_validate, shelf['linkage'],
                                         ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
            core.print_out(msg)
            print(msg)
            if not isOk:
                shelf.close()
                return 0.0

        # Collect all results from gradients forming the anharmonic freq
        for zm12 in zmatsNN:
            zm1, zm2 = zm12.split('-')
            if zm12 not in shelf['fjobarc']:
                p4util.banner(' VPT2 Computation: %s ' % (zm12))
                print(' VPT2 Computation: %s ' % (zm12))
                fjobarc = vpt2_reaprun_files(zm12, shelf['linkage'], isSowReap, isC4notP4, isC4fully,
                                             shelf['zmat'][zm12], current_directory, psioh.get_default_path(), cfour_tmpdir,
                                             lowername, kwargs)
                shelf['fjobarc'][zm12] = fjobarc
                shelf.sync()

        shelf['status'] = 'anharm_jobs_reaped'

    # Stage 5: assemble quartic force field and run the VPT2 analysis
    if shelf['status'] == 'anharm_jobs_reaped':
        zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
        zmatsN0 = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] == '000')]
        zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]

        p4util.banner(' VPT2 Results: Harmonic ')

        # Process the gradients into harmonic freq
        os.chdir(psioh.get_default_path() + cfour_tmpdir)  # psi_scratch/cfour
        if os.path.exists('anharm'):
            shutil.rmtree('anharm')
        os.mkdir('anharm')
        os.chdir('harm')  # psi_scratch/cfour/harm
        run_cfour_module('xclean')
        anharmout = run_cfour_module('xjoda')
        anharmout += run_cfour_module('xsymcor')
        for zm12 in zmats0N:
            zm1, zm2 = zm12.split('-')
            with open('FJOBARC', 'w') as handle:
                handle.write(shelf['fjobarc'][zm12])
            anharmout += run_cfour_module('xja2fja')
            anharmout += run_cfour_module('xsymcor')
            shutil.move('FJOBARC', 'fja.' + zm12)
        anharmout += run_cfour_module('xjoda')
        anharmout += run_cfour_module('xcubic')
        core.print_out(anharmout)
        with open('harm.out', 'w') as handle:
            handle.write(anharmout)

        # Process the gradients into harmonic freq at each normco displaced point
        os.chdir('..')  # psi_scratch/cfour
        for zm11 in zmatsN0:
            zm1 = zm11[:3]
            if os.path.exists(zm1):
                shutil.rmtree(zm1)
            os.mkdir(zm1)
            os.chdir(zm1)  # psi_scratch/cfour/004
            run_cfour_module('xclean')
            with open('ZMAT', 'w') as handle:
                handle.write(shelf['zmat'][zm11])
            shutil.copy2('../harm/GENBAS', 'GENBAS')
            anharmout = run_cfour_module('xjoda')
            anharmout += run_cfour_module('xsymcor')
            for zm22 in [item for item in zmatsNN if (item[:3] == zm1 and item[-3:] != '000')]:
                zm2 = zm22[-3:]
                zm12 = zm1 + '-' + zm2
                print(zm12)
                with open('FJOBARC', 'w') as handle:
                    handle.write(shelf['fjobarc'][zm12])
                anharmout += run_cfour_module('xja2fja')
                anharmout += run_cfour_module('xsymcor')
                shutil.move('FJOBARC', 'fja.' + zm12)
            anharmout += run_cfour_module('xjoda')
            anharmout += run_cfour_module('xja2fja')
            with open('FJOBARC', 'r') as handle:
                shelf['fjobarc'][zm11] = handle.read()
                shelf.sync()
            core.print_out(anharmout)
            with open('partial.out', 'w') as handle:
                handle.write(anharmout)
            os.chdir('..')  # psi_scratch/cfour

        # Process the harmonic freqs at normco displacements into anharmonic freq
        p4util.banner(' VPT2 Results: Anharmonic ')
        os.chdir('anharm')  # psi_scratch/cfour/anharm
        shutil.copy2('../harm/JOBARC', 'JOBARC')
        shutil.copy2('../harm/JAINDX', 'JAINDX')
        for zm12 in zmatsN0:
            with open('FJOBARC', 'w') as handle:
                handle.write(shelf['fjobarc'][zm12])
            anharmout = run_cfour_module('xja2fja')
            anharmout += run_cfour_module('xcubic')
            shutil.move('FJOBARC', 'fja.' + zm12)
        core.print_out(anharmout)
        with open('anharm.out', 'w') as handle:
            handle.write(anharmout)

        shelf['status'] = 'vpt2_completed'

    # Finish up
    os.chdir(current_directory)
    shelf.close()
    optstash.restore()
def vpt2_sow_files(item, linkage, isC4notP4, isC4fully, zmat, inputSansMol, inputGenbas):
    """Provided with the particular displacement number *item* and the
    associated *zmat* file contents and *linkage*, and common contents
    *inputSansMol*, returns contents of input file to be sown.

    The returned text is a complete Psi4 input (or raw ZMAT when
    *isC4fully*) whose epilogue prints tagged 'VPT2 RESULT' lines that
    reap_job_validate() later scans for.
    """
    # Epilogue appended to every sown input: prints energy, gradient, and
    # dipole tagged with this linkage/item so the reaper can match them up.
    inputReapOrders = r"""
print_variables()
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT ENERGY being %r\n' % (get_variable('CURRENT ENERGY')))
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT GRADIENT being %r\n' % (p4util.mat2arr(core.get_gradient())))
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT DIPOLE being [%r, %r, %r]\n' % (get_variable('CURRENT DIPOLE X'), get_variable('CURRENT DIPOLE Y'), get_variable('CURRENT DIPOLE Z')))
""".format(linkage, item)

    # Direct Cfour for gradients: sown file is the raw ZMAT itself
    if isC4fully:
        inputString = zmat
        with open('VPT2-GENBAS', 'w') as handle:
            handle.write(inputGenbas)

    # Cfour for gradients (run through Psi4's cfour interface)
    elif isC4notP4:
        # GENBAS needed here
        inputString = 'extracted_genbas = """\n' + inputGenbas.replace('\n\n', '\nblankline\n') + '\n"""\n\n'
        inputString += """cfour {\n%s\n}\n\nenergy('cfour', genbas=extracted_genbas)\n\n""" % (zmat)
        inputString += inputReapOrders
        # Also record the molecule so the reaper can recover the C4 orientation
        inputString += r"""
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT MOLECULE being %r\n' % (get_active_molecule().create_psi4_string_from_molecule()))
""".format(linkage, item)

    # Psi4 for gradients: translate the ZMAT geometry into a Psi4 molecule
    else:
        inputString = p4util.format_molecule_for_input(
            qcdb.cfour.harvest_zmat(zmat).create_psi4_string_from_molecule(),
            name='disp' + item[:3] + item[-3:])
        inputString += inputSansMol
        inputString += inputReapOrders

    return inputString
def vpt2_reaprun_files(item, linkage, isSowReap, isC4notP4, isC4fully, zmat, outdir, scrdir, c4scrdir, lowername, kwargs):
    """Provided with the particular displacement number *item* and the
    associated *zmat* file with geometry and *linkage*, returns the
    FJOBARC contents. Depending on the mode settings of *isC4notP4*,
    *isSowReap*, and *isC4fully*, either runs (using *lowername* and
    *kwargs*) or reaps contents. *outdir* is where psi4 was invoked,
    *scrdir* is the psi4 scratch directory, and *c4scrdir* is Cfour
    scratch directory within.
    """
    os.chdir(outdir)  # current_directory

    # Extract qcdb.Molecule at findif orientation
    zmmol = qcdb.cfour.harvest_zmat(zmat)

    # Cfour S/R Direct for gradients: FJOBARC was produced externally, just read it
    if isC4fully:
        with open('VPT2-' + item + '.fja', 'r') as handle:
            fjobarc = handle.read()

    # Cfour for gradients
    elif isC4notP4:

        # S/R: Reap results from output file
        if isSowReap:
            isOk, msg, results = reap_job_validate(outdir, 'VPT2', item, linkage,
                ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT', 'CURRENT MOLECULE'])
            if not isOk:
                raise ValidationError(msg)

            fje = results['CURRENT ENERGY']
            fjgrd = results['CURRENT GRADIENT']
            # convert Debye (as printed) back to atomic units
            fjdip = [item / constants.dipmom_au2debye for item in results['CURRENT DIPOLE']]
            c4mol = qcdb.Molecule(results['CURRENT MOLECULE'])
            c4mol.update_geometry()

        # C: Run the job and collect results
        else:
            # Prepare Cfour skeleton calc directory
            os.chdir(scrdir + c4scrdir)  # psi_scratch/cfour
            if os.path.exists('scr.' + item):
                shutil.rmtree('scr.' + item)
            os.mkdir('scr.' + item)
            os.chdir('scr.' + item)  # psi_scratch/cfour/scr.000-004
            with open('ZMAT', 'w') as handle:
                handle.write(zmat)
            shutil.copy2('../harm/GENBAS', 'GENBAS')

            #os.chdir(scrdir + '/scr.' + item)
            #run_cfour_module('xja2fja')
            #with open('FJOBARC', 'r') as handle:
            #    fjobarc = handle.read()

            # Run Cfour calc using ZMAT & GENBAS in scratch, outdir redirects to outfile
            os.chdir(outdir)  # current_directory
            core.get_active_molecule().set_name('blank_molecule_psi4_yo')
            energy('cfour', path=c4scrdir + '/scr.' + item)
            # os.chdir(scrdir + '/scr.' + item)

            fje = core.get_variable('CURRENT ENERGY')
            fjgrd = p4util.mat2arr(core.get_gradient())
            fjdip = [core.get_variable('CURRENT DIPOLE X') / constants.dipmom_au2debye,
                     core.get_variable('CURRENT DIPOLE Y') / constants.dipmom_au2debye,
                     core.get_variable('CURRENT DIPOLE Z') / constants.dipmom_au2debye]
            c4mol = qcdb.Molecule(core.get_active_molecule().create_psi4_string_from_molecule())
            c4mol.update_geometry()

        # Get map btwn ZMAT and C4 orientation, then use it, grad and dipole to forge FJOBARC file
        fjobarc = qcdb.cfour.format_fjobarc(fje,
            *qcdb.cfour.backtransform(chgeMol=zmmol, permMol=c4mol), gradient=fjgrd, dipole=fjdip)

    # Psi4 for gradients
    else:
        # Prepare Cfour skeleton calc directory
        os.chdir(scrdir + c4scrdir)  # psi_scratch/cfour
        if os.path.exists('scr.' + item):
            shutil.rmtree('scr.' + item)
        os.mkdir('scr.' + item)
        os.chdir('scr.' + item)  # psi_scratch/cfour/scr.000-004
        with open('ZMAT', 'w') as handle:
            handle.write(zmat)
        shutil.copy2('../harm/GENBAS', 'GENBAS')

        # Run Cfour skeleton calc and extract qcdb.Molecule at needed C4 orientation
        with open('partial.out', 'w') as handle:
            handle.write(run_cfour_module('xjoda'))
            handle.write(run_cfour_module('xvmol'))
            handle.write(run_cfour_module('xvmol2ja'))
        core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('JOBARC (binary)', item))
        c4mol = qcdb.cfour.jajo2mol(qcdb.jajo.getrec(['COORD ', 'ATOMCHRG', 'MAP2ZMAT']))

        # S/R: Reap results from output file
        if isSowReap:
            isOk, msg, results = reap_job_validate(outdir, 'VPT2', item, linkage,
                ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
            if not isOk:
                raise ValidationError(msg)

            fje = results['CURRENT ENERGY']
            fjgrd = results['CURRENT GRADIENT']
            fjdip = [item / constants.dipmom_au2debye for item in results['CURRENT DIPOLE']]

        # C: Run the job and collect results
        else:
            core.IO.set_default_namespace(item)
            molecule = geometry(zmmol.create_psi4_string_from_molecule(), 'disp-' + item)
            molecule.update_geometry()
            gradient(lowername, **kwargs)
            fje = core.get_variable('CURRENT ENERGY')
            fjgrd = p4util.mat2arr(core.get_gradient())
            fjdip = [core.get_variable('CURRENT DIPOLE X') / constants.dipmom_au2debye,
                     core.get_variable('CURRENT DIPOLE Y') / constants.dipmom_au2debye,
                     core.get_variable('CURRENT DIPOLE Z') / constants.dipmom_au2debye]

        # Transform results into C4 orientation (defined by c4mol) & forge FJOBARC file
        fjobarc = qcdb.cfour.format_fjobarc(fje,
            *qcdb.cfour.backtransform(chgeMol=zmmol, permMol=c4mol, chgeGrad=fjgrd, chgeDip=fjdip))

    return fjobarc
def vpt2_instructions(stage, dir, zmats):
    """Stores all the instructions to the user for running
    :py:func:`~wrappers_cfour.vpt2` in sowreap mode. Depending on the
    *stage* ('harmonic' or 'anharmonic'), pieces together instruction
    strings for the appropriate stage individualized by working directory
    *dir* and sown inputs *zmats* information.
    """
    # One "psi4 <in> <out>" command line per sown displacement job.
    stepFiles = ''
    for zm12 in sorted(zmats):
        stepFiles += """ psi4 %-27s %-27s\n""" % ('VPT2-' + zm12 + '.in', 'VPT2-' + zm12 + '.out')

    step0 = """
The vpt2 sow/reap procedure has been selected through mode='sowreap'. This
output file, the corresponding input file, and the data persistence file
must not be edited by the user over the course of the sow/reap procedure.
Throughout, psi4 can be invoked to move to the next stage of the procedure
or to tally up the 'sown' jobs. This output file is overwritten each time
psi4 is invoked, but all results and instructions accumulate.
This procedure involves two stages of distributed calculations, harmonic and
anharmonic, and a mimimum of three invokations of psi4 on the original input
file (including the one that initially generated this text). From the input
geometry (0), displacements are generated for which gradients are required.
Input files for these are 'sown' in the current directory (1). Upon
completion, their output files are 'reaped' into a harmonic force field (2).
At displacements along the normal coordinates, further displacements are
generated for which gradients are required. Input files for these are again
'sown' in the current directory (3). Upon completion, their output files are
'reaped' into an anharmonic force field (4), terminating the vpt2 procedure.
Follow the instructions below to continue.
(0) Read Only
--------------
%s
%s
%s
""" % (dir + '/' + os.path.splitext(core.outfile_name())[0] + '.in',
       dir + '/' + core.outfile_name(),
       dir + '/' + os.path.splitext(core.outfile_name())[0] + '.shelf')

    step1 = """
(1) Sow
--------
Run all of the VPT2-000-*.in input files on any variety of computer
architecture. The output file names must be as given below (default).
"""

    step2 = """
(2) Reap
---------
Gather all the resulting output files in this directory along with the
three read-only files from (0). Invoke psi4 again. The job will be
trivial in length (unless sto-3g integrals on the molecule are costly)
and give results for the harmonic frequency stage in this output file. It
will also supply the next set of instructions.
 psi4 %-27s %-27s
""" % (os.path.splitext(core.outfile_name())[0] + '.in', core.outfile_name())

    step3 = """
(3) Sow
--------
Run all of the VPT2-*-*.in input files on any variety of computer
architecture. The output file names must be as given below (default).
"""

    step4 = """
(4) Reap
---------
Gather all the resulting output files in this directory along with the
three read-only files from (0). Invoke psi4 again. The job will be
trivial in length (unless sto-3g integrals on the molecule are costly)
and give results for the harmonic and anharmonic frequency stages in this
output file.
 psi4 %-27s %-27s
""" % (os.path.splitext(core.outfile_name())[0] + '.in', core.outfile_name())

    # NOTE(review): any *stage* other than 'harmonic'/'anharmonic' leaves
    # `instructions` unbound and raises UnboundLocalError at return.
    if stage == 'harmonic':
        instructions = step0 + step1 + stepFiles + step2
    elif stage == 'anharmonic':
        instructions = step0 + step3 + stepFiles + step4
    return instructions
def sown_jobs_status(dir, prefix, zmats, validate_func=None, linkage=None, keys=None):
    """Evaluate the output file status of jobs in *zmats* which should
    exist at *dir* + '/' + prefix + '-' + job + '.out'. Returns string with
    formatted summary of job status and boolean of whether all complete.

    Return boolean *isOk* signals whether all *zmats* have completed and,
    if *validate_func* present, are validated. *validate_func* has the
    signature of :py:func:`reap_job_validate` and is called with
    ``(dir, prefix, job, linkage, keys)``.
    """
    isOk = True
    msgError = ''
    instructions = '\n'
    instructions += p4util.banner(prefix + ' Status: ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), strNotOutfile=True)
    instructions += '\n'
    for job in sorted(zmats):
        outfile = dir + '/' + prefix + '-' + job + '.out'
        fjafile = dir + '/' + prefix + '-' + job + '.fja'
        # columns: job-name, Waiting, Running, Completed, validation
        formatArgs = [prefix + '-' + job, '', '', '', '']
        if os.path.isfile(outfile):
            with open(outfile, 'r') as handle:
                for line in handle:
                    # Psi4's sign-off line marks a completed job
                    if line.find('Buy a developer a beer!') > -1:
                        formatArgs[3] = 'Completed'
                        # BUG FIX: previously the module-level reap_job_validate
                        # was hard-coded here, silently ignoring the
                        # validate_func argument (and the `is not None` test
                        # was vacuous). Honor the caller-supplied validator.
                        if validate_func is not None:
                            isOkJob, msg, temp = validate_func(dir, prefix, job, linkage, keys)
                            if isOkJob:
                                formatArgs[4] = '& Validated'
                            else:
                                isOk = False
                                msgError += msg
                                formatArgs[4] = 'INVALID'
                        break
                else:
                    # no sign-off line found: job still running
                    isOk = False
                    formatArgs[2] = 'Running'
        elif os.path.isfile(fjafile):
            # direct-Cfour mode delivers a .fja file instead of an output
            formatArgs[3] = 'Completed'
        else:
            isOk = False
            formatArgs[1] = 'Waiting'
        instructions += """ {0:<27} {1:^10} {2:^10} {3:^10} {4:^10}\n""".format(*formatArgs)
    instructions += '\n' + msgError + '\n\n'
    return isOk, instructions
def reap_job_validate(dir, prefix, item, linkage, keys):
    """For a given output file whose path is constructed with
    *dir* + '/' + *prefix* + '-' + *item* + '.out', tests that the file
    exists and has *prefix* RESULTS lines for each piece of information
    requested in list *keys* and that those lines correspond to the
    appropriate *linkage* and *item*. Returns *keys* along with their
    scanned values in dict *reapings*, along with error and success
    messages in *instructions* and a boolean *isOk* indicating whether
    all *keys* reaped sucessfully.
    """
    isOk = True
    instructions = ''
    reapings = {}
    outfile = dir + '/' + prefix + '-' + item + '.out'
    try:
        with open(outfile, 'r') as handle:
            for line in handle:
                # Lines look like:
                #   <prefix> RESULT: linkage <pid> for item <NNN-NNN> yields <KEY> being <repr>
                if line.find(prefix + ' RESULT:') == 0:
                    sline = line.split()
                    if sline[2:7] == ['linkage', str(linkage), 'for', 'item', item]:
                        yieldsAt = line.find('yields')
                        beingAt = line.find('being')
                        if beingAt > yieldsAt > -1:
                            key = line[yieldsAt + 6:beingAt].strip()
                            val = line[beingAt + 5:].strip()
                            if key in keys:
                                # SECURITY FIX: values are reprs of plain
                                # floats/lists/strings written by the sown jobs,
                                # so parse with ast.literal_eval instead of
                                # eval()ing arbitrary file contents.
                                reapings[key] = ast.literal_eval(val)
                                #core.print_out('  CFOUR scratch file %s for %s has been read\n' % ('JOBARC', zm12))
                        else:
                            isOk = False
                            instructions += """Outfile file %s
has corrupted sowreap result line:\n%s\n\n""" % (outfile, line)
                    else:
                        isOk = False
                        instructions += """Outfile file %s
has sowreap result of either incompatible linkage (observed: %s, expected: %s)
or incompatible job affiliation (observed: %s, expected: %s).\n\n""" % \
                            (outfile, sline[3], linkage, sline[6], item)
            else:
                # loop completed: confirm every requested key was found
                if len(reapings) != len(keys):
                    isOk = False
                    instructions += """Output file %s
has missing results (observed: %s, expected: %s).\n\n""" % \
                        (outfile, reapings.keys(), keys)
    except IOError:
        isOk = False
        instructions += """Output file %s
that was judged present and complete at the beginning of this
job is now missing. Replace it and invoke psi4 again.\n\n""" % (outfile)

    return isOk, instructions, reapings
|
kratman/psi4public
|
psi4/driver/procrouting/wrappers_cfour.py
|
Python
|
gpl-2.0
| 36,674
|
[
"CFOUR",
"Psi4"
] |
825f8e715125f0dcfb3031c0b8f3eae8f0d83a1cc561cb36964e1c33929670cd
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
'''
CSV2NetCDFConverter concrete class for converting data to netCDF
Created on 28Mar.2018
@author: Alex Ip
'''
from collections import OrderedDict
from geophys_utils.netcdf_converter import ToNetCDFConverter, NetCDFVariable
import numpy as np
class CSV2NetCDFConverter(ToNetCDFConverter):
    """Example concrete ToNetCDFConverter that writes a small synthetic grid.

    NOTE(review): despite the class name, ``csv_path`` is stored but never
    read here -- the emitted variable contains random test data.
    """

    def __init__(self, nc_out_path, csv_path, netcdf_format='NETCDF4_CLASSIC'):
        """Record the CSV source path and delegate netCDF setup to the base class.

        N.B: the base class constructor must be called from this constructor.
        """
        ToNetCDFConverter.__init__(self, nc_out_path, netcdf_format)
        self.csv_path = csv_path

    def get_global_attributes(self):
        """Return the global attribute <key>:<value> pairs for the dataset."""
        return {'title': 'test dataset'}

    def get_dimensions(self):
        """Return an OrderedDict of <dimension_name>:<dimension_size> pairs.

        Order matters for netCDF dimension creation: lon first, then lat.
        """
        return OrderedDict([('lon', 509), ('lat', 639)])

    def variable_generator(self):
        """Yield NetCDFVariable objects: lat axis, lon axis, CRS, test data."""
        # Latitude dimension variable (Y axis, stored north-to-south).
        yield self.build_dimension_variable(dimension_name='lat',
                                            min_value=-22.9247209891964,
                                            max_value=-20.5641209891964,
                                            long_name='latitude',
                                            units='degrees north',
                                            standard_name='latitude',
                                            descending=True  # Invert Y axis
                                            )
        # Longitude dimension variable (X axis, ascending).
        yield self.build_dimension_variable(dimension_name='lon',
                                            min_value=121.122089060582,
                                            max_value=123.001689060582,
                                            long_name='longitude',
                                            units='degrees east',
                                            standard_name='longitude',
                                            descending=False
                                            )
        # CRS variable for GDA94 (EPSG:4283), defined by its WKT.
        yield self.build_crs_variable('''\
GEOGCS["GDA94",
DATUM["Geocentric_Datum_of_Australia_1994",
SPHEROID["GRS 1980",6378137,298.257222101,
AUTHORITY["EPSG","7019"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6283"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4283"]]
'''
                                      )
        # Random payload over the (lat, lon) grid.
        grid_shape = (self.nc_output_dataset.dimensions['lat'].size,
                      self.nc_output_dataset.dimensions['lon'].size)
        yield NetCDFVariable(short_name='test_data',
                             data=np.random.random(grid_shape),
                             dimensions=['lat', 'lon'],
                             fill_value=0.0,
                             attributes={'units': 'random crap',
                                         'long_name': 'random numbers between 0 and 1'
                                         },
                             dtype='float32'
                             )
        return
def main():
    """Ad-hoc smoke test: write a small synthetic netCDF file to C:\\Temp."""
    nc_out_path = 'C:\\Temp\\test.nc'
    # BUG FIX: CSV2NetCDFConverter requires a csv_path argument; calling it
    # with only nc_out_path raised TypeError. The path is currently unused
    # by the example converter, but the constructor signature demands it.
    csv_path = 'C:\\Temp\\test.csv'
    c2n = CSV2NetCDFConverter(nc_out_path, csv_path)
    c2n.convert2netcdf()
    print('Finished writing netCDF file {}'.format(nc_out_path))


if __name__ == '__main__':
    main()
|
alex-ip/geophys_utils
|
geophys_utils/netcdf_converter/csv2netcdf_converter.py
|
Python
|
apache-2.0
| 4,919
|
[
"NetCDF"
] |
0bb6e7793f8428a23625f41bb5c003b807b2d941a2bdf6b54feeafc1db27e2a7
|
import click
import logging
from multiprocessing_logging import install_mp_handler
import simtk.unit as unit
# import all of the sim makers we have available
from wepy_tools.sim_makers.openmm import *
# Registry mapping a system-spec name (the part of the CLI SYSTEM argument
# before '/') to the OpenMM sim-maker class that parametrizes it.
SYSTEM_SIM_MAKERS = {
    'LennardJonesPair' : LennardJonesPairOpenMMSimMaker,
    'LysozymeImplicit' : LysozymeImplicitOpenMMSimMaker,
}
def parse_system_spec(spec):
    """Split a ``System/Runner-Platform`` spec string into its three parts.

    E.g. ``'LennardJonesPair/OpenMM-CPU'`` -> ``('LennardJonesPair',
    'OpenMM', 'CPU')``. Raises ValueError on a malformed spec (not exactly
    one '/' and one '-' in the second half).
    """
    system_name, runner_and_platform = spec.split("/")
    runner_name, platform_name = runner_and_platform.split('-')
    return system_name, runner_name, platform_name
@click.option('-v', '--verbose', is_flag=True)
@click.option('-W', '--work-mapper',
              default='WorkerMapper',
              help="Work mapper for doing work.")
@click.option('-R', '--resampler',
              default='WExplore',
              help="Resampling algorithm.")
# Click applies decorators bottom-up, so the argument nearest the function
# (SYSTEM) is the first positional on the command line; the resulting usage is
# SYSTEM N_WALKERS N_CYCLES TAU N_WORKERS, matching the docstring example.
@click.argument('n_workers', type=int)
@click.argument('tau', type=float)
@click.argument('n_cycles', type=int)
@click.argument('n_walkers', type=int)
@click.argument('system')
@click.command()
def cli(
        verbose,
        work_mapper,
        resampler,
        n_workers,
        tau,
        n_cycles,
        n_walkers,
        system,
):
    """Run a pre-parametrized wepy simulation.

    \b
    Parameters
    ----------
    \b
    SYSTEM : str
        Which pre-parametrized simulation to run should have the format: System/Runner-Platform
    \b
    N_WALKERS : int
        Number of parallel trajectories to run
    \b
    N_CYCLES : int
        How many cycles to run the simulation for
    \b
    TAU : float
        Cycle simulation time in picoseconds
    \b
    N_WORKERS : int
        Number of worker processes to run on
    \b
    Available Systems
    -----------------
    LennardJonesPair : A pair of Lennard-Jones particles
    LysozymeImplicit : Lysozyme-xylene receptor ligand in implicit solvent (2621 atoms)
    \b
    Available Runners/Platforms
    ---------------------------
    \b
    OpenMM-
        Reference
        CPU
        OpenCL (GPU)
        CUDA (GPU)
    \b
    Available Work Mappers
    ----------------------
    WorkerMapper (default) : parallel python multiprocessing based
        worker-consumer concurrency model
    WIP not available in test drive yet:
    TaskMapper : parallel python multiprocessing based task-process
        based concurrency model
    Mapper : non-parallel single-process implementation
    \b
    Available Resamplers
    --------------------
    No : Doesn't do any resampling. Simply runs an ensemble of walkers.
    WExplore : Hierarchical History Dependent Voronoi Binning
    REVO : Stateless and Binless algorithm that rewards in-ensemble novelty.
    \b
    Examples
    --------
    python -m wepy_test_drive LennardJonesPair/OpenMM-CPU 20 10 2 4
    \b
    Notes
    -----
    When using a GPU platform your number of workers should be the
    number of GPUs you want to use.
    """

    if verbose:
        logging.getLogger().setLevel(logging.DEBUG)
        install_mp_handler()

    logging.debug("Starting the test")

    # Resampler classes are exported with a 'Resampler' suffix
    # (e.g. 'WExplore' -> 'WExploreResampler').
    resampler_fullname = resampler + 'Resampler'

    # NOTE(review): `runner` is parsed out of the spec but never used below;
    # only OpenMM sim makers are registered in SYSTEM_SIM_MAKERS.
    sys_spec, runner, platform = parse_system_spec(system)

    # choose which sim_maker to use
    sim_maker = SYSTEM_SIM_MAKERS[sys_spec]()

    apparatus = sim_maker.make_apparatus(
        platform = platform,
        resampler = resampler_fullname,
    )

    # compute the number of steps to take from tau
    # (tau arrives as a bare float in picoseconds; the integrator step size
    # carries its own units, so the ratio is dimensionless)
    tau = tau * unit.picosecond
    n_steps = round(tau / apparatus.filters[0].integrator.getStepSize())

    config = sim_maker.make_configuration(apparatus,
                                          work_mapper_spec=work_mapper,
                                          platform=platform)

    sim_manager = sim_maker.make_sim_manager(n_walkers, apparatus, config)

    # run the simulation
    sim_manager.run_simulation(n_cycles, n_steps, num_workers=n_workers)


if __name__ == "__main__":
    cli()
|
ADicksonLab/wepy
|
src/wepy_test_drive.py
|
Python
|
mit
| 3,930
|
[
"OpenMM"
] |
f6f8dc4cc59dff1aae4aa8e60b44812892cd145ce5565b92c9b9bf04a60f9375
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This packages contains affiliated package tests.
"""
import os
import shutil
import time
import tempfile
import warnings
# import urllib
from astropy import log
from astropy.utils.data import download_file
from contextlib import contextmanager
@contextmanager
def cwd(path):
    """Run the managed block with ``path`` as the current working directory.

    The previous working directory is restored on exit, even if the body
    raises an exception.
    """
    previous_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous_dir)
def download_test_data(datadir):
    """Download the simulated test dataset zip and extract it into ``datadir``."""
    from zipfile import ZipFile
    import os, ssl

    # NOTE(review): this globally disables HTTPS certificate verification for
    # the whole process whenever PYTHONHTTPSVERIFY is unset -- a workaround
    # for hosts with broken certificate stores, but it weakens TLS for every
    # later request too. Flagged for reconsideration.
    if not os.environ.get("PYTHONHTTPSVERIFY", "") and getattr(
        ssl, "_create_unverified_context", None
    ):
        ssl._create_default_https_context = ssl._create_unverified_context

    url = "https://github.com/discos/srttools_test_data/blob/main/data/sim.zip?raw=true"
    with cwd(datadir):
        # urllib.request.urlretrieve(url, 'sim.zip')
        log.info(f"Downloading test data from {url}")
        # download_file fetches into astropy's cache; copy the cached file
        # into datadir under the name the extraction step expects.
        data = download_file(url)
        shutil.copyfile(data, "sim.zip")
        log.info("Unzipping")
        with ZipFile("sim.zip", "r") as zipObj:
            zipObj.extractall()
        log.info("Done")
def print_garbage(prefix):
    """Return five junk lines built from random temp-file names.

    Each line starts with a space, contains ``prefix``, and ends with a
    newline; used to pollute config files for parser-robustness tests.
    """
    pieces = []
    for _ in range(5):
        junk_name = tempfile.NamedTemporaryFile(prefix=prefix).name
        pieces.append(" " + junk_name[1:] + "\n")
    return "".join(pieces)
def sim_config_file(filename, add_garbage=False, prefix=None, label=""):
    """Create a sample config file, to be modified by hand.

    Parameters
    ----------
    filename : str
        Path of the ``.ini`` file to write.
    add_garbage : bool
        If True, interleave junk lines (random temp-file names) between the
        config sections, to exercise the parser's robustness.
    prefix : str, optional
        Prefix for the junk temp-file names; defaults to the current
        working directory.
    label : str
        Suffix appended to the ``gauss_ra``/``gauss_dec`` directory names.

    Returns
    -------
    str
        The config text that was written (the file carries one extra
        trailing newline added by ``print``).
    """
    string0 = f"""
[local]
workdir : .
datadir : .
productdir : test_image

[analysis]
projection : ARC
interpolation : spline
prefix : test_
list_of_directories :
    gauss_ra{label}
    gauss_dec{label}
    defective
"""
    string1 = """
calibrator_directories :
    calibration
"""
    string2 = """
skydip_directories :
    gauss_skydip

noise_threshold : 5

pixel_size : 0.8

[debugging]

debug_file_format : png
"""
    if prefix is None:
        prefix = os.getcwd()
    # The redundant function-local ``import tempfile`` was removed: the
    # module imports tempfile at top level and this function never uses it
    # directly (print_garbage does).
    string = string0
    if add_garbage:
        string += print_garbage(prefix)
    string += string1
    if add_garbage:
        string += print_garbage(prefix)
    string += string2

    with open(filename, "w") as fobj:
        print(string, file=fobj)

    return string
def _2d_gauss(x, y, sigma=2.5 / 60.0):
"""A Gaussian beam"""
import numpy as np
return np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))
def gauss_src_func(x, y):
    """2-D map of the fake point source: Gaussian beam with peak amplitude 25."""
    beam = _2d_gauss(x, y, sigma=2.5 / 60)
    return 25 * beam
def source_scan_func(x):
    """1-D cut through the beam for DummySrc scans, peak amplitude 52."""
    beam = _2d_gauss(x, 0, sigma=2.5 / 60)
    return 52 * beam
def cal2_scan_func(x):
    """1-D cut through the beam for DummyCal2 scans, peak amplitude 132.1."""
    beam = _2d_gauss(x, 0, sigma=2.5 / 60)
    return 132.1 * beam
def prepare_simulated_data(simdir):
    """Generate the simulated observation dataset under ``simdir``.

    Creates calibrator cross scans, source cross scans, two fake maps
    (with matching config files) and position-switching data, using the
    srttools simulation helpers. Used as a fallback when the pre-built
    test data cannot be downloaded.
    """
    from astropy import log
    from srttools.simulate import sim_crossscans, simulate_map
    from srttools.simulate import sim_position_switching
    from srttools.io import mkdir_p
    import numpy as np

    # Fixed seed: the dataset (and every test that reads it) is reproducible.
    np.random.seed(1241347)
    t0 = time.time()

    # ************* Create calibrators *******************
    caldir = os.path.join(simdir, "calibration")
    caldir2 = os.path.join(simdir, "calibration2")
    caldir3 = os.path.join(simdir, "calibration_bad")
    crossdir = os.path.join(simdir, "crossscans")
    log.info("Fake calibrators: DummyCal, 1 Jy.")
    mkdir_p(caldir)
    sim_crossscans(5, caldir)
    log.info("Fake calibrators: DummyCal2, 1.321 Jy.")
    mkdir_p(caldir2)
    sim_crossscans(5, caldir2, srcname="DummyCal2", scan_func=cal2_scan_func)
    # "Bad" calibrator: simulated with the *source* scan profile, i.e. a
    # deliberately wrong flux (see the log message).
    log.info("Fake calibrators: DummyCal2, wrong flux 0.52 Jy.")
    mkdir_p(caldir3)
    sim_crossscans(1, caldir3, srcname="DummyCal2", scan_func=source_scan_func)
    log.info("Fake cross scans: DummySrc, 0.52 Jy.")
    mkdir_p(crossdir)
    sim_crossscans(5, crossdir, srcname="DummySrc", scan_func=source_scan_func)

    simulated_flux = 0.25

    # ************* Create large-ish map *******************
    obsdir_ra = os.path.join(simdir, "gauss_ra")
    obsdir_dec = os.path.join(simdir, "gauss_dec")
    mkdir_p(obsdir_ra)
    mkdir_p(obsdir_dec)
    log.info(
        "Fake map: Point-like (but Gaussian beam shape), "
        "{} Jy.".format(simulated_flux)
    )
    simulate_map(
        count_map=gauss_src_func,
        length_ra=30.0,
        length_dec=30.0,
        outdir=(obsdir_ra, obsdir_dec),
        mean_ra=180,
        mean_dec=45,
        speed=1.5,
        spacing=0.5,
        srcname="Dummy",
        channel_ratio=0.8,
        baseline="flat",
    )
    # Config file pointing at the directories created above; garbage lines
    # are interleaved to exercise the config parser's robustness.
    config_file = os.path.abspath(os.path.join(simdir, "test_config_sim.ini"))
    sim_config_file(config_file, add_garbage=True, prefix="./")

    # ************* Create small-ish map *******************
    obsdir_ra = os.path.join(simdir, "gauss_ra_small")
    obsdir_dec = os.path.join(simdir, "gauss_dec_small")
    mkdir_p(obsdir_ra)
    mkdir_p(obsdir_dec)
    log.info(
        "Fake map: Point-like (but Gaussian beam shape), "
        "{} Jy.".format(simulated_flux)
    )
    simulate_map(
        count_map=gauss_src_func,
        length_ra=15.0,
        length_dec=15.0,
        outdir=(obsdir_ra, obsdir_dec),
        mean_ra=180,
        mean_dec=45,
        speed=3,
        spacing=1,
        srcname="Dummy",
        channel_ratio=0.8,
        baseline="flat",
    )
    config_file = os.path.abspath(
        os.path.join(simdir, "test_config_sim_small.ini")
    )
    sim_config_file(config_file, add_garbage=True, prefix="./", label="_small")

    # ************* Create data to convert *******************
    emptydir = os.path.join(simdir, "test_sdfits")
    pswdir_legacy = os.path.join(simdir, "test_psw_legacy")
    pswdir = os.path.join(simdir, "test_psw")
    for d in [emptydir, pswdir, pswdir_legacy]:
        mkdir_p(d)
    # Position-switching data in both the current and legacy cal formats.
    sim_position_switching(pswdir, nbin=1024)
    sim_position_switching(pswdir_legacy, nbin=1024, legacy_cal_format=True)
    simulate_map(width_ra=2, width_dec=2.0, outdir=emptydir)

    log.info(f"Dataset simulated in {time.time() - t0:.2f}s")
# --- Import-time setup: make sure the simulated dataset exists on disk. ---
# This runs whenever the test package is imported: if a probe directory is
# missing, try downloading the pre-built data and fall back to simulating it.
curdir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(curdir, "data")
simdir = os.path.join(datadir, "sim")

# Probe paths used to decide whether the dataset is already in place.
pswdir_probe = os.path.join(simdir, "test_psw")
config_probe = os.path.join(simdir, "test_config_sim.ini")

if not os.path.exists(pswdir_probe):
    try:
        download_test_data(datadir)
    except Exception as e:
        # Best-effort fallback: any download failure (network, SSL, zip)
        # triggers local simulation instead.
        log.info("Download failed. Simulating dataset")
        prepare_simulated_data(simdir)
else:
    log.info("Test data already downloaded")

# Fail fast at import time if neither path produced the expected files.
assert os.path.exists(simdir)
assert os.path.exists(config_probe)
assert os.path.exists(pswdir_probe)
|
matteobachetti/srt-single-dish-tools
|
srttools/tests/__init__.py
|
Python
|
bsd-3-clause
| 6,544
|
[
"Gaussian"
] |
6ab6633fd0c7b546a486e6c1527cd6e4323480010af0df0b70eb61d309ccebf4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.