| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12 to 1.05M | stringlengths 5 to 86 | stringlengths 4 to 191 | stringclasses 1 value | stringclasses 15 values | int32 12 to 1.05M | listlengths 1 to 23 | stringlengths 64 to 64 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
# Plotting the results of dissolution_pyrite_2 and the equivalent GWB simulation
import os
import sys
import matplotlib.pyplot as plt
f = open("gold/dissolution_pyrite_2_out.csv", "r")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[2:]]
f.close()
tim = [x[0] * 100 for x in data]
hematite = [x[1] for x in data]
pyrite = [x[2] for x in data]
co2aq = [x[3] * 1000 for x in data]
fe = [x[4] * 1000 for x in data]
hco3 = [x[5] * 1000 for x in data]
o2aq = [x[6] * 1000 for x in data]
so4 = [x[7] * 1000 for x in data]
pH = [x[8] for x in data]
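# Editor's note (assumption): the factors of 100 and 1000 above rescale the
# raw CSV columns to the mg and mmol-based units used on the plot axes below.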
gwb_tim = [i * 100 for i in range(11)]
gwb_hematite = [0.9997, 67.53, 133.8, 199.5, 264.3, 328, 390.2, 450.9, 509.9, 567.1, 622.4]
gwb_pH = [6.5, 2.571, 2.291, 2.134, 2.025, 1.943, 1.877, 1.822, 1.776, 1.736, 1.7]
plt.figure(0)
plt.plot([0] + tim, [0.9997] + hematite, 'k-', linewidth = 2.0, label = 'MOOSE')
plt.plot(gwb_tim, gwb_hematite, 'ks', label = 'GWB')
plt.legend()
plt.xlabel("Pyrite reacted (mg)");
plt.ylabel("Hematite mass (mg)")
plt.title("Hematite precipitated when reacting pyrite in a system open to O$_{2}$(g)");
plt.savefig("../../../doc/content/media/geochemistry/dissolution_pyrite_2_1.png")
plt.figure(1)
plt.plot([0] + tim, [6.5] + pH, 'k-', linewidth = 2.0, label = 'MOOSE')
plt.plot(gwb_tim, gwb_pH, 'ks', label = 'GWB')
plt.legend()
plt.xlabel("Pyrite reacted (mg)");
plt.ylabel("pH")
plt.title("pH when reacting pyrite in a system open to O$_{2}$(g)");
plt.savefig("../../../doc/content/media/geochemistry/dissolution_pyrite_2_2.png")
sys.exit(0)
| harterj/moose | modules/geochemistry/test/tests/time_dependent_reactions/dissolution_pyrite_2.py | Python | lgpl-2.1 | 1,882 | ["MOOSE"] | 3cff44932b2c935b2d41024f7ce74f5d9ff532db63206a890dfa1b03264f6fa6 |
# -*- coding: utf-8 -*-
"""Implementation of execution-related magic functions."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import ast
import bdb
import builtins as builtin_mod
import gc
import itertools
import os
import shlex
import sys
import time
import timeit
import math
from pdb import Restart
# cProfile was added in Python 2.5
try:
import cProfile as profile
import pstats
except ImportError:
# profile isn't bundled by default in Debian for license reasons
try:
import profile, pstats
except ImportError:
profile = pstats = None
from IPython.core import oinspect
from IPython.core import magic_arguments
from IPython.core import page
from IPython.core.error import UsageError
from IPython.core.macro import Macro
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic,
line_cell_magic, on_off, needs_local_scope)
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.contexts import preserve_keys
from IPython.utils.capture import capture_output
from IPython.utils.ipstruct import Struct
from IPython.utils.module_paths import find_mod
from IPython.utils.path import get_py_filename, shellglob
from IPython.utils.timing import clock, clock2
from warnings import warn
from logging import error
from io import StringIO
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
class TimeitResult(object):
"""
Object returned by the timeit magic with info about the run.
Contains the following attributes:
loops: (int) number of loops done per measurement
repeat: (int) number of times the measurement has been repeated
best: (float) best execution time / number
all_runs: (list of float) execution time of each run (in s)
compile_time: (float) time of statement compilation (s)
"""
def __init__(self, loops, repeat, best, worst, all_runs, compile_time, precision):
self.loops = loops
self.repeat = repeat
self.best = best
self.worst = worst
self.all_runs = all_runs
self.compile_time = compile_time
self._precision = precision
self.timings = [ dt / self.loops for dt in all_runs]
@property
def average(self):
return math.fsum(self.timings) / len(self.timings)
@property
def stdev(self):
mean = self.average
return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
def __str__(self):
pm = '+-'
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
u'\xb1'.encode(sys.stdout.encoding)
pm = u'\xb1'
except:
pass
return (
u"{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops} loop{loop_plural} each)"
.format(
pm = pm,
runs = self.repeat,
loops = self.loops,
loop_plural = "" if self.loops == 1 else "s",
run_plural = "" if self.repeat == 1 else "s",
mean = _format_time(self.average, self._precision),
std = _format_time(self.stdev, self._precision))
)
def _repr_pretty_(self, p , cycle):
unic = self.__str__()
p.text(u'<TimeitResult : '+unic+u'>')
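# A minimal sketch (editor's addition, not IPython code) of how TimeitResult
# aggregates raw timings: `all_runs` holds total seconds per repeat, and each
# entry is divided by `loops` to get the per-loop timings behind `average`
# and `stdev`. The numbers are invented for illustration.
def _timeit_result_demo():
    res = TimeitResult(loops=1000, repeat=3, best=2.0e-4, worst=3.0e-4,
                       all_runs=[0.20, 0.25, 0.30], compile_time=0.01,
                       precision=3)
    # timings == [0.00020, 0.00025, 0.00030]; average == 0.00025 s per loop
    return res.average, res.stdev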
class TimeitTemplateFiller(ast.NodeTransformer):
"""Fill in the AST template for timing execution.
This is quite closely tied to the template definition, which is in
:meth:`ExecutionMagics.timeit`.
"""
def __init__(self, ast_setup, ast_stmt):
self.ast_setup = ast_setup
self.ast_stmt = ast_stmt
def visit_FunctionDef(self, node):
"Fill in the setup statement"
self.generic_visit(node)
if node.name == "inner":
node.body[:1] = self.ast_setup.body
return node
def visit_For(self, node):
"Fill in the statement to be timed"
if getattr(getattr(node.body[0], 'value', None), 'id', None) == 'stmt':
node.body = self.ast_stmt.body
return node
class Timer(timeit.Timer):
"""Timer class that explicitly uses self.inner
which is an undocumented implementation detail of CPython,
not shared by PyPy.
"""
# Timer.timeit copied from CPython 3.4.2
def timeit(self, number=timeit.default_number):
"""Time 'number' executions of the main statement.
To be precise, this executes the setup statement once, and
then returns the time it takes to execute the main statement
a number of times, as a float measured in seconds. The
argument is the number of times through the loop, defaulting
to one million. The main statement, the setup statement and
the timer function to be used are passed to the constructor.
"""
it = itertools.repeat(None, number)
gcold = gc.isenabled()
gc.disable()
try:
timing = self.inner(it, self.timer)
finally:
if gcold:
gc.enable()
return timing
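# Hedged sketch (editor's addition): Timer expects an `inner(it, timer)`
# callable, normally produced by compiling the AST template in %timeit.
# A hand-written inner shows the contract: drain the iterator around the
# statement under test and return the elapsed time.
def _timer_demo(number=1000):
    t = Timer(timer=timeit.default_timer)
    def inner(_it, _timer):
        _t0 = _timer()
        for _i in _it:
            pass  # the timed statement would go here
        _t1 = _timer()
        return _t1 - _t0
    t.inner = inner
    return t.timeit(number)  # total seconds for `number` loops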
@magics_class
class ExecutionMagics(Magics):
"""Magics related to code execution, debugging, profiling, etc.
"""
def __init__(self, shell):
super(ExecutionMagics, self).__init__(shell)
if profile is None:
self.prun = self.profile_missing_notice
# Default execution function used to actually run user code.
self.default_runner = None
def profile_missing_notice(self, *args, **kwargs):
error("""\
The profile module could not be found. It has been removed from the standard
python packages because of its non-free license. To use profiling, install the
python-profiler package from non-free.""")
@skip_doctest
@line_cell_magic
def prun(self, parameter_s='', cell=None):
"""Run a statement through the python code profiler.
Usage, in line mode:
%prun [options] statement
Usage, in cell mode:
%%prun [options] [statement]
code...
code...
In cell mode, the additional code lines are appended to the (possibly
empty) statement in the first line. Cell mode allows you to easily
profile multiline blocks without having to put them in a separate
function.
The given statement (which doesn't require quote marks) is run via the
python profiler in a manner similar to the profile.run() function.
Namespaces are internally managed to work correctly; profile.run
cannot be used in IPython because it makes certain assumptions about
namespaces which do not hold under IPython.
Options:
-l <limit>
you can place restrictions on what or how much of the
profile gets printed. The limit value can be:
* A string: only information for function names containing this string
is printed.
* An integer: only this many lines are printed.
* A float (between 0 and 1): this fraction of the report is printed
(for example, use a limit of 0.4 to see the topmost 40% only).
You can combine several limits with repeated use of the option. For
example, ``-l __init__ -l 5`` will print only the topmost 5 lines of
information about class constructors.
-r
return the pstats.Stats object generated by the profiling. This
object has all the information about the profile in it, and you can
later use it for further analysis or in other functions.
-s <key>
sort profile by given key. You can provide more than one key
by using the option several times: '-s key1 -s key2 -s key3...'. The
default sorting key is 'time'.
The following is copied verbatim from the profile documentation
referenced below:
When more than one key is provided, additional keys are used as
secondary criteria when there is equality in all keys selected
before them.
Abbreviations can be used for any key names, as long as the
abbreviation is unambiguous. The following are the keys currently
defined:
============ =====================
Valid Arg Meaning
============ =====================
"calls" call count
"cumulative" cumulative time
"file" file name
"module" file name
"pcalls" primitive call count
"line" line number
"name" function name
"nfl" name/file/line
"stdname" standard name
"time" internal time
============ =====================
Note that all sorts on statistics are in descending order (placing
most time-consuming items first), whereas name, file, and line number
searches are in ascending order (i.e., alphabetical). The subtle
distinction between "nfl" and "stdname" is that the standard name is a
sort of the name as printed, which means that the embedded line
numbers get compared in an odd way. For example, lines 3, 20, and 40
would (if the file names were the same) appear in the string order
"20" "3" and "40". In contrast, "nfl" does a numeric compare of the
line numbers. In fact, sort_stats("nfl") is the same as
sort_stats("name", "file", "line").
-T <filename>
save profile results as shown on screen to a text
file. The profile is still shown on screen.
-D <filename>
save (via dump_stats) profile statistics to given
filename. This data is in a format understood by the pstats module, and
is generated by a call to the dump_stats() method of profile
objects. The profile is still shown on screen.
-q
suppress output to the pager. Best used with -T and/or -D above.
If you want to run complete programs under the profiler's control, use
``%run -p [prof_opts] filename.py [args to program]`` where prof_opts
contains profiler specific options as described here.
You can read the complete documentation for the profile module with::
In [1]: import profile; profile.help()
"""
opts, arg_str = self.parse_options(parameter_s, 'D:l:rs:T:q',
list_all=True, posix=False)
if cell is not None:
arg_str += '\n' + cell
arg_str = self.shell.input_splitter.transform_cell(arg_str)
return self._run_with_profiler(arg_str, opts, self.shell.user_ns)
def _run_with_profiler(self, code, opts, namespace):
"""
Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
Parameters
----------
code : str
Code to be executed.
opts : Struct
Options parsed by `self.parse_options`.
namespace : dict
A dictionary for Python namespace (e.g., `self.shell.user_ns`).
"""
# Fill default values for unspecified options:
opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))
prof = profile.Profile()
try:
prof = prof.runctx(code, namespace, namespace)
sys_exit = ''
except SystemExit:
sys_exit = """*** SystemExit exception caught in code being profiled."""
stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
lims = opts.l
if lims:
lims = [] # rebuild lims with ints/floats/strings
for lim in opts.l:
try:
lims.append(int(lim))
except ValueError:
try:
lims.append(float(lim))
except ValueError:
lims.append(lim)
# Trap output.
stdout_trap = StringIO()
stats_stream = stats.stream
try:
stats.stream = stdout_trap
stats.print_stats(*lims)
finally:
stats.stream = stats_stream
output = stdout_trap.getvalue()
output = output.rstrip()
if 'q' not in opts:
page.page(output)
print(sys_exit, end=' ')
dump_file = opts.D[0]
text_file = opts.T[0]
if dump_file:
prof.dump_stats(dump_file)
print('\n*** Profile stats marshalled to file',\
repr(dump_file)+'.',sys_exit)
if text_file:
pfile = open(text_file,'w')
pfile.write(output)
pfile.close()
print('\n*** Profile printout saved to text file',\
repr(text_file)+'.',sys_exit)
if 'r' in opts:
return stats
else:
return None
@line_magic
def pdb(self, parameter_s=''):
"""Control the automatic calling of the pdb interactive debugger.
Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
argument it works as a toggle.
When an exception is triggered, IPython can optionally call the
interactive pdb debugger after the traceback printout. %pdb toggles
this feature on and off.
The initial state of this feature is set in your configuration
file (the option is ``InteractiveShell.pdb``).
If you want to just activate the debugger AFTER an exception has fired,
without having to type '%pdb on' and rerunning your code, you can use
the %debug magic."""
par = parameter_s.strip().lower()
if par:
try:
new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
except KeyError:
print ('Incorrect argument. Use on/1, off/0, '
'or nothing for a toggle.')
return
else:
# toggle
new_pdb = not self.shell.call_pdb
# set on the shell
self.shell.call_pdb = new_pdb
print('Automatic pdb calling has been turned',on_off(new_pdb))
@skip_doctest
@magic_arguments.magic_arguments()
@magic_arguments.argument('--breakpoint', '-b', metavar='FILE:LINE',
help="""
Set break point at LINE in FILE.
"""
)
@magic_arguments.argument('statement', nargs='*',
help="""
Code to run in debugger.
You can omit this in cell magic mode.
"""
)
@line_cell_magic
def debug(self, line='', cell=None):
"""Activate the interactive debugger.
This magic command supports two ways of activating the debugger.
One is to activate the debugger before executing code. This way, you
can set a breakpoint and step through the code from that point.
You can use this mode by giving statements to execute and optionally
a breakpoint.
The other is to activate the debugger in post-mortem mode. You can
activate this mode by simply running %debug without any argument.
If an exception has just occurred, this lets you inspect its stack
frames interactively. Note that this will always work only on the last
traceback that occurred, so you must call this quickly after an
exception that you wish to inspect has fired, because if another one
occurs, it clobbers the previous one.
If you want IPython to automatically do this on every exception, see
the %pdb magic for more details.
"""
args = magic_arguments.parse_argstring(self.debug, line)
if not (args.breakpoint or args.statement or cell):
self._debug_post_mortem()
else:
code = "\n".join(args.statement)
if cell:
code += "\n" + cell
self._debug_exec(code, args.breakpoint)
def _debug_post_mortem(self):
self.shell.debugger(force=True)
def _debug_exec(self, code, breakpoint):
if breakpoint:
(filename, bp_line) = breakpoint.rsplit(':', 1)
bp_line = int(bp_line)
else:
(filename, bp_line) = (None, None)
self._run_with_debugger(code, self.shell.user_ns, filename, bp_line)
@line_magic
def tb(self, s):
"""Print the last traceback with the currently active exception mode.
See %xmode for changing exception reporting modes."""
self.shell.showtraceback()
@skip_doctest
@line_magic
def run(self, parameter_s='', runner=None,
file_finder=get_py_filename):
"""Run the named file inside IPython as a program.
Usage::
%run [-n -i -e -G]
[( -t [-N<N>] | -d [-b<N>] | -p [profile options] )]
( -m mod | file ) [args]
Parameters after the filename are passed as command-line arguments to
the program (put in sys.argv). Then, control returns to IPython's
prompt.
This is similar to running at a system prompt ``python file args``,
but with the advantage of giving you IPython's tracebacks, and of
loading all variables into your interactive namespace for further use
(unless -p is used, see below).
The file is executed in a namespace initially consisting only of
``__name__=='__main__'`` and sys.argv constructed as indicated. It thus
sees its environment as if it were being run as a stand-alone program
(except for sharing global objects such as previously imported
modules). But after execution, the IPython interactive namespace gets
updated with all variables defined in the program (except for __name__
and sys.argv). This allows for very convenient loading of code for
interactive work, while giving each program a 'clean sheet' to run in.
Arguments are expanded using shell-like glob match. Patterns
'*', '?', '[seq]' and '[!seq]' can be used. Additionally,
tilde '~' will be expanded into user's home directory. Unlike
real shells, quotation does not suppress expansions. Use
*two* back slashes (e.g. ``\\\\*``) to suppress expansions.
To completely disable these expansions, you can use -G flag.
Options:
-n
__name__ is NOT set to '__main__', but to the running file's name
without extension (as python does under import). This allows running
scripts and reloading the definitions in them without calling code
protected by an ``if __name__ == "__main__"`` clause.
-i
run the file in IPython's namespace instead of an empty one. This
is useful if you are experimenting with code written in a text editor
which depends on variables defined interactively.
-e
ignore sys.exit() calls or SystemExit exceptions in the script
being run. This is particularly useful if IPython is being used to
run unittests, which always exit with a sys.exit() call. In such
cases you are interested in the output of the test results, not in
seeing a traceback of the unittest module.
-t
print timing information at the end of the run. IPython will give
you an estimated CPU time consumption for your script, which under
Unix uses the resource module to avoid the wraparound problems of
time.clock(). Under Unix, an estimate of time spent on system tasks
is also given (for Windows platforms this is reported as 0.0).
If -t is given, an additional ``-N<N>`` option can be given, where <N>
must be an integer indicating how many times you want the script to
run. The final timing report will include total and per run results.
For example (testing the script uniq_stable.py)::
In [1]: run -t uniq_stable
IPython CPU timings (estimated):
User : 0.19597 s.
System: 0.0 s.
In [2]: run -t -N5 uniq_stable
IPython CPU timings (estimated):
Total runs performed: 5
Times : Total Per run
User : 0.910862 s, 0.1821724 s.
System: 0.0 s, 0.0 s.
-d
run your program under the control of pdb, the Python debugger.
This allows you to execute your program step by step, watch variables,
etc. Internally, what IPython does is similar to calling::
pdb.run('execfile("YOURFILENAME")')
with a breakpoint set on line 1 of your file. You can change the line
number for this automatic breakpoint to be <N> by using the -bN option
(where N must be an integer). For example::
%run -d -b40 myscript
will set the first breakpoint at line 40 in myscript.py. Note that
the first breakpoint must be set on a line which actually does
something (not a comment or docstring) for it to stop execution.
Or you can specify a breakpoint in a different file::
%run -d -b myotherfile.py:20 myscript
When the pdb debugger starts, you will see a (Pdb) prompt. You must
first enter 'c' (without quotes) to start execution up to the first
breakpoint.
Entering 'help' gives information about the use of the debugger. You
can easily see pdb's full documentation with "import pdb;pdb.help()"
at a prompt.
-p
run program under the control of the Python profiler module (which
prints a detailed report of execution times, function calls, etc).
You can pass other options after -p which affect the behavior of the
profiler itself. See the docs for %prun for details.
In this mode, the program's variables do NOT propagate back to the
IPython interactive namespace (because they remain in the namespace
where the profiler executes them).
Internally this triggers a call to %prun, see its documentation for
details on the options available specifically for profiling.
There is one special usage for which the text above doesn't apply:
if the filename ends with .ipy[nb], the file is run as an IPython script,
just as if the commands had been typed at the IPython prompt.
-m
specify module name to load instead of script path. Similar to
the -m option for the python interpreter. Use this option last if you
want to combine with other %run options. Unlike the python interpreter,
only source modules are allowed; no .pyc or .pyo files.
For example::
%run -m example
will run the example module.
-G
disable shell-like glob expansion of arguments.
"""
# Logic to handle issue #3664
# Add '--' after '-m <module_name>' to ignore additional args passed to a module.
if '-m' in parameter_s and '--' not in parameter_s:
argv = shlex.split(parameter_s, posix=(os.name == 'posix'))
for idx, arg in enumerate(argv):
if arg and arg.startswith('-') and arg != '-':
if arg == '-m':
argv.insert(idx + 2, '--')
break
else:
# Positional arg, break
break
parameter_s = ' '.join(shlex.quote(arg) for arg in argv)
# get arguments and set sys.argv for program to be run.
opts, arg_lst = self.parse_options(parameter_s,
'nidtN:b:pD:l:rs:T:em:G',
mode='list', list_all=1)
if "m" in opts:
modulename = opts["m"][0]
modpath = find_mod(modulename)
if modpath is None:
warn('%r is not a valid modulename on sys.path'%modulename)
return
arg_lst = [modpath] + arg_lst
try:
filename = file_finder(arg_lst[0])
except IndexError:
warn('you must provide at least a filename.')
print('\n%run:\n', oinspect.getdoc(self.run))
return
except IOError as e:
try:
msg = str(e)
except UnicodeError:
msg = e.message
error(msg)
return
if filename.lower().endswith(('.ipy', '.ipynb')):
with preserve_keys(self.shell.user_ns, '__file__'):
self.shell.user_ns['__file__'] = filename
self.shell.safe_execfile_ipy(filename)
return
# Control the response to exit() calls made by the script being run
exit_ignore = 'e' in opts
# Make sure that the running script gets a proper sys.argv as if it
# were run from a system shell.
save_argv = sys.argv # save it for later restoring
if 'G' in opts:
args = arg_lst[1:]
else:
# tilde and glob expansion
args = shellglob(map(os.path.expanduser, arg_lst[1:]))
sys.argv = [filename] + args # put in the proper filename
if 'n' in opts:
name = os.path.splitext(os.path.basename(filename))[0]
else:
name = '__main__'
if 'i' in opts:
# Run in user's interactive namespace
prog_ns = self.shell.user_ns
__name__save = self.shell.user_ns['__name__']
prog_ns['__name__'] = name
main_mod = self.shell.user_module
# Since '%run foo' emulates 'python foo.py' at the cmd line, we must
# set the __file__ global in the script's namespace
# TK: Is this necessary in interactive mode?
prog_ns['__file__'] = filename
else:
# Run in a fresh, empty namespace
# The shell MUST hold a reference to prog_ns so after %run
# exits, the python deletion mechanism doesn't zero it out
# (leaving dangling references). See interactiveshell for details
main_mod = self.shell.new_main_mod(filename, name)
prog_ns = main_mod.__dict__
# pickle fix. See interactiveshell for an explanation. But we need to
# make sure that, if we overwrite __main__, we replace it at the end
main_mod_name = prog_ns['__name__']
if main_mod_name == '__main__':
restore_main = sys.modules['__main__']
else:
restore_main = False
# This needs to be undone at the end to prevent holding references to
# every single object ever created.
sys.modules[main_mod_name] = main_mod
if 'p' in opts or 'd' in opts:
if 'm' in opts:
code = 'run_module(modulename, prog_ns)'
code_ns = {
'run_module': self.shell.safe_run_module,
'prog_ns': prog_ns,
'modulename': modulename,
}
else:
if 'd' in opts:
# allow exceptions to raise in debug mode
code = 'execfile(filename, prog_ns, raise_exceptions=True)'
else:
code = 'execfile(filename, prog_ns)'
code_ns = {
'execfile': self.shell.safe_execfile,
'prog_ns': prog_ns,
'filename': get_py_filename(filename),
}
try:
stats = None
if 'p' in opts:
stats = self._run_with_profiler(code, opts, code_ns)
else:
if 'd' in opts:
bp_file, bp_line = parse_breakpoint(
opts.get('b', ['1'])[0], filename)
self._run_with_debugger(
code, code_ns, filename, bp_line, bp_file)
else:
if 'm' in opts:
def run():
self.shell.safe_run_module(modulename, prog_ns)
else:
if runner is None:
runner = self.default_runner
if runner is None:
runner = self.shell.safe_execfile
def run():
runner(filename, prog_ns, prog_ns,
exit_ignore=exit_ignore)
if 't' in opts:
# timed execution
try:
nruns = int(opts['N'][0])
if nruns < 1:
error('Number of runs must be >=1')
return
except KeyError:
nruns = 1
self._run_with_timing(run, nruns)
else:
# regular execution
run()
if 'i' in opts:
self.shell.user_ns['__name__'] = __name__save
else:
# update IPython interactive namespace
# Some forms of read errors on the file may mean the
# __name__ key was never set; using pop we don't have to
# worry about a possible KeyError.
prog_ns.pop('__name__', None)
with preserve_keys(self.shell.user_ns, '__file__'):
self.shell.user_ns.update(prog_ns)
finally:
# It's a bit of a mystery why, but __builtins__ can change from
# being a module to becoming a dict missing some key data after
# %run. As best I can see, this is NOT something IPython is doing
# at all, and similar problems have been reported before:
# http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
# Since this seems to be done by the interpreter itself, the best
# we can do is to at least restore __builtins__ for the user on
# exit.
self.shell.user_ns['__builtins__'] = builtin_mod
# Ensure key global structures are restored
sys.argv = save_argv
if restore_main:
sys.modules['__main__'] = restore_main
else:
# Remove from sys.modules the reference to main_mod we'd
# added. Otherwise it will trap references to objects
# contained therein.
del sys.modules[main_mod_name]
return stats
def _run_with_debugger(self, code, code_ns, filename=None,
bp_line=None, bp_file=None):
"""
Run `code` in debugger with a break point.
Parameters
----------
code : str
Code to execute.
code_ns : dict
A namespace in which `code` is executed.
filename : str
`code` is run as if it were in `filename`.
bp_line : int, optional
Line number of the break point.
bp_file : str, optional
Path to the file in which break point is specified.
`filename` is used if not given.
Raises
------
UsageError
If the break point given by `bp_line` is not valid.
"""
deb = self.shell.InteractiveTB.pdb
if not deb:
self.shell.InteractiveTB.pdb = self.shell.InteractiveTB.debugger_cls()
deb = self.shell.InteractiveTB.pdb
# deb.checkline() fails if deb.curframe exists but is None; it can
# handle it not existing. https://github.com/ipython/ipython/issues/10028
if hasattr(deb, 'curframe'):
del deb.curframe
# reset Breakpoint state, which is moronically kept
# in a class
bdb.Breakpoint.next = 1
bdb.Breakpoint.bplist = {}
bdb.Breakpoint.bpbynumber = [None]
deb.clear_all_breaks()
if bp_line is not None:
# Set an initial breakpoint to stop execution
maxtries = 10
bp_file = bp_file or filename
checkline = deb.checkline(bp_file, bp_line)
if not checkline:
for bp in range(bp_line + 1, bp_line + maxtries + 1):
if deb.checkline(bp_file, bp):
break
else:
msg = ("\nI failed to find a valid line to set "
"a breakpoint\n"
"after trying up to line: %s.\n"
"Please set a valid breakpoint manually "
"with the -b option." % bp)
raise UsageError(msg)
# if we find a good linenumber, set the breakpoint
deb.do_break('%s:%s' % (bp_file, bp_line))
if filename:
# Mimic Pdb._runscript(...)
deb._wait_for_mainpyfile = True
deb.mainpyfile = deb.canonic(filename)
# Start file run
print("NOTE: Enter 'c' at the %s prompt to continue execution." % deb.prompt)
try:
if filename:
# save filename so it can be used by methods on the deb object
deb._exec_filename = filename
while True:
try:
deb.run(code, code_ns)
except Restart:
print("Restarting")
if filename:
deb._wait_for_mainpyfile = True
deb.mainpyfile = deb.canonic(filename)
continue
else:
break
except:
etype, value, tb = sys.exc_info()
# Skip three frames in the traceback: the %run one,
# one inside bdb.py, and the command-line typed by the
# user (run by exec in pdb itself).
self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
@staticmethod
def _run_with_timing(run, nruns):
"""
Run function `run` and print timing information.
Parameters
----------
run : callable
Any callable object which takes no argument.
nruns : int
Number of times to execute `run`.
"""
twall0 = time.time()
if nruns == 1:
t0 = clock2()
run()
t1 = clock2()
t_usr = t1[0] - t0[0]
t_sys = t1[1] - t0[1]
print("\nIPython CPU timings (estimated):")
print(" User : %10.2f s." % t_usr)
print(" System : %10.2f s." % t_sys)
else:
runs = range(nruns)
t0 = clock2()
for nr in runs:
run()
t1 = clock2()
t_usr = t1[0] - t0[0]
t_sys = t1[1] - t0[1]
print("\nIPython CPU timings (estimated):")
print("Total runs performed:", nruns)
print(" Times : %10s %10s" % ('Total', 'Per run'))
print(" User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns))
print(" System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns))
twall1 = time.time()
print("Wall time: %10.2f s." % (twall1 - twall0))
@skip_doctest
@line_cell_magic
@needs_local_scope
def timeit(self, line='', cell=None, local_ns=None):
"""Time execution of a Python statement or expression
Usage, in line mode:
%timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] statement
or in cell mode:
%%timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] setup_code
code
code...
Time execution of a Python statement or expression using the timeit
module. This function can be used both as a line and cell magic:
- In line mode you can time a single-line statement (though multiple
ones can be chained using semicolons).
- In cell mode, the statement in the first line is used as setup code
(executed but not timed) and the body of the cell is timed. The cell
body has access to any variables created in the setup code.
Options:
-n<N>: execute the given statement <N> times in a loop. If this value
is not given, a fitting value is chosen.
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 3
-t: use time.time to measure the time, which is the default on Unix.
This function measures wall time.
-c: use time.clock to measure the time, which is the default on
Windows and measures wall time. On Unix, resource.getrusage is used
instead and returns the CPU user time.
-p<P>: use a precision of <P> digits to display the timing result.
Default: 3
-q: Quiet, do not print result.
-o: return a TimeitResult that can be stored in a variable to inspect
the result in more detail.
Examples
--------
::
In [1]: %timeit pass
8.26 ns ± 0.12 ns per loop (mean ± std. dev. of 7 runs, 100000000 loops each)
In [2]: u = None
In [3]: %timeit u is None
29.9 ns ± 0.643 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
In [4]: %timeit -r 4 u == None
In [5]: import time
In [6]: %timeit -n1 time.sleep(2)
The times reported by %timeit will be slightly higher than those
reported by the timeit.py script when variables are accessed. This is
due to the fact that %timeit executes the statement in the namespace
of the shell, compared with timeit.py, which uses a single setup
statement to import function or create variables. Generally, the bias
does not matter as long as results from timeit.py are not mixed with
those from %timeit."""
opts, stmt = self.parse_options(line,'n:r:tcp:qo',
posix=False, strict=False)
if stmt == "" and cell is None:
return
timefunc = timeit.default_timer
number = int(getattr(opts, "n", 0))
default_repeat = 7 if timeit.default_repeat < 7 else timeit.default_repeat
repeat = int(getattr(opts, "r", default_repeat))
precision = int(getattr(opts, "p", 3))
quiet = 'q' in opts
return_result = 'o' in opts
if hasattr(opts, "t"):
timefunc = time.time
if hasattr(opts, "c"):
timefunc = clock
timer = Timer(timer=timefunc)
# this code has tight coupling to the inner workings of timeit.Timer,
# but is there a better way to achieve that the code stmt has access
# to the shell namespace?
transform = self.shell.input_splitter.transform_cell
if cell is None:
# called as line magic
ast_setup = self.shell.compile.ast_parse("pass")
ast_stmt = self.shell.compile.ast_parse(transform(stmt))
else:
ast_setup = self.shell.compile.ast_parse(transform(stmt))
ast_stmt = self.shell.compile.ast_parse(transform(cell))
ast_setup = self.shell.transform_ast(ast_setup)
ast_stmt = self.shell.transform_ast(ast_stmt)
# Check that these compile to valid Python code *outside* the timer func
# Invalid code may become valid when put inside the function & loop,
# which messes up error messages.
# https://github.com/ipython/ipython/issues/10636
self.shell.compile(ast_setup, "<magic-timeit-setup>", "exec")
self.shell.compile(ast_stmt, "<magic-timeit-stmt>", "exec")
# This codestring is taken from timeit.template - we fill it in as an
# AST, so that we can apply our AST transformations to the user code
# without affecting the timing code.
timeit_ast_template = ast.parse('def inner(_it, _timer):\n'
' setup\n'
' _t0 = _timer()\n'
' for _i in _it:\n'
' stmt\n'
' _t1 = _timer()\n'
' return _t1 - _t0\n')
timeit_ast = TimeitTemplateFiller(ast_setup, ast_stmt).visit(timeit_ast_template)
timeit_ast = ast.fix_missing_locations(timeit_ast)
# Track compilation time so it can be reported if too long
# Minimum time above which compilation time will be reported
tc_min = 0.1
t0 = clock()
code = self.shell.compile(timeit_ast, "<magic-timeit>", "exec")
tc = clock()-t0
ns = {}
glob = self.shell.user_ns
# handles global vars with same name as local vars. We store them in conflict_globs.
if local_ns is not None:
conflict_globs = {}
for var_name, var_val in glob.items():
if var_name in local_ns:
conflict_globs[var_name] = var_val
glob.update(local_ns)
exec(code, glob, ns)
timer.inner = ns["inner"]
# This is used to check if there is a huge difference between the
# best and worst timings.
# Issue: https://github.com/ipython/ipython/issues/6471
if number == 0:
# determine number so that 0.2 <= total time < 2.0
for index in range(0, 10):
number = 10 ** index
time_number = timer.timeit(number)
if time_number >= 0.2:
break
all_runs = timer.repeat(repeat, number)
best = min(all_runs) / number
worst = max(all_runs) / number
timeit_result = TimeitResult(number, repeat, best, worst, all_runs, tc, precision)
# Restore global vars from conflict_globs
if local_ns is not None:
if len(conflict_globs) > 0:
glob.update(conflict_globs)
if not quiet:
# Check best timing is greater than zero to avoid a
# ZeroDivisionError.
# In cases where the slowest timing is less than a microsecond
# we assume that it does not really matter if the fastest
# timing is 4 times faster than the slowest timing or not.
if worst > 4 * best and best > 0 and worst > 1e-6:
print("The slowest run took %0.2f times longer than the "
"fastest. This could mean that an intermediate result "
"is being cached." % (worst / best))
print( timeit_result )
if tc > tc_min:
print("Compiler time: %.2f s" % tc)
if return_result:
return timeit_result
@skip_doctest
@needs_local_scope
@line_cell_magic
def time(self,line='', cell=None, local_ns=None):
"""Time execution of a Python statement or expression.
The CPU and wall clock times are printed, and the value of the
expression (if any) is returned. Note that under Win32, system time
is always reported as 0, since it cannot be measured.
This function can be used both as a line and cell magic:
- In line mode you can time a single-line statement (though multiple
ones can be chained with using semicolons).
- In cell mode, you can time the cell body (a directly
following statement raises an error).
This function provides very basic timing functionality. Use the timeit
magic for more control over the measurement.
Examples
--------
::
In [1]: %time 2**128
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00
Out[1]: 340282366920938463463374607431768211456L
In [2]: n = 1000000
In [3]: %time sum(range(n))
CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
Wall time: 1.37
Out[3]: 499999500000L
In [4]: %time print 'hello world'
hello world
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00
Note that the time needed by Python to compile the given expression
will be reported if it is more than 0.1s. In this example, the
actual exponentiation is done by Python at compilation time, so while
the expression can take a noticeable amount of time to compute, that
time is purely due to the compilation:
In [5]: %time 3**9999;
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00 s
In [6]: %time 3**999999;
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00 s
Compiler : 0.78 s
"""
# fail immediately if the given expression can't be compiled
if line and cell:
raise UsageError("Can't use statement directly after '%%time'!")
if cell:
expr = self.shell.input_transformer_manager.transform_cell(cell)
else:
expr = self.shell.input_transformer_manager.transform_cell(line)
# Minimum time above which parse time will be reported
tp_min = 0.1
t0 = clock()
expr_ast = self.shell.compile.ast_parse(expr)
tp = clock()-t0
# Apply AST transformations
expr_ast = self.shell.transform_ast(expr_ast)
# Minimum time above which compilation time will be reported
tc_min = 0.1
if len(expr_ast.body)==1 and isinstance(expr_ast.body[0], ast.Expr):
mode = 'eval'
source = '<timed eval>'
expr_ast = ast.Expression(expr_ast.body[0].value)
else:
mode = 'exec'
source = '<timed exec>'
t0 = clock()
code = self.shell.compile(expr_ast, source, mode)
tc = clock()-t0
# skew measurement as little as possible
glob = self.shell.user_ns
wtime = time.time
# time execution
wall_st = wtime()
if mode=='eval':
st = clock2()
try:
out = eval(code, glob, local_ns)
except:
self.shell.showtraceback()
return
end = clock2()
else:
st = clock2()
try:
exec(code, glob, local_ns)
except:
self.shell.showtraceback()
return
end = clock2()
out = None
wall_end = wtime()
# Compute actual times and report
wall_time = wall_end-wall_st
cpu_user = end[0]-st[0]
cpu_sys = end[1]-st[1]
cpu_tot = cpu_user+cpu_sys
# On Windows cpu_sys is always zero, so it adds no information to the print below
if sys.platform != 'win32':
print("CPU times: user %s, sys: %s, total: %s" % \
(_format_time(cpu_user),_format_time(cpu_sys),_format_time(cpu_tot)))
print("Wall time: %s" % _format_time(wall_time))
if tc > tc_min:
print("Compiler : %s" % _format_time(tc))
if tp > tp_min:
print("Parser : %s" % _format_time(tp))
return out
@skip_doctest
@line_magic
def macro(self, parameter_s=''):
"""Define a macro for future re-execution. It accepts ranges of history,
filenames or string objects.
Usage:\\
%macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
Options:
-r: use 'raw' input. By default, the 'processed' history is used,
so that magics are loaded in their transformed version to valid
Python. If this option is given, the raw input as typed at the
command line is used instead.
-q: quiet macro definition. By default, a tag line is printed
to indicate the macro has been created, and then the contents of
the macro are printed. If this option is given, then no printout
is produced once the macro is created.
This will define a global variable called `name` which is a string
made of joining the slices and lines you specify (n1,n2,... numbers
above) from your input history into a single string. This variable
acts like an automatic function which re-executes those lines as if
you had typed them. You just type 'name' at the prompt and the code
executes.
The syntax for indicating input ranges is described in %history.
Note: as a 'hidden' feature, you can also use traditional python slice
notation, where N:M means numbers N through M-1.
For example, if your history contains (print using %hist -n)::
44: x=1
45: y=3
46: z=x+y
47: print x
48: a=5
49: print 'x',x,'y',y
you can create a macro with lines 44 through 47 (included) and line 49
called my_macro with::
In [55]: %macro my_macro 44-47 49
Now, typing `my_macro` (without quotes) will re-execute all this code
in one pass.
You don't need to give the line-numbers in order, and any given line
number can appear multiple times. You can assemble macros with any
lines from your input history in any order.
The macro is a simple object which holds its value in an attribute,
but IPython's display system checks for macros and executes them as
code instead of printing them when you type their name.
You can view a macro's contents by explicitly printing it with::
print macro_name
"""
opts,args = self.parse_options(parameter_s,'rq',mode='list')
if not args: # List existing macros
return sorted(k for k,v in self.shell.user_ns.items() if isinstance(v, Macro))
if len(args) == 1:
raise UsageError(
"%macro insufficient args; usage '%macro name n1-n2 n3-4...")
name, codefrom = args[0], " ".join(args[1:])
#print 'rng',ranges # dbg
try:
lines = self.shell.find_user_code(codefrom, 'r' in opts)
except (ValueError, TypeError) as e:
print(e.args[0])
return
macro = Macro(lines)
self.shell.define_macro(name, macro)
if not ( 'q' in opts) :
print('Macro `%s` created. To execute, type its name (without quotes).' % name)
print('=== Macro contents: ===')
print(macro, end=' ')
@magic_arguments.magic_arguments()
@magic_arguments.argument('output', type=str, default='', nargs='?',
help="""The name of the variable in which to store output.
This is a utils.io.CapturedIO object with stdout/err attributes
for the text of the captured output.
CapturedOutput also has a show() method for displaying the output,
and __call__ as well, so you can use that to quickly display the
output.
If unspecified, captured output is discarded.
"""
)
@magic_arguments.argument('--no-stderr', action="store_true",
help="""Don't capture stderr."""
)
@magic_arguments.argument('--no-stdout', action="store_true",
help="""Don't capture stdout."""
)
@magic_arguments.argument('--no-display', action="store_true",
help="""Don't capture IPython's rich display."""
)
@cell_magic
def capture(self, line, cell):
"""run the cell, capturing stdout, stderr, and IPython's rich display() calls."""
args = magic_arguments.parse_argstring(self.capture, line)
out = not args.no_stdout
err = not args.no_stderr
disp = not args.no_display
with capture_output(out, err, disp) as io:
self.shell.run_cell(cell)
if args.output:
self.shell.user_ns[args.output] = io
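# Hedged usage note (editor's addition): a typical cell reads
#   %%capture captured --no-stderr
#   print("hello")
# after which `captured.stdout` holds the captured text and
# `captured.show()` replays the output.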
def parse_breakpoint(text, current_file):
'''Returns (file, line) for file:line and (current_file, line) for line'''
colon = text.find(':')
if colon == -1:
return current_file, int(text)
else:
return text[:colon], int(text[colon+1:])
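# Hedged example (editor's addition): parse_breakpoint accepts "file:line"
# or a bare line number, falling back to current_file in the latter case.
def _parse_breakpoint_demo():
    assert parse_breakpoint('40', 'myscript.py') == ('myscript.py', 40)
    assert parse_breakpoint('other.py:20', 'myscript.py') == ('other.py', 20)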
def _format_time(timespan, precision=3):
"""Formats the timespan in a human readable form"""
if timespan >= 60.0:
# we have more than a minute, format that in a human readable form
# Idea from http://snipplr.com/view/5713/
parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)]
time = []
leftover = timespan
for suffix, length in parts:
value = int(leftover / length)
if value > 0:
leftover = leftover % length
time.append(u'%s%s' % (str(value), suffix))
if leftover < 1:
break
return " ".join(time)
# Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals.
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
# Try to prevent crashes by being more conservative than necessary.
# E.g. Eclipse is able to print a µ, but has no sys.stdout.encoding set.
units = [u"s", u"ms", u"us", u"ns"]  # the safe fallback values
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
u'\xb5'.encode(sys.stdout.encoding)
units = [u"s", u"ms",u'\xb5s',"ns"]
except:
pass
scaling = [1, 1e3, 1e6, 1e9]
if timespan > 0.0:
order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
else:
order = 3
return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
| Foxfanmedium/python_training | OnlineCoursera/mail_ru/Python_1/env/Lib/site-packages/IPython/core/magics/execution.py | Python | apache-2.0 | 54,460 | ["VisIt"] | 1d1a05a8b73f6263e725382e537af048a6825bbaaa35218fa35cff98d28503a0 |
from stencil_kernel import *
import sys
import numpy
import math
width = int(sys.argv[2])
height = int(sys.argv[3])
image_in = open(sys.argv[1], 'rb')
stdev_d = 3
radius = stdev_d * 3
class Kernel(StencilKernel):
def run(self, in_grid, out_grid):
self.kernel(in_grid, self.gaussian(stdev_d, radius*2), self.gaussian(70, 256), out_grid)
def gaussian(self, stdev, length):
result = StencilGrid([length])
scale = 1.0/(stdev*math.sqrt(2.0*math.pi))
divisor = -1.0 / (2.0 * stdev * stdev)
for x in range(length):
result[x] = scale * math.exp(float(x) * float(x) * divisor)
return result
def kernel(self, in_img, filter_d, filter_s, out_img):
for x in out_img.interior_points():
for y in in_img.neighbors(x, 1):
out_img[x] += in_img[y] * filter_d[int(distance(x, y))] * filter_s[abs(in_img[x] - in_img[y])]
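# Hedged sketch (editor's addition): the same weights computed with plain
# floats, showing that scale * exp(x*x * divisor) is the positive half of
# a normal density with standard deviation `stdev`.
def gaussian_weights(stdev, length):
    scale = 1.0 / (stdev * math.sqrt(2.0 * math.pi))
    divisor = -1.0 / (2.0 * stdev * stdev)
    return [scale * math.exp(x * x * divisor) for x in range(length)]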
pixels = list(image_in.read(width * height))  # grayscale byte values as ints
intensity = float(sum(pixels))/len(pixels)
kernel = Kernel()
kernel.should_unroll = False
out_grid = StencilGrid([width,height])
out_grid.ghost_depth = radius
in_grid = StencilGrid([width,height])
in_grid.ghost_depth = radius
for x in range(-radius,radius+1):
for y in range(-radius,radius+1):
in_grid.neighbor_definition[1].append( (x,y) )
for x in range(0,width):
for y in range(0,height):
in_grid.data[(x, y)] = pixels[y * width + x]
kernel.run(in_grid, out_grid)
for x in range(0,width):
for y in range(0,height):
pixels[y * width + x] = out_grid.data[(x, y)]
out_intensity = float(sum(pixels))/len(pixels)
for i in range(0, len(pixels)):
pixels[i] = min(255, max(0, int(pixels[i] * (intensity/out_intensity))))
image_out = open(sys.argv[4], 'wb')
image_out.write(bytes(pixels))
| richardxia/asp-multilevel-debug | specializers/stencil/demo/mallard/gaussian_filter.py | Python | bsd-3-clause | 1,835 | ["Gaussian"] | 883e75c97c47c4e745e85d40ccc63204cc4b3902289a0e2e678e97d7ad8f2bb0 |
|
"""Bulk Al(bcc) test"""
from math import sqrt
from ase import Atoms
from ase.visualize import view
from gpaw import GPAW
afcc = 3.985 # Theoretical fcc lattice parameter
a = afcc * 2**(-1/3.) # Assuming the same volume per atom
a = afcc * sqrt(2/3.) # Assuming the same nearest neighbor distance
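# Editor's check: with afcc = 3.985 these give a ~ 3.163 (equal volume)
# and a ~ 3.254 (equal nearest-neighbour distance); the second assignment
# is the one actually used below.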
bulk = Atoms(symbols='2Al',
positions=[(0, 0, 0),
(.5, .5, .5)],
pbc=True)
bulk.set_cell((a, a, a), scale_atoms=True)
# View 3x3x3 repeated structure
view(bulk * [3, 3, 3])
calc = GPAW(nbands=8)
bulk.set_calculator(calc)
# Convergence with respect to k-points:
calc.set(h=.25, txt='Al-fcc-k.txt')
for k in [4, 6, 8, 10]:
calc.set(kpts=(k, k, k))
print(k, bulk.get_potential_energy())
# Convergence with respect to grid spacing:
calc.set(kpts=(8, 8, 8), txt='Al-bcc-h.txt')
for g in [12, 16, 20]:
h = a / g
calc.set(h=h)
print(h, bulk.get_potential_energy())
# Set parameters to reasonably converged values
calc.set(h=.28, kpts=(8, 8, 8))
for a in [3.0, 3.1, 3.2, 3.3]:
calc.set(txt='bulk-bcc-a%.1f.txt' % a)
bulk.set_cell((a, a, a), scale_atoms=True)
print(a, bulk.get_potential_energy())
# run: ag bulk-bcc*.txt
# Choose 'Tools -> Bulk Modulus' to get
# B = 74.633 GPa, and
# V = 32.472 A^3 <=> a = 3.190
# To be compared to the fcc values:
# B = 85.823 GPa, and
# V = 63.270 A^3 <=> a = 3.985
| qsnake/gpaw | doc/exercises/aluminium/Al_bcc.py | Python | gpl-3.0 | 1,398 | ["ASE", "GPAW"] | 0caf4a41987e313d22d7012e4cf65ffcaf9a52c4ac2b90ca40e36011709bac0c |
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
OTBUtils.py
---------------------
Date : 11-12-13
Copyright : (C) 2013 by CS Systemes d'information (CS SI)
Email : otb at c-s dot fr (CS SI)
Contributors : Julien Malik (CS SI) - creation of otbspecific
Oscar Picas (CS SI) -
Alexia Mondot (CS SI) - split otbspecific into 2 files
add functions
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
When QGIS is run, OTB algorithms are created according to the XML files in the description/ directory.
"""
__author__ = 'Julien Malik, Oscar Picas, Alexia Mondot'
__date__ = 'December 2013'
__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
__version__ = "3.8"
import copy
try:
import processing
except ImportError:
raise Exception("Processing must be installed and available in PYTHONPATH")
try:
import otbApplication
except ImportError:
raise Exception("OTB python plugins must be installed and available in PYTHONPATH")
from processing.algs.otb.OTBUtils import (renameValueField,
remove_dependant_choices,
remove_other_choices,
remove_parameter_by_key,
defaultSplit,
split_by_choice,
defaultWrite,
remove_choice,
remove_independant_choices )
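# Hedged sketch (editor's addition): each get* function below receives the
# OTB application name and its parsed XML description, prunes or splits the
# parameter tree, and returns a list of resulting DOM documents. A caller
# would look roughly like this (register_algorithm is hypothetical, not a
# real QGIS/OTB API):
#
#   docs = getBinaryMorphologicalOperation('BinaryMorphologicalOperation', dom)
#   for doc in docs:
#       register_algorithm(doc)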
def getBinaryMorphologicalOperation(available_app, original_dom_document):
"""
Keep ball as the only available structype.
Split the application according to its filter: dilate, erode, opening, closing.
"""
the_root = original_dom_document
renameValueField(the_root, 'structype.ball.xradius', 'name', 'The Structuring Element Radius')
renameValueField(the_root, 'structype.ball.xradius', 'description', 'The Structuring Element Radius')
remove_dependant_choices(the_root, 'structype', 'ball')
remove_other_choices(the_root, 'structype', 'ball')
remove_dependant_choices(the_root, 'filter', 'dilate')
remove_parameter_by_key(the_root, 'structype.ball.yradius')
#defaultWrite(available_app, the_root)
the_list = defaultSplit(available_app, the_root, 'filter')
return the_list
def getEdgeExtraction(available_app, original_dom_document):
"""
Keep a circular (not oval) neighbourhood: only the touzi xradius is exposed.
Split the application according to its filter: gradient, sobel, touzi.
"""
the_root = original_dom_document
renameValueField(the_root, 'filter.touzi.xradius', 'name', 'The Radius')
renameValueField(the_root, 'filter.touzi.xradius', 'description', 'The Radius')
remove_parameter_by_key(the_root, 'filter.touzi.yradius')
split = split_by_choice(the_root, 'filter')
the_list = []
for key in split:
defaultWrite('%s-%s' % (available_app, key), split[key])
the_list.append(split[key])
return the_list
def getGrayScaleMorphologicalOperation(available_app, original_dom_document):
"""
Keep ball as the only available structype.
Split the application according to its filter: dilate, erode, opening, closing.
"""
the_root = original_dom_document
renameValueField(the_root, 'structype.ball.xradius', 'name', 'The Structuring Element Radius')
renameValueField(the_root, 'structype.ball.xradius', 'description', 'The Structuring Element Radius')
remove_dependant_choices(the_root, 'structype', 'ball')
remove_other_choices(the_root, 'structype', 'ball')
remove_parameter_by_key(the_root, 'structype.ball.yradius')
split = defaultSplit(available_app, the_root, 'filter')
return split
def getOrthoRectification(available_app, original_dom_document):
"""
Keep only the auto output mode.
Remove all parameters which should be updated once the input file is given.
Split by SRS: EPSG, fit-to-ortho, lambert-WGS84 and UTM.
Each of these SRS has its own parameters modified in this function.
Delete the GEOID and DEM parameters, as they are not updated when the OTB
algorithms are created at QGIS startup; their values are picked from the settings.
"""
the_root = original_dom_document
remove_choice(the_root, 'outputs.mode', 'auto')
remove_independant_choices(the_root, 'outputs.mode', 'auto')
remove_choice(the_root, 'outputs.mode', 'outputroi')
remove_independant_choices(the_root, 'outputs.mode', 'outputroi')
remove_parameter_by_key(the_root, 'outputs.ulx')
remove_parameter_by_key(the_root, 'outputs.uly')
remove_parameter_by_key(the_root, 'outputs.sizex')
remove_parameter_by_key(the_root, 'outputs.sizey')
remove_parameter_by_key(the_root, 'outputs.spacingx')
remove_parameter_by_key(the_root, 'outputs.spacingy')
remove_parameter_by_key(the_root, 'outputs.lrx')
remove_parameter_by_key(the_root, 'outputs.lry')
remove_parameter_by_key(the_root, 'opt.rpc')
deleteGeoidSrtm(the_root)
remove_parameter_by_key(the_root, 'outputs.isotropic')
emptyMap = copy.deepcopy(the_root)
remove_parameter_by_key(the_root, 'outputs.ortho')
remove_choice(the_root, 'outputs.mode', 'orthofit')
remove_independant_choices(the_root, 'outputs.mode', 'orthofit')
merged = copy.deepcopy(the_root)
split = split_by_choice(the_root, 'map')
the_list = []
for key in split:
if key == 'utm':
the_doc = split[key]
remove_parameter_by_key(the_doc, 'map.epsg.code')
defaultWrite('%s-%s' % (available_app, key), the_doc)
the_list.append(the_doc)
elif key == 'epsg':
the_doc = split[key]
remove_parameter_by_key(the_doc, 'map.utm.northhem')
remove_parameter_by_key(the_doc, 'map.utm.zone')
defaultWrite('%s-%s' % (available_app, key), the_doc)
the_list.append(the_doc)
remove_choice(merged, 'map', 'utm')
remove_choice(merged, 'map', 'epsg')
remove_parameter_by_key(merged, 'map.epsg.code')
remove_parameter_by_key(merged, 'map.utm.northhem')
remove_parameter_by_key(merged, 'map.utm.zone')
old_app_name = merged.find('key').text
merged.find('key').text = '%s-%s' % (old_app_name, 'lambert-WGS84')
merged.find('longname').text = '%s (%s)' % (old_app_name, 'lambert-WGS84')
defaultWrite('%s-%s' % (available_app, 'lambert-WGS84'), merged)
the_list.append(merged)
remove_parameter_by_key(emptyMap, 'map')
remove_parameter_by_key(emptyMap, 'map.epsg.code')
remove_parameter_by_key(emptyMap, 'map.utm.northhem')
remove_parameter_by_key(emptyMap, 'map.utm.zone')
remove_choice(emptyMap, 'outputs.mode', 'autosize')
remove_independant_choices(emptyMap, 'outputs.mode', 'autosize')
remove_choice(emptyMap, 'outputs.mode', 'autospacing')
remove_independant_choices(emptyMap, 'outputs.mode', 'autospacing')
old_app_name = emptyMap.find('key').text
emptyMap.find('key').text = '%s-%s' % (old_app_name, 'fit-to-ortho')
emptyMap.find('longname').text = '%s (%s)' % (old_app_name, 'fit-to-ortho')
defaultWrite('%s-%s' % (available_app, 'fit-to-ortho'), emptyMap)
the_list.append(emptyMap)
return the_list
def getDimensionalityReduction(available_app, original_dom_document):
"""
Remove rescale.outmin and rescale.outmax, split by method (ica, maf, napca and pca), and adjust the parameters of each resulting app.
"""
the_root = original_dom_document
remove_parameter_by_key(the_root, 'rescale.outmin')
remove_parameter_by_key(the_root, 'rescale.outmax')
split = split_by_choice(the_root, 'method')
the_list = []
for key in split:
if key == 'maf':
the_doc = split[key]
remove_parameter_by_key(the_doc, 'outinv')
defaultWrite('%s-%s' % (available_app, key), the_doc)
the_list.append(the_doc)
else:
defaultWrite('%s-%s' % (available_app, key), split[key])
the_list.append(split[key])
return the_list
def getPansharpening(available_app, original_dom_document):
"""
Split by method (bayes, lmvm, rcs)
"""
the_root = original_dom_document
split = split_by_choice(the_root, 'method')
the_list = []
for key in split:
defaultWrite('%s-%s' % (available_app, key), split[key])
the_list.append(split[key])
return the_list
def getPixelValue(available_app, original_dom_document):
the_root = original_dom_document
remove_parameter_by_key(the_root, 'cl')
defaultWrite(available_app, the_root)
return [the_root]
def getExtractROI(available_app, original_dom_document):
"""
Split by mode (standard, fit) and adapt the parameters of each resulting app.
Delete the GEOID and DEM parameters, as they are not updated when the OTB
algorithms are created at QGIS startup; their values are picked from the settings.
"""
the_root = original_dom_document
remove_parameter_by_key(the_root, 'cl')
deleteGeoidSrtm(the_root)
split = split_by_choice(the_root, 'mode')
the_list = []
for key in split:
if key == 'standard':
the_doc = split[key]
remove_parameter_by_key(the_doc, 'mode.fit.elev.dem')
remove_parameter_by_key(the_doc, 'mode.fit.elev.geoid')
remove_parameter_by_key(the_doc, 'mode.fit.elev.default')
remove_parameter_by_key(the_doc, 'mode.fit.ref')
defaultWrite('%s-%s' % (available_app, key), the_doc)
the_list.append(the_doc)
else:
#key == 'fit'
the_doc = split[key]
remove_parameter_by_key(the_doc, 'startx')
remove_parameter_by_key(the_doc, 'starty')
remove_parameter_by_key(the_doc, 'sizex')
remove_parameter_by_key(the_doc, 'sizey')
defaultWrite('%s-%s' % (available_app, key), the_doc)
the_list.append(split[key])
return the_list
def getQuicklook(available_app, original_dom_document):
the_root = original_dom_document
remove_parameter_by_key(the_root, 'cl')
defaultWrite(available_app, the_root)
return [the_root]
def getRigidTransformResample(available_app, original_dom_document):
"""
    Split by transformation (id, rotation, translation).
"""
the_root = original_dom_document
split = split_by_choice(the_root, 'transform.type')
the_list = []
for key in split:
defaultWrite('%s-%s' % (available_app, key), split[key])
the_list.append(split[key])
return the_list
def getHomologousPointsExtraction(available_app, original_dom_document):
the_list = defaultSplit(available_app, original_dom_document, 'mode')
return the_list
def getGenerateRPCSensorModel(available_app, original_dom_document):
the_root = original_dom_document
remove_dependant_choices(the_root, 'map', 'wgs')
remove_other_choices(the_root, 'map', 'wgs')
defaultWrite(available_app, the_root)
return [the_root]
def getRefineSensorModel(available_app, original_dom_document):
the_root = original_dom_document
remove_dependant_choices(the_root, 'map', 'wgs')
remove_other_choices(the_root, 'map', 'wgs')
defaultWrite(available_app, the_root)
return [the_root]
def getSegmentation(available_app, original_dom_document):
"""
Remove the choice raster and split by filter (cc, edison, meanshift, mprofiles, watershed)
"""
the_root = original_dom_document
#remove_choice(the_root, 'filter', 'edison')
#remove_independant_choices(the_root, 'filter', 'edison')
#remove_choice(the_root, 'filter', 'meanshift')
#remove_independant_choices(the_root, 'filter', 'meanshift')
remove_choice(the_root, 'mode', 'raster')
remove_independant_choices(the_root, 'mode', 'raster')
split = split_by_choice(the_root, 'filter')
the_list = []
for key in split:
defaultWrite('%s-%s' % (available_app, key), split[key])
the_list.append(split[key])
return the_list
def getKMeansClassification(available_app, original_dom_document):
the_root = original_dom_document
remove_parameter_by_key(the_root, 'rand')
defaultWrite(available_app, the_root)
return [the_root]
def getTrainSVMImagesClassifier(available_app, original_dom_document):
the_root = original_dom_document
remove_parameter_by_key(the_root, 'rand')
defaultWrite(available_app, the_root)
return [the_root]
def getComputeConfusionMatrix(available_app, original_dom_document):
"""
Split by ref (raster, vector)
"""
the_root = original_dom_document
#remove_independant_choices(the_root, 'ref', 'vector')
#remove_choice(the_root, 'ref', 'vector')
#defaultWrite(available_app, the_root)
split = split_by_choice(the_root, 'ref')
the_list = []
for key in split:
defaultWrite('%s-%s' % (available_app, key), split[key])
the_list.append(split[key])
    return the_list
def getOpticalCalibration(available_app, original_dom_document):
"""
    Remove the toc option (keep only toa) and remove all atmospheric (atmo.*) parameters.
"""
#the_list = defaultSplit(available_app, original_dom_document, 'level')
the_root = original_dom_document
remove_independant_choices(the_root, 'level', 'toc')
remove_choice(the_root, 'level', 'toc')
remove_parameter_by_key(the_root, 'atmo.aerosol')
remove_parameter_by_key(the_root, 'atmo.oz')
remove_parameter_by_key(the_root, 'atmo.wa')
remove_parameter_by_key(the_root, 'atmo.pressure')
remove_parameter_by_key(the_root, 'atmo.opt')
remove_parameter_by_key(the_root, 'atmo.aeronet')
remove_parameter_by_key(the_root, 'radius')
defaultWrite(available_app, the_root)
return [the_root]
def getSarRadiometricCalibration(available_app, original_dom_document):
    # TODO ** before doing anything, check support for SAR data in QGIS
the_root = original_dom_document
defaultWrite(available_app, the_root)
return [the_root]
def getSmoothing(available_app, original_dom_document):
"""
Split by type (anidif, gaussian, mean)
"""
#import copy
#the_root = copy.deepcopy(original_dom_document)
#remove_dependant_choices(the_root, 'type', 'anidif')
#remove_other_choices(the_root, 'type', 'anidif')
#defaultWrite('%s-anidif' % available_app, the_root)
#the_root = copy.deepcopy(original_dom_document)
#remove_independant_choices(the_root, 'type', 'anidif')
#remove_choice(the_root, 'type', 'anidif')
#defaultWrite(available_app, the_root)
the_root = original_dom_document
split = split_by_choice(the_root, 'type')
the_list = []
for key in split:
defaultWrite('%s-%s' % (available_app, key), split[key])
the_list.append(split[key])
return the_list
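# Hypothetical sketch of the split_by_choice pattern this file relies on (the
# real helper is defined elsewhere): deep-copy the XML document once per
# choice value and prune the other values, much like the commented-out
# deepcopy code at the top of getSmoothing above.
def _split_by_choice_sketch(doc, choice_key, choice_values):
    import copy
    result = {}
    for value in choice_values:
        the_copy = copy.deepcopy(doc)
        for other in choice_values:
            if other != value:
                remove_independant_choices(the_copy, choice_key, other)
                remove_choice(the_copy, choice_key, other)
        result[value] = the_copy
    return result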
def getColorMapping(available_app, original_dom_document):
"""
    Remove the colortolabel option.
    Split by method (custom, continuous, optimal, image) and adapt the parameters of each resulting app.
"""
the_root = original_dom_document
remove_independant_choices(the_root, 'op', 'colortolabel')
remove_choice(the_root, 'op', 'colortolabel')
split = split_by_choice(the_root, 'method')
the_list = []
for key in split:
if key == 'custom':
the_doc = split[key]
remove_parameter_by_key(the_doc, 'method.continuous.lut')
remove_parameter_by_key(the_doc, 'method.continuous.min')
remove_parameter_by_key(the_doc, 'method.continuous.max')
remove_parameter_by_key(the_doc, 'method.optimal.background')
remove_parameter_by_key(the_doc, 'method.image.in')
remove_parameter_by_key(the_doc, 'method.image.low')
remove_parameter_by_key(the_doc, 'method.image.up')
defaultWrite('%s-%s' % (available_app, key), the_doc)
the_list.append(the_doc)
elif key == 'continuous':
the_doc = split[key]
remove_parameter_by_key(the_doc, 'method.custom.lut')
remove_parameter_by_key(the_doc, 'method.optimal.background')
remove_parameter_by_key(the_doc, 'method.image.in')
remove_parameter_by_key(the_doc, 'method.image.low')
remove_parameter_by_key(the_doc, 'method.image.up')
defaultWrite('%s-%s' % (available_app, key), the_doc)
the_list.append(the_doc)
elif key == 'optimal':
the_doc = split[key]
remove_parameter_by_key(the_doc, 'method.custom.lut')
remove_parameter_by_key(the_doc, 'method.continuous.lut')
remove_parameter_by_key(the_doc, 'method.continuous.min')
remove_parameter_by_key(the_doc, 'method.continuous.max')
remove_parameter_by_key(the_doc, 'method.image.in')
remove_parameter_by_key(the_doc, 'method.image.low')
remove_parameter_by_key(the_doc, 'method.image.up')
defaultWrite('%s-%s' % (available_app, key), the_doc)
the_list.append(the_doc)
else:
#key == 'image'
the_doc = split[key]
remove_parameter_by_key(the_doc, 'method.custom.lut')
remove_parameter_by_key(the_doc, 'method.continuous.lut')
remove_parameter_by_key(the_doc, 'method.continuous.min')
remove_parameter_by_key(the_doc, 'method.continuous.max')
remove_parameter_by_key(the_doc, 'method.optimal.background')
defaultWrite('%s-%s' % (available_app, key), the_doc)
the_list.append(split[key])
return the_list
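# A table-driven alternative sketch (a design note, not applied above): list
# each method's own parameter keys once -- every key below appears in
# getColorMapping -- and prune the keys of all the other methods in one loop.
_COLORMAPPING_METHOD_KEYS = {
    'custom': ['method.custom.lut'],
    'continuous': ['method.continuous.lut', 'method.continuous.min',
                   'method.continuous.max'],
    'optimal': ['method.optimal.background'],
    'image': ['method.image.in', 'method.image.low', 'method.image.up'],
}

def _prune_other_methods_sketch(the_doc, kept_method):
    for method, keys in _COLORMAPPING_METHOD_KEYS.items():
        if method == kept_method:
            continue
        for parameter_key in keys:
            remove_parameter_by_key(the_doc, parameter_key)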
def getFusionOfClassifications(available_app, original_dom_document):
"""
Split by method of fusion of classification (dempstershafer, majorityvoting)
"""
the_root = original_dom_document
split = split_by_choice(the_root, 'method')
the_list = []
for key in split:
defaultWrite('%s-%s' % (available_app, key), split[key])
the_list.append(split[key])
return the_list
def getTrainImagesClassifier(available_app, original_dom_document):
"""
Split by classifier (ann, bayes, boost, dt, gbt, knn, libsvm, rf, svm)
    Delete the GEOID and DEM parameters, as they are not updated when the OTB algorithms are created at QGIS launch.
    The values are picked from the settings instead.
"""
the_root = original_dom_document
deleteGeoidSrtm(the_root)
split = split_by_choice(the_root, 'classifier')
the_list = []
for key in split:
defaultWrite('%s-%s' % (available_app, key), split[key])
the_list.append(split[key])
return the_list
def getLineSegmentDetection(available_app, original_dom_document):
"""
    Delete the GEOID and DEM parameters, as they are not updated when the OTB algorithms are created at QGIS launch.
    The values are picked from the settings instead.
"""
the_root = original_dom_document
remove_parameter_by_key(the_root, 'elev.default')
remove_parameter_by_key(the_root, 'elev.geoid')
remove_parameter_by_key(the_root, 'elev.dem')
defaultWrite(available_app, the_root)
return [the_root]
def getImageEnvelope(available_app, original_dom_document):
"""
    Delete the GEOID and DEM parameters, as they are not updated when the OTB algorithms are created at QGIS launch.
    The values are picked from the settings instead.
"""
the_root = original_dom_document
remove_parameter_by_key(the_root, 'elev.default')
remove_parameter_by_key(the_root, 'elev.geoid')
remove_parameter_by_key(the_root, 'elev.dem')
defaultWrite(available_app, the_root)
return [the_root]
def getReadImageInfo(available_app, original_dom_document):
"""
    Remove parameters that are outputs of the application.
"""
the_root = original_dom_document
remove_parameter_by_key(the_root, 'outkwl')
remove_parameter_by_key(the_root, 'indexx')
remove_parameter_by_key(the_root, 'indexy')
remove_parameter_by_key(the_root, 'sizex')
remove_parameter_by_key(the_root, 'sizey')
remove_parameter_by_key(the_root, 'spacingx')
remove_parameter_by_key(the_root, 'spacingy')
remove_parameter_by_key(the_root, 'originx')
remove_parameter_by_key(the_root, 'originy')
remove_parameter_by_key(the_root, 'estimatedgroundspacingx')
remove_parameter_by_key(the_root, 'estimatedgroundspacingy')
remove_parameter_by_key(the_root, 'numberbands')
remove_parameter_by_key(the_root, 'sensor')
remove_parameter_by_key(the_root, 'id')
remove_parameter_by_key(the_root, 'time')
remove_parameter_by_key(the_root, 'ullat')
remove_parameter_by_key(the_root, 'ullon')
remove_parameter_by_key(the_root, 'urlat')
remove_parameter_by_key(the_root, 'urlon')
remove_parameter_by_key(the_root, 'lrlat')
remove_parameter_by_key(the_root, 'lrlon')
remove_parameter_by_key(the_root, 'lllat')
remove_parameter_by_key(the_root, 'lllon')
remove_parameter_by_key(the_root, 'town')
remove_parameter_by_key(the_root, 'country')
remove_parameter_by_key(the_root, 'rgb.r')
remove_parameter_by_key(the_root, 'rgb.g')
remove_parameter_by_key(the_root, 'rgb.b')
remove_parameter_by_key(the_root, 'projectionref')
remove_parameter_by_key(the_root, 'keyword')
remove_parameter_by_key(the_root, 'gcp.count')
remove_parameter_by_key(the_root, 'gcp.proj')
defaultWrite(available_app, the_root)
return [the_root]
def getComputeModulusAndPhase(available_app, original_dom_document):
"""
    Split the application according to the field nbinput.
    Give each of the resulting apps a new name.
"""
the_root = original_dom_document
split = split_by_choice(the_root, 'nbinput')
the_list = []
for key in split:
if key == 'one':
the_doc = split[key]
old_app_name = the_doc.find('key').text
the_doc.find('key').text = '%s-%s' % (old_app_name, 'OneEntry')
the_doc.find('longname').text = '%s (%s)' % (old_app_name, 'OneEntry')
defaultWrite('%s-%s' % (available_app, 'OneEntry'), the_doc)
the_list.append(the_doc)
        else:
the_doc = split[key]
old_app_name = the_doc.find('key').text
the_doc.find('key').text = '%s-%s' % (old_app_name, 'TwoEntries')
the_doc.find('longname').text = '%s (%s)' % (old_app_name, 'TwoEntries')
defaultWrite('%s-%s' % (available_app, 'TwoEntries'), the_doc)
the_list.append(the_doc)
return the_list
def getCompareImages(available_app, original_dom_document):
"""
    Remove mse, mae, psnr as they are outputs of the algorithm.
"""
the_root = original_dom_document
remove_parameter_by_key(the_root, 'mse')
remove_parameter_by_key(the_root, 'mae')
remove_parameter_by_key(the_root, 'psnr')
defaultWrite(available_app, the_root)
return [the_root]
def getRadiometricIndices(available_app, original_dom_document):
"""
These 3 indices are missing. Remove them from the list.
"""
the_root = original_dom_document
remove_choice(the_root, 'list', 'laindvilog')
remove_choice(the_root, 'list', 'lairefl')
remove_choice(the_root, 'list', 'laindviformo')
defaultWrite(available_app, the_root)
return [the_root]
def getConnectedComponentSegmentation(available_app, original_dom_document):
"""
    Delete the GEOID and DEM parameters, as they are not updated when the OTB algorithms are created at QGIS launch.
    The values are picked from the settings instead.
    """
    the_root = original_dom_document
    deleteGeoidSrtm(the_root)
defaultWrite(available_app, the_root)
return [the_root]
def getKmzExport(available_app, original_dom_document):
"""
    Delete the GEOID and DEM parameters, as they are not updated when the OTB algorithms are created at QGIS launch.
    The values are picked from the settings instead.
    """
    the_root = original_dom_document
    deleteGeoidSrtm(the_root)
defaultWrite(available_app, the_root)
return [the_root]
def getSuperimpose(available_app, original_dom_document):
"""
    Delete the GEOID and DEM parameters, as they are not updated when the OTB algorithms are created at QGIS launch.
    The values are picked from the settings instead.
    """
    the_root = original_dom_document
    deleteGeoidSrtm(the_root)
defaultWrite(available_app, the_root)
return [the_root]
def getStereoFramework(available_app, original_dom_document):
"""
    Delete the GEOID and DEM parameters, as they are not updated when the OTB algorithms are created at QGIS launch.
    The values are picked from the settings instead.
    """
    the_root = original_dom_document
    deleteGeoidSrtm(the_root)
defaultWrite(available_app, the_root)
return [the_root]
def deleteGeoidSrtm(doc):
    """
    Delete the GEOID and DEM parameters, as they are not updated when the OTB algorithms are created at QGIS launch.
    The values are picked from the settings instead.
    """
    for key_suffix in ("elev.geoid", "elev.dem"):
        matches = [item for item in doc.findall('.//parameter') if item.find('key').text.endswith(key_suffix)]
        for match in matches:
            doc.remove(match)
|
herow/planning_qgis
|
python/plugins/processing/algs/otb/maintenance/OTBSpecific_XMLcreation.py
|
Python
|
gpl-2.0
| 26,427
|
[
"Gaussian"
] |
f97b0a27517ae6c84fb869df7ed5a224ac0483c699dff652113280b7f148d499
|
from setuptools import setup
setup(name='SportStat',
version='0.1',
description='sport statistics data analysis and REST API',
url='',
author='Nathan Breitsch, Brian Breitsch',
author_email='brianbreitsch@gmail.com',
packages=['sportstat', 'sportstat_server'],
zip_safe=False)
|
brianbreitsch/sportstat
|
setup.py
|
Python
|
bsd-3-clause
| 320
|
[
"Brian"
] |
e5fba82f3c6dae0f21219f49080360b3e666c2876839f333b32ffd64ee6f583d
|
# created by Chirath R, chirath.02@gmail.com
# -*- coding: utf-8 -*-
import datetime
from django.contrib.auth.models import User
from django import forms
from django.contrib.sites.shortcuts import get_current_site
from django.forms.utils import ErrorList
from django.shortcuts import redirect
from django.urls import reverse, reverse_lazy
from django.views import View
from django.views.generic import DetailView, CreateView, ListView, UpdateView, DeleteView
from django.core.mail import send_mail
from django.core.mail import EmailMessage
from fosswebsite.settings import join_application_mail_list, join_application_reply_to
from workshop.forms import WorkshopRegistrationForm, FeedbackForm, WorkshopForm
from workshop.models import Workshop, WorkshopRegistration, WorkshopGallery, WorkshopFeedback
class WorkshopDetailView(DetailView):
model = Workshop
pk_url_kwarg = 'workshop_id'
def get_context_data(self, **kwargs):
context = super(WorkshopDetailView, self).get_context_data(**kwargs)
registrations = WorkshopRegistration.objects.filter(workshop=self.get_object())
no_of_seats_left = self.get_object().number_of_seats - len(registrations)
context['seats_left'] = True
if no_of_seats_left <= 0:
context['seats_left'] = False
no_of_seats_left = 0
context['no_of_seats_left'] = no_of_seats_left
feedback = WorkshopFeedback.objects.filter(workshop=self.get_object())
context['feedback'] = feedback
if self.request.user.is_superuser or self.request.user == self.get_object().user:
context['edit_permission'] = True
return context
class WorkshopCreateView(CreateView):
form_class = WorkshopForm
template_name = 'base/form.html'
success_url = '/workshop'
    def get(self, request, *args, **kwargs):
        # CreateView has no existing object whose owner could be checked,
        # so gate creation on superuser status
        if not request.user.is_superuser:
            return redirect('permission_denied')
        return super(WorkshopCreateView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(WorkshopCreateView, self).get_context_data(**kwargs)
context['heading'] = 'New Workshop'
context['title'] = 'Workshops'
return context
def form_valid(self, form):
form.instance.user = self.request.user
return super(WorkshopCreateView, self).form_valid(form)
# TODO(1) update workshop crashes
class WorkshopUpdateView(UpdateView):
form_class = WorkshopForm
template_name = 'base/form.html'
model = Workshop
def get(self, request, *args, **kwargs):
        if not (request.user.is_superuser or request.user == self.get_object().user):
            return redirect('permission_denied')
return super(WorkshopUpdateView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(WorkshopUpdateView, self).get_context_data(**kwargs)
context['heading'] = 'Update Workshop'
context['title'] = 'Workshops'
return context
def form_valid(self, form):
form.instance.user = self.request.user
return super(WorkshopUpdateView, self).form_valid(form)
def post(self, request, *args, **kwargs):
        if not (request.user.is_superuser or request.user == self.get_object().user):
            return redirect('permission_denied')
return super(WorkshopUpdateView, self).post(request, *args, **kwargs)
class WorkshopDeleteView(DeleteView):
model = Workshop
template_name = 'workshop/confirm_delete.html'
success_url = reverse_lazy('workshop_list')
def get(self, request, *args, **kwargs):
        if not (request.user.is_superuser or request.user == self.get_object().user):
            return redirect('permission_denied')
return super(WorkshopDeleteView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
        if not (request.user.is_superuser or request.user == self.get_object().user):
            return redirect('permission_denied')
return super(WorkshopDeleteView, self).post(request, *args, **kwargs)
class WorkshopRegisterFormView(CreateView):
form_class = WorkshopRegistrationForm
template_name = 'base/form.html'
success_url = '/workshop/success/'
def get(self, request, *args, **kwargs):
workshop = Workshop.objects.get(id=self.kwargs.get('workshop_id', None))
        if WorkshopRegistration.objects.filter(workshop=workshop).count() >= workshop.number_of_seats:
return redirect(reverse('workshop_detail', kwargs={'workshop_id': kwargs.get('workshop_id', None)}))
return super(WorkshopRegisterFormView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(WorkshopRegisterFormView, self).get_context_data(**kwargs)
context['heading'] = Workshop.objects.get(id=self.kwargs.get('workshop_id', None)).name
return context
def form_valid(self, form):
workshop = Workshop.objects.get(id=self.kwargs.get('workshop_id', None))
        # filter() never raises DoesNotExist, so no try/except is needed here
        application = WorkshopRegistration.objects.filter(workshop=workshop,
                                                          email=form.cleaned_data.get('email'))
        if application.exists():
form._errors[forms.forms.NON_FIELD_ERRORS] = ErrorList([
u'You are already registered'
])
return self.form_invalid(form)
valid_form = super(WorkshopRegisterFormView, self).form_valid(form)
# generate urls
# list_url = ''.join(['http://', get_current_site(self.request).domain,
# reverse('workshop_list', kwargs={'workshop_id': workshop.id})])
#
# # mail data
# subject = 'Registration for ' + workshop.name + ' - ' + form.cleaned_data.get('name')
# content = form.cleaned_data.get('name') + ' registered for ' + workshop.name + ' at ' + \
# str(datetime.datetime.now()) + '. \n\nPlease visit ' + list_url + ' for more details.'
#
# to_address_list = list(User.objects.filter(is_superuser=True).values_list('email', flat=True))
# sent mail when application is submitted
# send_mail(subject, content, 'amritapurifoss@gmail.com', to_address_list, fail_silently=False)
mail_content = "Hi " + form.cleaned_data.get('name') + ", \n\n" + \
"Great to know that you are interested in '" + workshop.name + "' workshop conducted by " + \
"FOSS@Amrita. We got your application and it's being processed. " + \
"Please come to the ground floor lab on 27th or 28th after 4:30pm for registration. " \
"Also there will be meeting in Acharya hall at 4 pm on 27th July (Friday)." + \
" \n\nThank you, \n\nFOSS@Amrita"
contest_mail_content = "Hi " + form.cleaned_data.get('name') + ", \n\n" + \
"Great to know that you are interested in '" + workshop.name + "' contest conducted " + \
"by FOSS@Amrita. We got your application, you need to send us a link to your artwork" + \
"/video (Upload your work to Google drive and share the link).\n\n" + \
"You can submit the link and your details on this page: " + workshop.link + \
" latest by " + str(workshop.end_date_time.date()) + ".\n\n" \
"Contact us at " + str(workshop.contact_info) + ". \n\nNote: You need to submit " + \
"your work before the last date to be eligible for the prize." + \
" \n\nThank you, \n\nFOSS@Amrita"
to_address_list = [form.cleaned_data.get('email')]
# if workshop.price == 0.0:
# mail_content = mail_content
email = EmailMessage(
workshop.name + ' registration',
mail_content,
'amritapurifoss@gmail.com',
to_address_list,
join_application_mail_list,
reply_to=join_application_reply_to,
headers={'Message-ID': 'foss@amrita'},
)
email.send(fail_silently=False)
to_address_list.remove(form.cleaned_data.get('email'))
self.object.workshop = workshop
self.object.save()
return valid_form
class WorkshopRegistrationListView(ListView):
model = WorkshopRegistration
def get_context_data(self, **kwargs):
paid = str(self.request.GET.get('paid', None))
gender = str(self.request.GET.get('gender', None))
workshop = Workshop.objects.get(id=self.kwargs.get('workshop_id', None))
context = super(WorkshopRegistrationListView, self).get_context_data(**kwargs)
save = False
if paid == 'True':
context['object_list'] = WorkshopRegistration.objects.filter(workshop=workshop, paid=True)
elif paid == 'False':
context['object_list'] = WorkshopRegistration.objects.filter(workshop=workshop, paid=False)
elif gender == 'male':
context['object_list'] = WorkshopRegistration.objects.filter(workshop=workshop, male_or_female='Male')
elif gender == 'female':
context['object_list'] = WorkshopRegistration.objects.filter(workshop=workshop, male_or_female='Female')
else:
context['object_list'] = WorkshopRegistration.objects.filter(workshop=workshop)
save = True
context['save'] = save
context['object_list'] = context['object_list'].order_by('-date')
context['workshop_id'] = workshop.id
context["count"] = context['object_list'].count()
context["workshop_name"] = workshop.name
return context
class WorkshopRegistrationUpdateView(UpdateView):
model = WorkshopRegistration
def post(self, request, *args, **kwargs):
workshop = Workshop.objects.get(id=self.kwargs.get('workshop_id', None))
workshop_registration_list = WorkshopRegistration.objects.filter(workshop=workshop)
for workshop_registration in workshop_registration_list:
workshop_registration.paid = False
workshop_registration.save()
for key, value in request.POST.items():
            try:
                workshop_registration_id = int(key)
            except ValueError:
                # key is not an integer id (e.g. the csrf token field); skip it
                workshop_registration_id = None
            except Exception:
                # any other failure while converting the key; skip it
                workshop_registration_id = None
if workshop_registration_id and value == 'on':
workshop_registration = WorkshopRegistration.objects.get(id=workshop_registration_id)
workshop_registration.paid = True
workshop_registration.save()
return redirect(reverse('workshop_registration_list', kwargs={'workshop_id': kwargs.get('workshop_id', None)}))
class WorkshopListView(ListView):
model = Workshop
def get_context_data(self, **kwargs):
context = super(WorkshopListView, self).get_context_data(**kwargs)
        workshops_qs = Workshop.objects.all().order_by('-start_date_time')
        workshops = []
        for wk in workshops_qs:
            registrations = WorkshopRegistration.objects.filter(workshop=wk)
            seats_left = wk.number_of_seats - registrations.count()
            if seats_left < 0:
                seats_left = 0
            workshops.append([wk, seats_left])
context['workshops'] = workshops
return context
class WorkshopFeedbackCreateView(CreateView):
form_class = FeedbackForm
template_name = 'base/form.html'
success_url = '/workshop/feedback/success'
def get_context_data(self, **kwargs):
context = super(WorkshopFeedbackCreateView, self).get_context_data(**kwargs)
context['heading'] = 'Feedback Form'
return context
def form_valid(self, form):
workshop = Workshop.objects.get(id=self.kwargs.get('workshop_id', None))
valid_form = super(WorkshopFeedbackCreateView, self).form_valid(form)
self.object.workshop = workshop
self.object.save()
return valid_form
class WorkshopGalleryListView(ListView):
model = WorkshopGallery
def get_context_data(self, **kwargs):
context = super(WorkshopGalleryListView, self).get_context_data(**kwargs)
context['id'] = self.kwargs['pk']
return context
class WorkshopGalleryCreateView(CreateView):
model = WorkshopGallery
fields = ['workshop', 'image']
def form_valid(self, form):
form.instance.created_by = self.request.user
response = super(WorkshopGalleryCreateView, self).form_valid(form)
return response
def post(self, request, *args, **kwargs):
workshop = Workshop.objects.get(id=self.kwargs['pk'])
image = request.FILES.get('image')
WorkshopGallery(workshop=workshop, image=image).save()
return redirect('image_list', self.kwargs['pk'])
class WorkshopGalleryDeleteView(DeleteView):
model = WorkshopGallery
def get_success_url(self):
return reverse('image_list', kwargs={'pk': self.object.workshop.id})
def post(self, request, *args, **kwargs):
        if not (request.user.is_superuser or request.user == self.get_object().created_by):
            return redirect('permission_denied')
return super(WorkshopGalleryDeleteView, self).post(request, *args, **kwargs)
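# Hypothetical urls.py wiring for the views above -- a sketch only (the real
# project's prefixes may differ), but the route names and keyword arguments
# match the reverse()/redirect() calls used in this module.
#
# from django.urls import path
# from workshop import views
#
# urlpatterns = [
#     path('workshop/', views.WorkshopListView.as_view(), name='workshop_list'),
#     path('workshop/<int:workshop_id>/', views.WorkshopDetailView.as_view(),
#          name='workshop_detail'),
#     path('workshop/<int:workshop_id>/registrations/',
#          views.WorkshopRegistrationListView.as_view(),
#          name='workshop_registration_list'),
#     path('workshop/<int:pk>/gallery/', views.WorkshopGalleryListView.as_view(),
#          name='image_list'),
# ]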
|
amfoss/fosswebsite
|
workshop/views.py
|
Python
|
mit
| 13,671
|
[
"VisIt"
] |
6306bd938f22b06a3da5295d81046b369d7f4a7e3afe6052fef43e053ac705be
|
import numpy as np
import warnings
from .. import Spectrum
def test_eqw():
dx = 0.1
x = np.arange(-6,6,dx)
y = 1-np.exp(-x**2 / 2.)
with warnings.catch_warnings():
# ignore warning about creating an empty header
warnings.simplefilter('ignore')
sp = Spectrum(xarr=x, data=y)
sp.baseline(exclude=[-5,5], order=0, subtract=False)
sp.specfit(fittype='gaussian', guesses=(-1, 0, 0.5))
eqw_nofit = sp.specfit.EQW(fitted=False)
np.testing.assert_almost_equal(sp.specfit.EQW(), (1-y).sum() * dx, decimal=5)
np.testing.assert_almost_equal(sp.specfit.EQW(continuum=1), (1-y).sum() * dx, decimal=5)
np.testing.assert_almost_equal(eqw_nofit, (1-y).sum() * dx, decimal=4)
return sp
def test_eqw_plot():
dx = 0.1
x = np.arange(-6,6,dx)
y = 1-np.exp(-x**2 / 2.)
with warnings.catch_warnings():
# ignore warning about creating an empty header
warnings.simplefilter('ignore')
sp = Spectrum(xarr=x, data=y)
sp.plotter()
sp.baseline(exclude=[-5,5], order=0, subtract=False)
sp.specfit(fittype='gaussian', guesses=(-1, 0, 0.5))
eqw = sp.specfit.EQW()
eqw_nofit = sp.specfit.EQW(fitted=False)
eqw_cont = sp.specfit.EQW(plotcolor='g', fitted=True, continuum=1,
plot=True,
components=False, annotate=True,
loc='lower left', xmin=None, xmax=None)
np.testing.assert_almost_equal(eqw, (1-y).sum() * dx, decimal=5)
np.testing.assert_almost_equal(eqw_cont, (1-y).sum() * dx, decimal=5)
np.testing.assert_almost_equal(eqw_nofit, (1-y).sum() * dx, decimal=4)
return sp
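# Standalone numerical check (an illustrative addition, not part of the test
# suite) of the rectangle-rule identity the tests above assert: with
# continuum 1 and absorption profile y, EQW == sum(1 - y) * dx. For a
# unit-depth Gaussian the analytic value is sqrt(2*pi) ~ 2.5066.
def _eqw_rectangle_rule_demo():
    dx = 0.1
    x = np.arange(-6, 6, dx)
    y = 1 - np.exp(-x**2 / 2.)
    eqw_numeric = (1 - y).sum() * dx
    # the residual is tiny: only gridding and the [-6, 6] truncation contribute
    return abs(eqw_numeric - np.sqrt(2 * np.pi))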
|
low-sky/pyspeckit
|
pyspeckit/spectrum/tests/test_eqw.py
|
Python
|
mit
| 1,681
|
[
"Gaussian"
] |
315c59d01e3919435773639eb63246c8c0cbf9c8702d6d0a96642673af708916
|
# -*-python-*-
#
# Copyright (C) 1999-2013 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
#
# idiff: display differences between files highlighting intraline changes
#
# -----------------------------------------------------------------------
from __future__ import generators
import difflib
import sys
import re
import ezt
import sapi
def sidebyside(fromlines, tolines, context):
"""Generate side by side diff"""
### for some reason mdiff chokes on \n's in input lines
line_strip = lambda line: line.rstrip("\n")
fromlines = map(line_strip, fromlines)
tolines = map(line_strip, tolines)
gap = False
for fromdata, todata, flag in difflib._mdiff(fromlines, tolines, context):
if fromdata is None and todata is None and flag is None:
gap = True
else:
from_item = _mdiff_split(flag, fromdata)
to_item = _mdiff_split(flag, todata)
yield _item(gap=ezt.boolean(gap), columns=(from_item, to_item))
gap = False
# \0 and \1 below are difflib's literal intraline markers; [-+^] avoids an
# accidental character range inside the class
_re_mdiff = re.compile("\0([-+^])(.*?)\1")
def _mdiff_split(flag, (line_number, text)):
"""Break up row from mdiff output into segments"""
segments = []
pos = 0
while True:
m = _re_mdiff.search(text, pos)
if not m:
segments.append(_item(text=sapi.escape(text[pos:]), type=None))
break
if m.start() > pos:
segments.append(_item(text=sapi.escape(text[pos:m.start()]), type=None))
if m.group(1) == "+":
segments.append(_item(text=sapi.escape(m.group(2)), type="add"))
elif m.group(1) == "-":
segments.append(_item(text=sapi.escape(m.group(2)), type="remove"))
elif m.group(1) == "^":
segments.append(_item(text=sapi.escape(m.group(2)), type="change"))
pos = m.end()
return _item(segments=segments, line_number=line_number)
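# Quick illustration (an added helper, not part of ViewVC) of the markers
# _mdiff_split parses: difflib._mdiff wraps intraline changes as "\0+text\1",
# "\0-text\1" or "\0^text\1"; the example values below are indicative only.
def _mdiff_marker_demo():
  rows = list(difflib._mdiff(["spam\n"], ["spat\n"]))
  (_n1, from_text), (_n2, to_text), _flag = rows[0]
  # from_text / to_text come back roughly as 'spa\x00^m\x01\n' / 'spa\x00^t\x01\n'
  return from_text, to_text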
def unified(fromlines, tolines, context):
"""Generate unified diff"""
diff = difflib.Differ().compare(fromlines, tolines)
lastrow = None
for row in _trim_context(diff, context):
if row[0].startswith("? "):
yield _differ_split(lastrow, row[0])
lastrow = None
else:
if lastrow:
yield _differ_split(lastrow, None)
lastrow = row
if lastrow:
yield _differ_split(lastrow, None)
def _trim_context(lines, context_size):
"""Trim context lines that don't surround changes from Differ results
yields (line, leftnum, rightnum, gap) tuples"""
# circular buffer to hold context lines
context_buffer = [None] * (context_size or 0)
context_start = context_len = 0
# number of context lines left to print after encountering a change
context_owed = 0
# current line numbers
leftnum = rightnum = 0
# whether context lines have been dropped
gap = False
for line in lines:
row = save = None
if line.startswith("- "):
leftnum = leftnum + 1
row = line, leftnum, None
context_owed = context_size
elif line.startswith("+ "):
rightnum = rightnum + 1
row = line, None, rightnum
context_owed = context_size
else:
      if line.startswith(" "):
        leftnum = leftnum + 1
        rightnum = rightnum + 1
if context_owed > 0:
context_owed = context_owed - 1
elif context_size is not None:
save = True
row = line, leftnum, rightnum
if save:
# don't yield row right away, store it in buffer
context_buffer[(context_start + context_len) % context_size] = row
if context_len == context_size:
context_start = (context_start + 1) % context_size
gap = True
else:
context_len = context_len + 1
else:
      # yield row, but first drain anything left in the context buffer
while context_len:
yield context_buffer[context_start] + (gap,)
gap = False
context_start = (context_start + 1) % context_size
context_len = context_len - 1
yield row + (gap,)
gap = False
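# Illustration (an added note, not ViewVC code) of the rows _trim_context
# yields: (differ_line, leftnum, rightnum, gap). For example, with
# context_size=1,
#   list(_trim_context(difflib.Differ().compare(["a\n", "b\n"],
#                                               ["a\n", "c\n"]), 1))
# produces roughly
#   [('  a\n', 1, 1, False), ('- b\n', 2, None, False),
#    ('+ c\n', None, 2, False)]
# i.e. buffered context lines are drained just before the first change row.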
_re_differ = re.compile(r"[-+^]+")
def _differ_split(row, guide):
"""Break row into segments using guide line"""
line, left_number, right_number, gap = row
if left_number and right_number:
type = ""
elif left_number:
type = "remove"
elif right_number:
type = "add"
segments = []
pos = 2
if guide:
assert guide.startswith("? ")
for m in _re_differ.finditer(guide, pos):
if m.start() > pos:
segments.append(_item(text=sapi.escape(line[pos:m.start()]), type=None))
segments.append(_item(text=sapi.escape(line[m.start():m.end()]),
type="change"))
pos = m.end()
segments.append(_item(text=sapi.escape(line[pos:]), type=None))
return _item(gap=ezt.boolean(gap), type=type, segments=segments,
left_number=left_number, right_number=right_number)
class _item:
def __init__(self, **kw):
vars(self).update(kw)
try:
### Using difflib._mdiff function here was the easiest way of obtaining
### intraline diffs for use in ViewVC, but it doesn't exist prior to
### Python 2.4 and is not part of the public difflib API, so for now
### fall back if it doesn't exist.
difflib._mdiff
except AttributeError:
sidebyside = None
|
marcellodesales/svnedge-console
|
svn-server/lib/viewvc/idiff.py
|
Python
|
agpl-3.0
| 5,484
|
[
"VisIt"
] |
c02786d086859a6372538f01c2f25f595018066556f7ce535d688c8cec4db555
|
#!/usr/bin/env python
#
# Scipy.stats examples.
#
#
# agomez (at) tacc.utexas.edu
# 30 Oct 2014
#
# ---------------------------------------------------------------------
import scipy as sp
import numpy as np
s = sp.rand(50)
# Show the mean, variance, std. deviation and the median
mean = sp.mean(s)
std = sp.std(s)
print("Mean : {0:8.6f}".format(mean))
print("Variance : {0:8.6f}".format(sp.var(s)))
print("Std. deviation : {0:8.6f}".format(std))
print("Median : {0:8.6f}".format(sp.median(s)))
from scipy import stats
x = sp.linspace(-3*std, 3*std, 50)
# Survival function (probability that the variate has a value greater than the given value)
y = stats.norm.sf(x, loc=mean, scale=std)
import matplotlib.pyplot as plt
plt.plot(x,y, color="black")
plt.xlabel("Variate")
plt.ylabel("Probability")
plt.title("SF for Gaussian of mean = {0} & std. deviation = {1}".format(mean, std))
plt.show()
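# Added sanity check (not part of the original example): the survival
# function evaluated at the mean of a normal distribution is exactly 0.5,
# since half of the probability mass lies above the mean.
print("SF at the mean : {0:8.6f}".format(stats.norm.sf(mean, loc=mean, scale=std)))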
|
antoniogi/HPC
|
Python/TACC_HPC/3_scipy/stats.py
|
Python
|
apache-2.0
| 893
|
[
"Gaussian"
] |
08cbc67583df07f3103c0e77d1c858d49ac5d2b1c680ba0a1774b404cfe9052c
|
"""This module defines an alternative ASE interface to VASP.
In contrast to the original VASP-interface an additional
correction can be added to the potential energy and the forces.
"""
import numpy as np
import vasp
import sys
from ase.dftd.dftd_interface import d2_pbc, d3_pbc, xc_name
class Vasp_d2(vasp.Vasp):
#
def get_potential_energy(self, atoms, force_consistent=False, dft_d_cutoff_radius=30.0):
"""
Altered version of the original get_potential_energy function
in the class Vasp. The function obtains the DFT energy by using
the original call for the VASP package and adds the DFT-D2 contribution
to the converged SCF-energy.
"""
self.update(atoms)
# Conversion factors a.u. -> eV
Eh__2__eV = 27.211396132
#
# Get functional name as string
functional = xc_name(str.lower(vasp.Vasp.get_xc_functional(self)))
#
# Calling original VASP-calculator for energy
self.energy_free_or_zero = vasp.Vasp.get_potential_energy(self, atoms, force_consistent)
#
# Call DFT-D module: Energy and gradients
self.dispersion_correction, self.dft_d_gradient_contribution = d2_pbc(atoms, functional)
#
# Convert to proper units
self.dispersion_correction = self.dispersion_correction * Eh__2__eV
#
# Print out components (Useful?)
print >> sys.stdout, ' '
print >> sys.stdout, 'DFT total energy : ', self.energy_free_or_zero
print >> sys.stdout, 'DFT-D2 correction : ', self.dispersion_correction
print >> sys.stdout, ' '
print >> sys.stdout, 'DFT-D2 final corrected energy: ', self.energy_free_or_zero + self.dispersion_correction
print >> sys.stdout, ' '
#
# Adding correction contribution to energy
return self.energy_free_or_zero + self.dispersion_correction
def get_forces(self, atoms, dft_d_cutoff_radius=30.0):
"""
Altered version of the original get_forces function in the Vasp-class.
The function obtains the DFT forces by using the original call for the
VASP package and adds the DFT-D2 contribution to the calculated forces.
"""
self.update(atoms)
# Conversion factors a.u. -> eV and a.u. -> eV/Angst
Eh__2__eV = 27.211396132
Eh_rb__2__eV_Angst = 51.422086162
#
# Get functional name as string
functional = xc_name(str.lower(vasp.Vasp.get_xc_functional(self)))
#
# Calling original VASP-calculator for forces
self.dft_forces = vasp.Vasp.get_forces(self, atoms)
#
# Call DFT-D module: Energy and gradients
self.dispersion_correction, self.dft_d_gradient_contribution = d2_pbc(atoms, functional)
#
# Convert to proper units
self.dispersion_correction = self.dispersion_correction * Eh__2__eV
self.dft_d_gradient_contribution = self.dft_d_gradient_contribution * Eh_rb__2__eV_Angst
#
        print
        print 'DFT-D total gradients:', -self.dft_d_gradient_contribution[0] + self.dft_forces[0]
        for ind_i in range(1, len(self.dft_forces)):
            print '                      ', -self.dft_d_gradient_contribution[ind_i] + self.dft_forces[ind_i]
        print
#
# Adding correction contributions to forces
# Note the (-) sign: DFT-D module delivers gradients, not forces
return self.dft_forces - self.dft_d_gradient_contribution
#
# End of class Vasp_d2
#
#
class Vasp_d3(vasp.Vasp):
#
def get_potential_energy(self, atoms, force_consistent=False, dft_d_cutoff_radius=30.0):
"""
Altered version of the original get_potential_energy function
in the class Vasp. The function obtains the DFT energy by using
the original call for the VASP package and adds the DFT-D3 contribution
to the converged SCF-energy.
"""
# self.update(atoms)
# Conversion factors a.u. -> eV
Eh__2__eV = 27.211396132
#
# Get functional name as string
functional = xc_name(str.lower(vasp.Vasp.get_xc_functional(self)))
#
# Calling original VASP-calculator for energy
self.energy_free_or_zero = vasp.Vasp.get_potential_energy(self, atoms, force_consistent)
#
# Call DFT-D module: Energy and gradients
self.dispersion_correction, self.dft_d_gradient_contribution = d3_pbc(atoms, functional)
#
# Convert to proper units
self.dispersion_correction = self.dispersion_correction * Eh__2__eV
#
# Print out components (Useful?)
print >> sys.stdout, ' '
print >> sys.stdout, 'DFT total energy : ', self.energy_free_or_zero
print >> sys.stdout, 'DFT-D3 correction : ', self.dispersion_correction
print >> sys.stdout, ' '
print >> sys.stdout, 'DFT-D3 final corrected energy: ', self.energy_free_or_zero + self.dispersion_correction
print >> sys.stdout, ' '
#
# Adding correction contribution to energy
return self.energy_free_or_zero + self.dispersion_correction
def get_forces(self, atoms, dft_d_cutoff_radius=30.0):
"""
Altered version of the original get_forces function in the Vasp-class.
The function obtains the DFT forces by using the original call for the
VASP package and adds the DFT-D3 contribution to the calculated forces.
"""
# self.update(atoms)
# Conversion factors a.u. -> eV and a.u. -> eV/Angst
Eh__2__eV = 27.211396132
Eh_rb__2__eV_Angst = 51.422086162
#
# Get functional name as string
functional = xc_name(str.lower(vasp.Vasp.get_xc_functional(self)))
#
# Calling original VASP-calculator for forces
self.dft_forces = vasp.Vasp.get_forces(self, atoms)
#
# Call DFT-D module: Energy and gradients
self.dispersion_correction, self.dft_d_gradient_contribution = d3_pbc(atoms, functional)
#
# Convert to proper units
self.dispersion_correction = self.dispersion_correction * Eh__2__eV
self.dft_d_gradient_contribution = self.dft_d_gradient_contribution * Eh_rb__2__eV_Angst
#
#print
#print 'DFT-D3 total gradients:', -self.dft_d_gradient_contribution[0] + self.forces[0]
#for ind_i in range(1,len(self.forces)):
# print ' ', -self.dft_d_gradient_contribution[ind_i] + self.forces[ind_i]
#print
#
# Adding correction contributions to forces
# Note the (-) sign: DFT-D module delivers gradients, not forces
return self.dft_forces - self.dft_d_gradient_contribution
#
# End of class Vasp_d3
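#
# Hypothetical usage sketch (attribute and keyword names assumed; this simply
# mirrors how the plain ase Vasp calculator is attached, so the corrected
# variants above should drop in the same way):
#
# from ase.calculators.vasp_d import Vasp_d2
# atoms.set_calculator(Vasp_d2(xc='PBE'))
# energy = atoms.get_potential_energy()   # SCF energy + D2 correction, in eV
# forces = atoms.get_forces()             # DFT forces minus D2 gradients
#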
|
alexei-matveev/ase-local
|
ase/calculators/vasp_d.py
|
Python
|
gpl-2.0
| 6,228
|
[
"ASE",
"VASP"
] |
6314aa57aa74f4d45df7961b2f4c3de438e3188a3511db7979cbbc50fa3be43d
|
#!/usr/bin/env python
#
# earthshook.py
# pyanalysis
#
# Created by Brian Baughman on 1/23/10.
# Copyright (c) 2010 Brian Baughman. All rights reserved.
#
try:
import re, sys, time
from time import sleep
from os import environ, _exit
except:
print 'Failed to load base modules'
sys.exit(-1)
try:
from bitly import shorten
import tweepy
# Home directory
homedir = environ['HOME']
curtime = time.strftime('%Y-%m-%d %H:%M:%S')
# stop if something looks wrong
except:
print 'Failed to load modules'
_exit(-1)
# Define input files and output log file
try:
quakelog = environ['QUAKELOG']
except:
quakelog = '%s/logs/quakealert.log'%homedir
try:
twapi = environ['TWAPI']
except:
twapi = '%s/.twapi'%homedir
try:
twusr = environ['TWUSR']
except:
twusr = '%s/.twusr'%homedir
# Define some regular expressions
anglere = '\([0-9\.\-]+ degrees\)'
milesre = '\([0-9\.\-]+ miles\)'
bitlyre = '"shortCNAMEUrl":\s*"([^"]+)"'
# Define stuff to find links
linktag = 'For subsequent updates, maps, and technical information, see:'
linkbase = 'http://earthquake.usgs.gov'
try:
log = open(quakelog, 'a')
except:
log = sys.stdout
log.write('%s: Cannot open log file: %s\n'%(curtime,quakelog))
################################################################################
# Useful functions
################################################################################
def easy_exit(eval):
'''
Function to clean up before exiting and exiting itself
'''
try:
log.close()
except:
_exit(eval)
_exit(eval)
try:
twapif = open(twapi,'r')
consumer_key, consumer_secret = twapif.readlines()
consumer_key = consumer_key.strip()
consumer_secret = consumer_secret.strip()
twapif.close()
except:
log.write('Failed to load twitter API info!\n')
easy_exit(-2)
try:
twusrf = open(twusr,'r')
usrs = twusrf.readlines()
key, secret = usrs[0].split()
key = key.strip()
secret = secret.strip()
twusrf.close()
except:
log.write('Failed to load twitter user info!\n')
easy_exit(-3)
def clean(lines):
rv = []
for l in lines:
if l.strip()=='':
continue
else:
rv.append(l.strip())
return rv
def gettbody(lines):
try:
lines
except:
return None
if lines is None:
return None
start=-1
end=len(lines)
for i in range(end):
cline = lines[i]
if cline.find('== PRELIMINARY EARTHQUAKE REPORT ==')>=0:
start=i
elif cline.find('DISCLAIMER:')>=0:
if start==-1:
return None
else:
return lines[start:i+1]
return None
def getlink(lines):
try:
lines
except:
return None
if lines is None:
return None
end = len(lines)
for i in range(end):
if lines[i]==linktag:
if lines[i+1].find(linkbase)>=0:
return lines[i+1]
return None
class evtinfo:
def __init__(self):
self.mag = None
self.lat = None
self.long = None
self.localtime = None
self.utctime = None
self.nearby = None
def prepinfo(info):
rv = evtinfo()
try:
info
except:
return rv
if info is None:
return rv
end = len(info)
for i in range(end):
s = info[i]
sloc = s.find(':')
if sloc<0:
continue
nm = s[0:sloc].strip()
dt = s[sloc+1:].strip()
if nm=='Magnitude':
rv.mag=dt
elif nm=='Universal Time (UTC)':
rv.utctime='%s UTC'%dt
elif nm=='Time near the Epicenter':
rv.localtime=dt
elif nm=='Geographic coordinates':
try:
tlat,tlong = dt.split(',')
tlat = tlat.strip()
tlong = tlong.strip()
if tlat.find('N')>=0:
rv.lat = '+%s'%tlat.replace('N','')
elif tlat.find('S')>=0:
rv.lat = '-%s'%tlat.replace('S','')
if tlong.find('E')>=0:
rv.long = '+%s'%tlong.replace('E','')
elif tlong.find('W')>=0:
rv.long = '-%s'%tlong.replace('W','')
except:
continue
elif nm=='Location with respect to nearby cities':
csub = re.sub(anglere,'',info[i+1].strip())
csub = re.sub(milesre,'',csub)
rv.nearby = csub.replace(' ',' ')
return rv
def formate(pinfo):
rv=''
if pinfo.mag!=None:
rv='%s earthquake '%pinfo.mag
if pinfo.utctime!=None:
rv='%soccurred at %s'%(rv,pinfo.utctime)
if pinfo.nearby!=None:
rv='%s, %s'%(rv,pinfo.nearby)
return rv
def toascii(s):
rv = []
for c in s:
try:
c.decode('ascii')
rv.append(c)
except:
continue
rv = ''.join(rv)
return rv
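# Illustration of how prepinfo/formate assemble the tweet text, using
# fabricated sample lines (hypothetical input, not a real USGS bulletin):
#
# _sample = ['Magnitude: 4.5',
#            'Universal Time (UTC): 23 Jan 2010 12:00:00',
#            'Geographic coordinates: 36.10N, 120.50W',
#            'Location with respect to nearby cities:',
#            '10 km (6 miles) NE of Sometown']
# formate(prepinfo(_sample))
# -> '4.5 earthquake occurred at 23 Jan 2010 12:00:00 UTC, 10 km NE of Sometown'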
################################################################################
# Read in data from standard input
ogdata = sys.stdin.readlines()
sdata = clean(ogdata)
lk = getlink(sdata)
tb = gettbody(sdata)
pio = prepinfo(tb)
fmttw = formate(pio)
if lk!=None:
shrtlk = shorten(lk)
fmttw='%s. %s'%(fmttw,shrtlk)
else:
log.write('Failed to find link.\n')
fmttw = fmttw.strip().replace(' ',' ').decode('unicode_escape')
try:
# Create twitter authentication handler
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(key, secret)
# Create interface to twitter API
api = tweepy.API(auth)
except:
log.write('Failed TAuth: %s\n'%fmttw)
if len(fmttw)>140:
fmttw = fmttw.replace(' earthquake occurred',' quake')
for i in xrange(5):
try:
stt = api.update_status(status=fmttw,lat=float(pio.lat),long=float(pio.long))
log.write('%s\n'%toascii(fmttw))
easy_exit(0)
except:
sleep(5)
log.write('Failed Send: %s\n'%fmttw)
easy_exit(0)
|
bbaugh/earthshook
|
earthshook.py
|
Python
|
gpl-2.0
| 5,498
|
[
"Brian"
] |
92d084b70470fd1a4e0d39623f43881e1def60ff476bdce1c2fac4d29aa4d7e3
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import errno
import json
from sqlalchemy import orm
from DIRAC.DataManagementSystem.Client.FTS3Job import FTS3Job
from DIRAC.DataManagementSystem.private import FTS3Utilities
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Utilities.DErrno import cmpError
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.DataManagementSystem.Client.FTS3File import FTS3File
from DIRAC.Core.Utilities.JEncode import JSerializable
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Operation import Operation as rmsOperation
from DIRAC.RequestManagementSystem.Client.File import File as rmsFile
from DIRAC.RequestManagementSystem.Client.Request import Request as rmsRequest
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
class FTS3Operation(JSerializable):
""" Abstract class to represent an operation to be executed by FTS. It is a
container for FTSFiles, as well as for FTSJobs.
There can be a mapping between one FTS3Operation and one RMS Operation.
The FTS3Operation takes care of generating the appropriate FTSJobs,
and to perform a callback when the work with FTS is over. The actual
generation and callback depends on the subclass.
  This class should not be instantiated directly, but rather one of its subclasses.
"""
ALL_STATES = ['Active', # Default state until FTS has done everything
'Processed', # Interactions with FTS done, but callback not done
'Finished', # Everything was done
'Canceled', # Canceled by the user
'Failed', # I don't know yet
]
FINAL_STATES = ['Finished', 'Canceled', 'Failed']
INIT_STATE = 'Active'
_attrToSerialize = ['operationID', 'username', 'userGroup', 'rmsReqID', 'rmsOpID',
'sourceSEs', 'ftsFiles', 'activity', 'priority',
'ftsJobs', 'creationTime', 'lastUpdate', 'error', 'status']
def __init__(self, ftsFiles=None, username=None, userGroup=None, rmsReqID=-1,
rmsOpID=0, sourceSEs=None, activity=None, priority=None):
"""
:param ftsFiles: list of FTS3Files object that belongs to the operation
:param username: username whose proxy should be used
:param userGroup: group that should be used with username
:param rmsReqID: ID of the Request in the RMS system
:param rmsOpID: ID of the Operation in the RMS system
:param sourceSEs: list of SE to be used as source (if applicable)
:param activity: FTS activity to use
:param priority: FTS priority to use
"""
############################
# persistent attributes
self.username = username
self.userGroup = userGroup
self.rmsReqID = rmsReqID
self.rmsOpID = rmsOpID
if isinstance(sourceSEs, list):
sourceSEs = ','.join(sourceSEs)
self.sourceSEs = sourceSEs
self.ftsFiles = ftsFiles if ftsFiles else []
self.activity = activity
self.priority = priority
self.ftsJobs = []
now = datetime.datetime.utcnow().replace(microsecond=0)
self.creationTime = now
self.lastUpdate = now
self.error = None
self.status = FTS3Operation.INIT_STATE
########################
self.reqClient = None
self.dManager = None
self._log = None
self.fts3Plugin = None
self.init_on_load()
@orm.reconstructor
def init_on_load(self):
""" This method initializes some attributes.
It is called by sqlalchemy (which does not call __init__)
"""
self._vo = None
# Note that in the case of an FTS3Operation created from an RMS
# object, the members here will probably be "wrong" in the sense
# that the VO will not be known by then.
# It does not really matter however, since we do not perform anything
# on an operation created this way, it's just to be then serialized
# in the DB.
self.dManager = DataManager()
self.rssClient = ResourceStatus()
self.fts3Plugin = FTS3Utilities.getFTS3Plugin(vo=self.vo)
opID = getattr(self, 'operationID', None)
loggerName = '%s/' % opID if opID else ''
loggerName += 'req_%s/op_%s' % (self.rmsReqID, self.rmsOpID)
self._log = gLogger.getSubLogger(loggerName, True)
@property
def vo(self):
    """:returns: the VO of the user group"""
if self._vo:
return self._vo
if self.userGroup:
self._vo = getVOForGroup(self.userGroup)
return self._vo
def isTotallyProcessed(self):
""" Returns True if and only if there is nothing
else to be done by FTS for this operation.
All files are successful or definitely failed
"""
if self.status == 'Processed':
return True
    fileStatuses = {f.status for f in self.ftsFiles}
# If all the files are in a final state
if fileStatuses <= set(FTS3File.FINAL_STATES):
self.status = 'Processed'
return True
return False
def _getFilesToSubmit(self, maxAttemptsPerFile=10):
""" Return the list of FTS3files that can be submitted
Either because they never were submitted, or because
we can make more attempts
:param maxAttemptsPerFile: the maximum number of attempts to be tried for a file
:return: List of FTS3File to submit
"""
toSubmit = []
for ftsFile in self.ftsFiles:
if ftsFile.attempt >= maxAttemptsPerFile:
ftsFile.status = 'Defunct'
# The file was never submitted or
# The file failed from the point of view of FTS
# but no more than the maxAttemptsPerFile
elif ftsFile.status in [FTS3File.INIT_STATE] + FTS3File.FTS_FAILED_STATES:
toSubmit.append(ftsFile)
return toSubmit
@staticmethod
def _checkSEAccess(seName, accessType, vo=None):
"""Check the Status of a storage element
:param seName: name of the StorageElement
    :param accessType: ReadAccess, WriteAccess, CheckAccess, RemoveAccess
:return: S_ERROR if not allowed or error, S_OK() otherwise
"""
# Check that the target is writable
# access = self.rssClient.getStorageElementStatus( seName, accessType )
# if not access["OK"]:
# return access
# if access["Value"][seName][accessType] not in ( "Active", "Degraded" ):
# return S_ERROR( "%s does not have %s in Active or Degraded" % ( seName, accessType ) )
status = StorageElement(seName, vo=vo).getStatus()
if not status['OK']:
return status
status = status['Value']
accessType = accessType.replace('Access', '')
if not status[accessType]:
return S_ERROR(errno.EACCES, "%s does not have %s in Active or Degraded" % (seName, accessType))
return S_OK()
def _createNewJob(self, jobType, ftsFiles, targetSE, sourceSE=None):
""" Create a new FTS3Job object
:param jobType: type of job to create (Transfer, Staging, Removal)
:param ftsFiles: list of FTS3File objects the job has to work on
:param targetSE: SE on which to operate
:param sourceSE: source SE, only useful for Transfer jobs
:return: FTS3Job object
"""
newJob = FTS3Job()
newJob.type = jobType
newJob.sourceSE = sourceSE
newJob.targetSE = targetSE
newJob.activity = self.activity
newJob.priority = self.priority
newJob.username = self.username
newJob.userGroup = self.userGroup
newJob.vo = self.vo
newJob.filesToSubmit = ftsFiles
newJob.operationID = getattr(self, 'operationID')
newJob.rmsReqID = self.rmsReqID
return newJob
def _callback(self):
"""Actually performs the callback
"""
raise NotImplementedError("You should not be using the base class")
def callback(self):
""" Trigger the callback once all the FTS interactions are done
and update the status of the Operation to 'Finished' if successful
"""
self.reqClient = ReqClient()
res = self._callback()
if res['OK']:
self.status = 'Finished'
return res
def prepareNewJobs(self, maxFilesPerJob=100, maxAttemptsPerFile=10):
""" Prepare the new jobs that have to be submitted
:param maxFilesPerJob: maximum number of files assigned to a job
    :param maxAttemptsPerFile: maximum number of retries after an FTS failure
:return: list of jobs
"""
raise NotImplementedError("You should not be using the base class")
def _updateRmsOperationStatus(self):
""" Update the status of the Files in the rms operation
:return: S_OK with a dict:
* request: rms Request object
* operation: rms Operation object
* ftsFilesByTarget: dict {SE: [ftsFiles that were successful]}
"""
log = self._log.getSubLogger("_updateRmsOperationStatus/%s/%s" %
(getattr(self, 'operationID'), self.rmsReqID), child=True)
res = self.reqClient.getRequest(self.rmsReqID)
if not res['OK']:
return res
request = res['Value']
res = request.getWaiting()
if not res["OK"]:
log.error("Unable to find 'Scheduled' operation in request")
res = self.reqClient.putRequest(request, useFailoverProxy=False, retryMainService=3)
if not res['OK']:
log.error("Could not put back the request !", res['Message'])
return S_ERROR("Could not find scheduled operation")
operation = res['Value']
# We index the files of the operation by their IDs
rmsFileIDs = {}
for opFile in operation:
rmsFileIDs[opFile.FileID] = opFile
# Files that failed to transfer
defunctRmsFileIDs = set()
# { SE : [FTS3Files] }
ftsFilesByTarget = {}
for ftsFile in self.ftsFiles:
if ftsFile.status == 'Defunct':
log.info(
"File failed to transfer, setting it to failed in RMS", "%s %s" %
(ftsFile.lfn, ftsFile.targetSE))
defunctRmsFileIDs.add(ftsFile.rmsFileID)
continue
if ftsFile.status == 'Canceled':
log.info(
"File canceled, setting it Failed in RMS", "%s %s" %
(ftsFile.lfn, ftsFile.targetSE))
defunctRmsFileIDs.add(ftsFile.rmsFileID)
continue
# SHOULD NEVER HAPPEN !
if ftsFile.status != 'Finished':
log.error(
"Callback called with file in non terminal state", "%s %s" %
(ftsFile.lfn, ftsFile.targetSE))
res = self.reqClient.putRequest(request, useFailoverProxy=False, retryMainService=3)
if not res['OK']:
log.error("Could not put back the request !", res['Message'])
return S_ERROR("Callback called with file in non terminal state")
ftsFilesByTarget.setdefault(ftsFile.targetSE, []).append(ftsFile)
    # Now, we set each rmsFile as Done in the operation, provided
    # it is not among the defunct files.
# We cannot do this in the previous list because in the FTS system,
# each destination is a separate line in the DB but not in the RMS
for ftsFile in self.ftsFiles:
opFile = rmsFileIDs[ftsFile.rmsFileID]
opFile.Status = 'Failed' if ftsFile.rmsFileID in defunctRmsFileIDs else 'Done'
return S_OK({'request': request, 'operation': operation, 'ftsFilesByTarget': ftsFilesByTarget})
@classmethod
def fromRMSObjects(cls, rmsReq, rmsOp, username):
""" Construct an FTS3Operation object from the RMS Request and Operation corresponding.
The attributes taken are the OwnerGroup, Request and Operation IDS, sourceSE,
and activity and priority if they are defined in the Argument field of the operation
:param rmsReq: RMS Request object
:param rmsOp: RMS Operation object
:param username: username to which associate the FTS3Operation (normally comes from the Req OwnerDN)
:returns: FTS3Operation object
"""
ftsOp = cls()
ftsOp.username = username
ftsOp.userGroup = rmsReq.OwnerGroup
ftsOp.rmsReqID = rmsReq.RequestID
ftsOp.rmsOpID = rmsOp.OperationID
ftsOp.sourceSEs = rmsOp.SourceSE
try:
argumentDic = json.loads(rmsOp.Arguments)
ftsOp.activity = argumentDic['activity']
ftsOp.priority = argumentDic['priority']
except Exception:
pass
return ftsOp
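# Hypothetical usage sketch (variable names assumed): roughly how the
# scheduling side hands work to FTS3, combining the fromRMSObjects
# classmethod above with the FTS3TransferOperation subclass defined below --
#
#   ftsOp = FTS3TransferOperation.fromRMSObjects(rmsReq, rmsOp, username)
#   ftsOp.ftsFiles = ftsFiles  # list of FTS3File objects
#   res = ftsOp.prepareNewJobs(maxFilesPerJob=100, maxAttemptsPerFile=10)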
class FTS3TransferOperation(FTS3Operation):
""" Class to be used for a Replication operation
"""
def prepareNewJobs(self, maxFilesPerJob=100, maxAttemptsPerFile=10):
log = self._log.getSubLogger("_prepareNewJobs", child=True)
filesToSubmit = self._getFilesToSubmit(maxAttemptsPerFile=maxAttemptsPerFile)
log.debug("%s ftsFiles to submit" % len(filesToSubmit))
newJobs = []
# {targetSE : [FTS3Files] }
res = FTS3Utilities.groupFilesByTarget(filesToSubmit)
if not res['OK']:
return res
filesGroupedByTarget = res['Value']
for targetSE, ftsFiles in filesGroupedByTarget.items():
res = self._checkSEAccess(targetSE, 'WriteAccess', vo=self.vo)
if not res['OK']:
# If the SE is currently banned, we just skip it
if cmpError(res, errno.EACCES):
log.info("Write access currently not permitted to %s, skipping." % targetSE)
else:
log.error(res)
for ftsFile in ftsFiles:
ftsFile.attempt += 1
continue
sourceSEs = self.sourceSEs.split(',') if self.sourceSEs is not None else []
# { sourceSE : [FTSFiles] }
res = FTS3Utilities.selectUniqueSource(ftsFiles, self.fts3Plugin, allowedSources=sourceSEs)
if not res['OK']:
return res
uniqueTransfersBySource, failedFiles = res['Value']
# Treat the errors of the failed files
for ftsFile, errMsg in failedFiles.items():
log.error("Error when selecting random sources", "%s, %s" % (ftsFile.lfn, errMsg))
# If the error is that the file does not exist in the catalog
# fail it !
if cmpError(errMsg, errno.ENOENT):
log.error("The file does not exist, setting it Defunct", "%s" % ftsFile.lfn)
ftsFile.status = 'Defunct'
# We don't need to check the source, since it is already filtered by the DataManager
for sourceSE, ftsFiles in uniqueTransfersBySource.items():
if self.__needsMultiHopStaging(sourceSE, targetSE):
log.verbose("Needs multihop staging, max files per job is 1")
maxFilesPerJob = 1
for ftsFilesChunk in breakListIntoChunks(ftsFiles, maxFilesPerJob):
newJob = self._createNewJob('Transfer', ftsFilesChunk, targetSE, sourceSE=sourceSE)
newJobs.append(newJob)
return S_OK(newJobs)
def __needsMultiHopStaging(self, sourceSEName, destSEName):
""" Checks whether transfers between the two SE given as parameters
need a multi hop transfer to stage with a different protocol
than the transfer one.
:param str sourceSEName: source storage element name
:param str destSEName: destination storage element name
:returns: boolean
"""
srcSE = StorageElement(sourceSEName, vo=self.vo)
dstSE = StorageElement(destSEName, vo=self.vo)
srcIsTape = srcSE.getStatus()['Value'].get('TapeSE', True)
if not srcIsTape:
return False
# To know if we will need a multihop staging transfer,
# we check whether we can generate transfer URLs
# for a fake LFN, and see if the protocol we get
# is compatible with staging
tpcProtocols = self.fts3Plugin.selectTPCProtocols(sourceSEName=sourceSEName, destSEName=destSEName)
res = dstSE.generateTransferURLsBetweenSEs('/%s/fakeLFN' % self.vo, srcSE, protocols=tpcProtocols)
# If there is an error, ignore it:
# it will be dealt with in the FTS3Job logic
if not res['OK']:
return False
srcProto, _destProto = res['Value']['Protocols']
if srcProto not in srcSE.localStageProtocolList:
return True
return False
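# Decision summary (derived from the code above, for the reader's convenience):
# disk source -> False; URL negotiation failure -> False (dealt with in FTS3Job);
# tape source whose negotiated source protocol can also stage locally -> False;
# any other tape source -> True, i.e. a multi-hop staging transfer is needed.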
def _callback(self):
"""" After a Transfer operation, we have to update the matching Request in the
RMS, and add the registration operation just before the ReplicateAndRegister one
NOTE: we don't use ReqProxy when putting the request back to avoid operational hell
"""
log = self._log.getSubLogger("callback", child=True)
# In case there is no Request associated to the Transfer
# we do not do the callback. Not really advised, but there is a feature
# request to use the FTS3 system without RMS
if self.rmsReqID == -1:
return S_OK()
# Now we check the status of the Request.
# in principle, it should be scheduled
res = self.reqClient.getRequestStatus(self.rmsReqID)
if not res['OK']:
log.error("Could not get request status", res)
return res
status = res['Value']
# If it is not scheduled, something went wrong
# and we will not modify it
if status != 'Scheduled':
# If the Request is in a final state, just leave it,
# and we consider our job done.
# (typically happens when the callback had already been done but not persisted to the FTS3DB)
if status in rmsRequest.FINAL_STATES:
log.warn(
"Request with id %s is not Scheduled (%s), but okay it is in a Final State" %
(self.rmsReqID, status))
return S_OK()
# If the Request is not in a final state, then something really wrong is going on,
# and we do not do anything, keep ourselves pending
else:
return S_ERROR("Request with id %s is not Scheduled:%s" % (self.rmsReqID, status))
res = self._updateRmsOperationStatus()
if not res['OK']:
return res
ftsFilesByTarget = res['Value']['ftsFilesByTarget']
request = res['Value']['request']
operation = res['Value']['operation']
registrationProtocols = DMSHelpers(vo=self.vo).getRegistrationProtocols()
log.info("will create %s 'RegisterReplica' operations" % len(ftsFilesByTarget))
for target, ftsFileList in ftsFilesByTarget.items():
log.info(
"creating 'RegisterReplica' operation for targetSE %s with %s files..." %
(target, len(ftsFileList)))
registerOperation = rmsOperation()
registerOperation.Type = "RegisterReplica"
registerOperation.Status = "Waiting"
registerOperation.TargetSE = target
if operation.Catalog:
registerOperation.Catalog = operation.Catalog
targetSE = StorageElement(target, vo=self.vo)
for ftsFile in ftsFileList:
opFile = rmsFile()
opFile.LFN = ftsFile.lfn
opFile.Checksum = ftsFile.checksum
# TODO: are we really ever going to change type... ?
opFile.ChecksumType = 'ADLER32'
opFile.Size = ftsFile.size
res = returnSingleResult(targetSE.getURL(ftsFile.lfn, protocol=registrationProtocols))
# This should never happen !
if not res["OK"]:
log.error("Could not get url", res['Message'])
continue
opFile.PFN = res["Value"]
registerOperation.addFile(opFile)
request.insertBefore(registerOperation, operation)
return self.reqClient.putRequest(request, useFailoverProxy=False, retryMainService=3)
class FTS3StagingOperation(FTS3Operation):
""" Class to be used for a Staging operation
"""
def prepareNewJobs(self, maxFilesPerJob=100, maxAttemptsPerFile=10):
log = gLogger.getSubLogger("_prepareNewJobs", child=True)
filesToSubmit = self._getFilesToSubmit(maxAttemptsPerFile=maxAttemptsPerFile)
log.debug("%s ftsFiles to submit" % len(filesToSubmit))
newJobs = []
# {targetSE : [FTS3Files] }
filesGroupedByTarget = FTS3Utilities.groupFilesByTarget(filesToSubmit)
for targetSE, ftsFiles in filesGroupedByTarget.items():
res = self._checkSEAccess(targetSE, 'ReadAccess', vo=self.vo)
if not res['OK']:
log.error(res)
continue
for ftsFilesChunk in breakListIntoChunks(ftsFiles, maxFilesPerJob):
newJob = self._createNewJob('Staging', ftsFilesChunk, targetSE, sourceSE=targetSE)
newJobs.append(newJob)
return S_OK(newJobs)
def _callback(self):
"""" After a Staging operation, we have to update the matching Request in the
RMS, and nothing more. If a callback is to be performed, it will be the next
operation in the request, and put by the caller
NOTE: we don't use ReqProxy when putting the request back to avoid operational hell
"""
res = self._updateRmsOperationStatus()
if not res['OK']:
return res
request = res['Value']['request']
return self.reqClient.putRequest(request, useFailoverProxy=False, retryMainService=3)
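# A hedged end-to-end sketch of how these classes are typically driven; the agent
# code and the callback entry point are assumptions, only prepareNewJobs is shown above:
#
#   op = FTS3TransferOperation.fromRMSObjects(rmsReq, rmsOp, username)
#   res = op.prepareNewJobs(maxFilesPerJob=100, maxAttemptsPerFile=10)
#   if res['OK']:
#       newJobs = res['Value']      # FTS3Jobs to be submitted by the agent
#   # once every FTS3File reaches a terminal state, the agent triggers the
#   # operation's callback, which dispatches to the _callback() implemented above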
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/Client/FTS3Operation.py
|
Python
|
gpl-3.0
| 21,190
|
[
"DIRAC"
] |
145d0380a23870a509ff13402a28650d36a8f60aba4b293e8399e7081e76a95e
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, gobject
from zeroinstall.support import tasks
from zeroinstall.injector import handler, download
version = '1.13'
class GUIHandler(handler.Handler):
dl_callbacks = None # Download -> [ callback ]
pulse = None
mainwindow = None
def _reset_counters(self):
if not self.monitored_downloads:
self.n_completed_downloads = 0
self.total_bytes_downloaded = 0
return False
def abort_all_downloads(self):
for dl in self.monitored_downloads:
dl.abort()
def downloads_changed(self):
if self.monitored_downloads and self.pulse is None:
def pulse():
self.mainwindow.update_download_status(only_update_visible = True)
return True
pulse()
self.pulse = gobject.timeout_add(200, pulse)
elif len(self.monitored_downloads) == 0:
# Delay before resetting, in case we start a new download quickly
gobject.timeout_add(500, self._reset_counters)
# Stop animation
if self.pulse:
gobject.source_remove(self.pulse)
self.pulse = None
self.mainwindow.update_download_status()
def impl_added_to_store(self, impl):
self.mainwindow.update_download_status(only_update_visible = True)
@tasks.async
def _switch_to_main_window(self, reason):
if self.mainwindow.systray_icon:
self.mainwindow.systray_icon.set_tooltip(reason)
self.mainwindow.systray_icon.set_blinking(True)
# Wait for the user to click the icon, then continue
yield self.mainwindow.systray_icon_blocker
yield tasks.TimeoutBlocker(0.5, 'Delay')
@tasks.async
def confirm_import_feed(self, pending, valid_sigs):
yield self._switch_to_main_window(_('Need to confirm a new GPG key'))
from zeroinstall.gtkui import trust_box
box = trust_box.TrustBox(pending, valid_sigs, parent = self.mainwindow.window)
box.show()
yield box.closed
@tasks.async
def confirm_install(self, message):
yield self._switch_to_main_window(_('Need to confirm installation of distribution packages'))
from zeroinstall.injector.download import DownloadAborted
import dialog
import gtk
box = gtk.MessageDialog(self.mainwindow.window,
gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION, gtk.BUTTONS_CANCEL,
str(message))
box.set_position(gtk.WIN_POS_CENTER)
install = dialog.MixedButton(_('Install'), gtk.STOCK_OK)
install.set_can_default(True)
box.add_action_widget(install, gtk.RESPONSE_OK)
install.show_all()
box.set_default_response(gtk.RESPONSE_OK)
box.show()
response = dialog.DialogResponse(box)
yield response
box.destroy()
if response.response != gtk.RESPONSE_OK:
raise DownloadAborted()
def report_error(self, ex, tb = None):
if isinstance(ex, download.DownloadAborted):
return # No need to tell the user about this, since they caused it
self.mainwindow.report_exception(ex, tb = tb)
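# A rough wiring sketch (assumed caller code, not part of this file); the handler
# mainly needs a 'mainwindow' object providing the methods referenced above:
#
#   handler = GUIHandler()
#   handler.mainwindow = main_window   # assumption: provides update_download_status() etc.
#   # downloads started through this handler then drive the pulse/counter logic above,
#   # and confirmations (GPG keys, distro packages) are routed through the dialogs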
|
michel-slm/0install
|
zeroinstall/0launch-gui/gui.py
|
Python
|
lgpl-2.1
| 2,898
|
[
"VisIt"
] |
123016491bb0f050d5a865a7acf19a240be7a2ea49a1f81f2f37b0248a9ddf2b
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979193.743975
__CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:13 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/ajax/epgpop.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class epgpop(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(epgpop, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
if len(VFFSL(SL,"events",True)) == 0: # generated from line 2, col 1
write(u'''<html xmlns="http://www.w3.org/1999/xhtml">
<html>
<head>
<title>No items found.</title>
</head>
<body style="background: #FFFFFF; scrollbar: auto;">
<img src="/images/not_found.jpg" title="No items found" border="0">
</body>
</html>
''')
else: # generated from line 12, col 1
write(u'''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<link type="text/css" href="/css/jquery-ui-1.8.18.custom.css" rel="stylesheet" />\t
<script type="text/javascript" src="/js/jquery-1.6.2.min.js"></script>
<script type="text/javascript" src="/js/jquery-ui-1.8.18.custom.min.js"></script>
<script type="text/javascript" src="/js/openwebif.js"></script>
<title>Open Webif Epg</title>
</head>
\t<body style="background: #1C478E; scrollbar: auto;">
\t\t
''')
for event in VFFSL(SL,"events",True): # generated from line 28, col 1
write(u'''\t\t<table style="font-size:12px;" width="100%" border="0" cellspacing="1" cellpadding="5">
\t\t\t<tr style="background-color: #f0f7fc;">
\t\t\t\t<td style="padding:0px" width="102px"><img width="100px" height="60px" src="''')
_v = VFFSL(SL,"event.picon",True) # u'$event.picon' on line 31, col 81
if _v is not None: write(_filter(_v, rawExpr=u'$event.picon')) # from line 31, col 81.
write(u'''" title="" border="0"></td>
\t\t\t\t<td style="font-size:13px;color: #061C37;font-weight: bold;" width="30%">''')
_v = VFFSL(SL,"event.sname",True) # u'$event.sname' on line 32, col 78
if _v is not None: write(_filter(_v, rawExpr=u'$event.sname')) # from line 32, col 78.
write(u'''<br />''')
_v = VFFSL(SL,"event.date",True) # u'$event.date' on line 32, col 96
if _v is not None: write(_filter(_v, rawExpr=u'$event.date')) # from line 32, col 96.
write(u'''</td>
\t\t\t\t<td>''')
_v = VFFSL(SL,"event.title",True) # u'$event.title' on line 33, col 9
if _v is not None: write(_filter(_v, rawExpr=u'$event.title')) # from line 33, col 9.
write(u'''</td>
\t\t\t</tr>
\t\t\t<tr style="background-color: #F0F7FC">
\t\t\t\t<td>''')
_v = VFFSL(SL,"event.begin",True) # u'$event.begin' on line 36, col 9
if _v is not None: write(_filter(_v, rawExpr=u'$event.begin')) # from line 36, col 9.
write(u'''</td>
\t\t\t\t<td>''')
_v = VFFSL(SL,"event.duration",True) # u'$event.duration' on line 37, col 9
if _v is not None: write(_filter(_v, rawExpr=u'$event.duration')) # from line 37, col 9.
write(u''' min.</td>
\t\t\t\t<td>''')
_v = VFFSL(SL,"event.shortdesc",True) # u'$event.shortdesc' on line 38, col 9
if _v is not None: write(_filter(_v, rawExpr=u'$event.shortdesc')) # from line 38, col 9.
write(u'''</td>
\t\t\t</tr>
\t\t\t<tr style="background-color: #F0F7FC">
\t\t\t\t<td valign="top">''')
_v = VFFSL(SL,"event.end",True) # u'$event.end' on line 41, col 22
if _v is not None: write(_filter(_v, rawExpr=u'$event.end')) # from line 41, col 22.
write(u'''</td>
\t\t\t\t<td colspan="2" rowspan="2">''')
_v = VFFSL(SL,"event.longdesc",True) # u'$event.longdesc' on line 42, col 33
if _v is not None: write(_filter(_v, rawExpr=u'$event.longdesc')) # from line 42, col 33.
write(u'''</td>
\t\t\t</tr>
\t\t\t<tr style="background-color: #F0F7FC">
\t\t\t\t<td style="padding:0px">
\t\t\t\t\t<a href="#" onclick="addTimerEvent(\'''')
_v = VFFSL(SL,"event.sref",True) # u'$event.sref' on line 46, col 42
if _v is not None: write(_filter(_v, rawExpr=u'$event.sref')) # from line 46, col 42.
write(u"""',""")
_v = VFFSL(SL,"event.id",True) # u'$event.id' on line 46, col 55
if _v is not None: write(_filter(_v, rawExpr=u'$event.id')) # from line 46, col 55.
write(u''');return false;"><img src="/images/timer.png" title="Add Timer" border="0"></a>\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t
\t\t\t\t\t<a target="_blank" href="http://www.imdb.com/find?s=all&q=''')
_v = VFFSL(SL,"quote",False)(VFFSL(SL,"event.title",True)) # u'$quote($event.title)' on line 47, col 68
if _v is not None: write(_filter(_v, rawExpr=u'$quote($event.title)')) # from line 47, col 68.
write(u'''"><img src="/images/imdb.png" title="Search IMDb" border="0"></a>
\t\t\t\t</td>
\t\t\t</tr>
\t\t\t<tr>
\t\t\t\t<td colspan="3"></td>
\t\t\t</tr>
\t\t</table>
''')
write(u'''\t\t\t\t
\t</body>
</html>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_epgpop= 'respond'
## END CLASS DEFINITION
if not hasattr(epgpop, '_initCheetahAttributes'):
templateAPIClass = getattr(epgpop, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(epgpop)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=epgpop()).run()
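# Regeneration note (an assumption about the build flow, not part of the generated file):
# this module is the output of compiling the .tmpl source referenced in __CHEETAH_src__
# above, typically via the Cheetah CLI, e.g.
#
#   cheetah compile epgpop.tmpl
#
# Hand edits here are lost on the next compile; change the template instead.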
|
pli3/Openwebif
|
plugin/controllers/views/ajax/epgpop.py
|
Python
|
gpl-2.0
| 9,261
|
[
"VisIt"
] |
97b136f58a9205cf3d4aa579c3c17daf9b72f40610acdd0414802d387775580d
|
import time
import socket
import random
import logging
import warnings
import six
from six.moves import range
from six.moves.queue import Queue
from io import BytesIO
from gzip import GzipFile
from itertools import count
import requests
from collections import deque
from threading import Thread, Event
from .utils import xauth, iterqueue
from .serialization import jsonencode
logger = logging.getLogger('hubstorage.batchuploader')
class BatchUploader(object):
# Wait time between all batches status checks
worker_loop_delay = 1.0
# Max number of retry attempts before giving up.
worker_max_retries = 200
# The delay grows quadratically with the number of attempts but is
# bounded with these values on both sides.
worker_min_interval = 30
worker_max_interval = 600
# Each delay is also randomized by multiplying by random(0.5, 1.5), so the
# total delay using the current parameters is an almost gaussian random
# number with the following characteristics (see Irwin-Hall distribution):
#
# - average = 30hrs
# - minimum = 15hrs
# - maximum = 45hrs
# - standard deviation = approx. 40m, which means that 95% of the time the
# total delay will be within 2*std = 1h20m of the average.
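#
# Worked check of the ~30h figure (consistent with the doctest in _tryupload,
# where integer division yields 30): min(x**2, 600) is x**2 for x = 0..24
# (24**2 = 576) and 600 for x = 25..199, so the un-randomized total is
# sum(x**2 for x in range(25)) + 175 * 600 = 4900 + 105000 = 109900 seconds,
# i.e. about 30.5 hours.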
def __init__(self, client):
self.client = client
self.closed = False
self._wait_event = Event()
self._writers = deque()
self._thread = Thread(target=self._worker)
self._thread.daemon = True
self._thread.start()
def create_writer(self, url, start=0, auth=None, size=1000, interval=15,
qsize=None, content_encoding='identity',
maxitemsize=1024 ** 2, callback=None):
assert not self.closed, 'Can not create new writers when closed'
auth = xauth(auth) or self.client.auth
w = _BatchWriter(url=url,
auth=auth,
size=size,
start=start,
interval=interval,
qsize=qsize,
maxitemsize=maxitemsize,
content_encoding=content_encoding,
uploader=self,
callback=callback)
self._writers.append(w)
return w
def close(self, timeout=None):
self.closed = True
self.interrupt()
self._thread.join(timeout)
def interrupt(self):
self._wait_event.set()
def __del__(self):
if not self.closed:
warnings.warn("%r not closed properly, some items may have been "
"lost!: %r" % (self.__class__.__name__, self._writers))
def _interruptable_sleep(self):
self._wait_event.wait(self.worker_loop_delay)
self._wait_event.clear()
def _worker(self):
ctr = count()
while True:
if not self._writers:
# Stop thread if closed and idle, but if open wait for writers
if self.closed:
break
self._interruptable_sleep()
continue
# Delay once all writers are processed
if (next(ctr) % len(self._writers) == 0) and not self.closed:
self._interruptable_sleep()
# Get next writer to process
w = self._writers.popleft()
# Close open writers if uploader is closed
if self.closed and not w.closed:
w.close(block=False)
# Checkpoint writer if eligible
now = time.time()
if w.itemsq.qsize() >= w.size or w.closed or w.flushme \
or w.checkpoint < now - w.interval:
self._checkpoint(w)
w.checkpoint = now
# Re-queue pending or open writers
if not (w.closed and w.itemsq.empty()):
self._writers.append(w)
def _checkpoint(self, w):
q = w.itemsq
qiter = iterqueue(q, w.size)
data = self._content_encode(qiter, w)
if qiter.count > 0:
response = self._tryupload({
'url': w.url,
'offset': w.offset,
'data': data,
'auth': w.auth,
'content-encoding': w.content_encoding,
})
w.offset += qiter.count
for _ in range(qiter.count):
q.task_done()
if w.callback is not None:
try:
w.callback(response)
except Exception:
logger.exception("Callback for %s failed", w.url)
def _content_encode(self, qiter, w):
ce = w.content_encoding
if ce == 'identity':
return _encode_identity(qiter)
elif ce == 'gzip':
return _encode_gzip(qiter)
else:
raise ValueError('Writer using unknown content encoding: %s' % ce)
def _tryupload(self, batch):
"""Retry uploads in case of server failures
Use polinomial backoff with 10 minutes maximum interval that accounts
for ~30 hours of total retry time.
>>> sum(min(x**2, 600) for x in range(200)) / 3600
30
"""
url = batch['url']
offset = batch['offset']
for retryn in range(self.worker_max_retries):
emsg = ''
try:
r = self._upload(batch)
r.raise_for_status()
if not (200 <= r.status_code < 300):
logger.warning('Discarding write to url=%s offset=%s: '
'[HTTP error %s] %s\n%s', url, offset,
r.status_code, r.reason, r.text.rstrip())
return r
except (socket.error, requests.RequestException) as e:
if isinstance(e, requests.HTTPError):
emsg = "[HTTP error {0}] {1}".format(e.response.status_code,
e.response.text.rstrip())
else:
emsg = str(e)
logger.info("Retrying url=%s offset=%s: %s", url, offset, emsg)
except Exception:
logger.exception('Non retryable failure on url=%s offset=%s',
url, offset)
break
backoff = min(max(retryn ** 2, self.worker_min_interval),
self.worker_max_interval)
time.sleep(backoff * (0.5 + random.random()))
def _upload(self, batch):
params = {'start': batch['offset']}
headers = {'content-encoding': batch['content-encoding']}
return self.client.session.request(
method='POST',
url=batch['url'],
data=batch['data'],
auth=batch['auth'],
timeout=self.client.connection_timeout,
params=params,
headers=headers,
)
class ValueTooLarge(ValueError):
"""Raised when a serialized item is greater than 1MB"""
class _BatchWriter(object):
#: Truncate overly big items to that many bytes for the error message.
ERRMSG_DATA_TRUNCATION_LEN = 1024
def __init__(self, url, start, auth, size, interval, qsize,
maxitemsize, content_encoding, uploader, callback=None):
self.url = url
self.offset = start
self._nextid = count(start)
self.auth = auth
self.size = size
self.interval = interval
self.maxitemsize = maxitemsize
self.content_encoding = content_encoding
self.checkpoint = time.time()
self.itemsq = Queue(size * 2 if qsize is None else qsize)
self.closed = False
self.flushme = False
self.uploader = uploader
self.callback = callback
def write(self, item):
assert not self.closed, 'attempting writes to a closed writer'
data = jsonencode(item)
if len(data) > self.maxitemsize:
truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
raise ValueTooLarge(
'Value exceeds max encoded size of {} bytes: {!r}'
.format(self.maxitemsize, truncated_data))
self.itemsq.put(data)
if self.itemsq.full():
self.uploader.interrupt()
return next(self._nextid)
def flush(self):
self.flushme = True
self._waitforq()
self.flushme = False
def close(self, block=True):
self.closed = True
if block:
self._waitforq()
def _waitforq(self):
self.uploader.interrupt()
self.itemsq.join()
def __str__(self):
return self.url
def _encode_identity(iterable):
data = BytesIO()
for item in iterable:
if isinstance(item, six.text_type):
item = item.encode('utf8')
data.write(item)
data.write(b'\n')
return data.getvalue()
def _encode_gzip(iterable):
data = BytesIO()
with GzipFile(fileobj=data, mode='w') as gzo:
for item in iterable:
if isinstance(item, six.text_type):
item = item.encode('utf8')
gzo.write(item)
gzo.write(b'\n')
return data.getvalue()
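# A minimal usage sketch (assumed caller code; 'client' is any object exposing the
# session/auth/connection_timeout attributes used above, e.g. a hubstorage client):
#
#   uploader = BatchUploader(client)
#   writer = uploader.create_writer(url, auth=auth, size=1000, interval=15)
#   for item in items:
#       writer.write(item)          # items are JSON-encoded and queued
#   writer.close()                  # blocks until the queue is drained
#   uploader.close()                # stops the background worker thread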
|
scrapinghub/python-hubstorage
|
hubstorage/batchuploader.py
|
Python
|
bsd-3-clause
| 9,293
|
[
"Gaussian"
] |
6b43d3a17bbe9c5ab23ec8650cc491a2ad77db697383c6db8165cacac30a92d4
|
import json
import logging
from datetime import datetime
import ddt
from django.urls import reverse
from django.http import Http404
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.utils import translation
from edx_django_utils.cache import RequestCache
from mock import ANY, Mock, call, patch
from six import text_type
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from django_comment_client.constants import TYPE_ENTRY, TYPE_SUBCATEGORY
from django_comment_client.permissions import get_team
from django_comment_client.tests.group_id import (
CohortedTopicGroupIdTestMixin,
GroupIdAssertionMixin,
NonCohortedTopicGroupIdTestMixin
)
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_client.tests.utils import (
CohortedTestCase,
ForumsEnableMixin,
config_course_discussions,
topic_name_to_id
)
from django_comment_client.utils import strip_none
from django_comment_common.models import (
CourseDiscussionSettings,
ForumsConfig,
FORUM_ROLE_STUDENT,
)
from django_comment_common.utils import ThreadContext, seed_permissions_roles
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from lms.djangoapps.discussion import views
from lms.djangoapps.discussion.views import _get_discussion_default_topic_id
from lms.djangoapps.discussion.views import course_discussions_settings_handler
from lms.djangoapps.teams.tests.factories import CourseTeamFactory, CourseTeamMembershipFactory
from lms.lib.comment_client.utils import CommentClientPaginatedResult
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts
from openedx.core.djangoapps.course_groups.tests.test_views import CohortViewsTestCase
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from openedx.core.djangoapps.waffle_utils.testutils import WAFFLE_TABLES
from openedx.features.enterprise_support.tests.mixins.enterprise import EnterpriseTestConsentRequired
from student.roles import CourseStaffRole, UserBasedRole
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import EventTestMixin, UrlResetMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MONGO_MODULESTORE,
ModuleStoreTestCase,
SharedModuleStoreTestCase
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
log = logging.getLogger(__name__)
QUERY_COUNT_TABLE_BLACKLIST = WAFFLE_TABLES
# pylint: disable=missing-docstring
class ViewsExceptionTestCase(UrlResetMixin, ModuleStoreTestCase):
shard = 4
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(ViewsExceptionTestCase, self).setUp()
# create a course
self.course = CourseFactory.create(org='MITx', course='999',
display_name='Robot Super Course')
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = 'student@edx.org'
password = 'test'
# Create the student
self.student = UserFactory(username=uname, password=password, email=email)
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
# Log the student in
self.client = Client()
assert self.client.login(username=uname, password=password)
config = ForumsConfig.current()
config.enabled = True
config.save()
@patch('student.models.cc.User.from_django_user')
@patch('student.models.cc.User.active_threads')
def test_user_profile_exception(self, mock_threads, mock_from_django_user):
# Mock the code that makes the HTTP requests to the cs_comment_service app
# for the profiled user's active threads
mock_threads.return_value = [], 1, 1
# Mock the code that makes the HTTP request to the cs_comment_service app
# that gets the current user's info
mock_from_django_user.return_value = Mock()
url = reverse('user_profile',
kwargs={'course_id': text_type(self.course.id), 'user_id': '12345'}) # There is no user 12345
self.response = self.client.get(url)
self.assertEqual(self.response.status_code, 404)
@patch('student.models.cc.User.from_django_user')
@patch('student.models.cc.User.subscribed_threads')
def test_user_followed_threads_exception(self, mock_threads, mock_from_django_user):
# Mock the code that makes the HTTP requests to the cs_comment_service app
# for the profiled user's active threads
mock_threads.return_value = CommentClientPaginatedResult(collection=[], page=1, num_pages=1)
# Mock the code that makes the HTTP request to the cs_comment_service app
# that gets the current user's info
mock_from_django_user.return_value = Mock()
url = reverse('followed_threads',
kwargs={'course_id': text_type(self.course.id), 'user_id': '12345'}) # There is no user 12345
self.response = self.client.get(url)
self.assertEqual(self.response.status_code, 404)
def make_mock_thread_data(
course,
text,
thread_id,
num_children,
group_id=None,
group_name=None,
commentable_id=None,
is_commentable_divided=None,
):
data_commentable_id = (
commentable_id or course.discussion_topics.get('General', {}).get('id') or "dummy_commentable_id"
)
thread_data = {
"id": thread_id,
"type": "thread",
"title": text,
"body": text,
"commentable_id": data_commentable_id,
"resp_total": 42,
"resp_skip": 25,
"resp_limit": 5,
"group_id": group_id,
"context": (
ThreadContext.COURSE if get_team(data_commentable_id) is None else ThreadContext.STANDALONE
)
}
if group_id is not None:
thread_data['group_name'] = group_name
if is_commentable_divided is not None:
thread_data['is_commentable_divided'] = is_commentable_divided
if num_children is not None:
thread_data["children"] = [{
"id": "dummy_comment_id_{}".format(i),
"type": "comment",
"body": text,
} for i in range(num_children)]
return thread_data
def make_mock_collection_data(
course,
text,
thread_id,
num_children=None,
group_id=None,
commentable_id=None,
thread_list=None
):
if thread_list:
return [
make_mock_thread_data(course=course, text=text, num_children=num_children, **thread)
for thread in thread_list
]
else:
return [
make_mock_thread_data(
course=course,
text=text,
thread_id=thread_id,
num_children=num_children,
group_id=group_id,
commentable_id=commentable_id,
)
]
def make_mock_perform_request_impl(
course,
text,
thread_id="dummy_thread_id",
group_id=None,
commentable_id=None,
num_thread_responses=1,
thread_list=None
):
def mock_perform_request_impl(*args, **kwargs):
url = args[1]
if url.endswith("threads") or url.endswith("user_profile"):
return {
"collection": make_mock_collection_data(
course, text, thread_id, None, group_id, commentable_id, thread_list
)
}
elif thread_id and url.endswith(thread_id):
return make_mock_thread_data(
course=course,
text=text,
thread_id=thread_id,
num_children=num_thread_responses,
group_id=group_id,
commentable_id=commentable_id
)
elif "/users/" in url:
res = {
"default_sort_key": "date",
"upvoted_ids": [],
"downvoted_ids": [],
"subscribed_thread_ids": [],
}
# comments service adds these attributes when course_id param is present
if kwargs.get('params', {}).get('course_id'):
res.update({
"threads_count": 1,
"comments_count": 2
})
return res
else:
return None
return mock_perform_request_impl
def make_mock_request_impl(
course,
text,
thread_id="dummy_thread_id",
group_id=None,
commentable_id=None,
num_thread_responses=1,
thread_list=None,
):
impl = make_mock_perform_request_impl(
course,
text,
thread_id=thread_id,
group_id=group_id,
commentable_id=commentable_id,
num_thread_responses=num_thread_responses,
thread_list=thread_list
)
def mock_request_impl(*args, **kwargs):
data = impl(*args, **kwargs)
if data:
return Mock(status_code=200, text=json.dumps(data), json=Mock(return_value=data))
else:
return Mock(status_code=404)
return mock_request_impl
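# A short sketch of how these factories are used in the tests below (the course and
# the patched 'requests.request' mock come from the individual test cases):
#
#   mock_request.side_effect = make_mock_request_impl(
#       course=course, text="dummy content", thread_id="test_thread_id")
#   # any comment-service call made by the view under test now returns canned JSON,
#   # or a 404 mock when the impl yields no data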
class StringEndsWithMatcher(object):
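"""Equality matcher: equal to any string that ends with the given suffix."""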
def __init__(self, suffix):
self.suffix = suffix
def __eq__(self, other):
return other.endswith(self.suffix)
class PartialDictMatcher(object):
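"""Equality matcher: equal to any dict containing all of the expected key/value pairs."""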
def __init__(self, expected_values):
self.expected_values = expected_values
def __eq__(self, other):
return all([
key in other and other[key] == value
for key, value in self.expected_values.iteritems()
])
@patch('requests.request', autospec=True)
class SingleThreadTestCase(ForumsEnableMixin, ModuleStoreTestCase):
shard = 4
CREATE_USER = False
def setUp(self):
super(SingleThreadTestCase, self).setUp()
self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
self.student = UserFactory.create()
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
def test_ajax(self, mock_request):
text = "dummy content"
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
request = RequestFactory().get(
"dummy_url",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = self.student
response = views.single_thread(
request,
text_type(self.course.id),
"dummy_discussion_id",
"test_thread_id"
)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content)
# strip_none is being used to perform the same transform that the
# django view performs prior to writing thread data to the response
self.assertEquals(
response_data["content"],
strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
)
mock_request.assert_called_with(
"get",
StringEndsWithMatcher(thread_id), # url
data=None,
params=PartialDictMatcher({"mark_as_read": True, "user_id": 1, "recursive": True}),
headers=ANY,
timeout=ANY
)
def test_skip_limit(self, mock_request):
text = "dummy content"
thread_id = "test_thread_id"
response_skip = "45"
response_limit = "15"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
request = RequestFactory().get(
"dummy_url",
{"resp_skip": response_skip, "resp_limit": response_limit},
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = self.student
response = views.single_thread(
request,
text_type(self.course.id),
"dummy_discussion_id",
"test_thread_id"
)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content)
# strip_none is being used to perform the same transform that the
# django view performs prior to writing thread data to the response
self.assertEquals(
response_data["content"],
strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
)
mock_request.assert_called_with(
"get",
StringEndsWithMatcher(thread_id), # url
data=None,
params=PartialDictMatcher({
"mark_as_read": True,
"user_id": 1,
"recursive": True,
"resp_skip": response_skip,
"resp_limit": response_limit,
}),
headers=ANY,
timeout=ANY
)
def test_post(self, mock_request):
request = RequestFactory().post("dummy_url")
response = views.single_thread(
request,
text_type(self.course.id),
"dummy_discussion_id",
"dummy_thread_id"
)
self.assertEquals(response.status_code, 405)
def test_not_found(self, mock_request):
request = RequestFactory().get("dummy_url")
request.user = self.student
# Mock request to return 404 for thread request
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id=None)
self.assertRaises(
Http404,
views.single_thread,
request,
text_type(self.course.id),
"test_discussion_id",
"test_thread_id"
)
@ddt.ddt
@patch('requests.request', autospec=True)
class SingleThreadQueryCountTestCase(ForumsEnableMixin, ModuleStoreTestCase):
"""
Ensures the number of modulestore queries and number of sql queries are
independent of the number of responses retrieved for a given discussion thread.
"""
shard = 4
MODULESTORE = TEST_DATA_MONGO_MODULESTORE
def setUp(self):
super(SingleThreadQueryCountTestCase, self).setUp()
@ddt.data(
# Old mongo with cache. There is an additional SQL query for old mongo
# because the first time that disabled_xblocks is queried is in call_single_thread,
# vs. the creation of the course (CourseFactory.create). The creation of the
# course is outside the context manager that is verifying the number of queries,
# and with split mongo, that method ends up querying disabled_xblocks (which is then
# cached and hence not queried as part of call_single_thread).
(ModuleStoreEnum.Type.mongo, False, 1, 5, 2, 17, 5),
(ModuleStoreEnum.Type.mongo, False, 50, 5, 2, 17, 5),
# split mongo: 3 queries, regardless of thread response size.
(ModuleStoreEnum.Type.split, False, 1, 3, 3, 17, 5),
(ModuleStoreEnum.Type.split, False, 50, 3, 3, 17, 5),
# Enabling Enterprise integration should have no effect on the number of mongo queries made.
(ModuleStoreEnum.Type.mongo, True, 1, 5, 2, 17, 5),
(ModuleStoreEnum.Type.mongo, True, 50, 5, 2, 17, 5),
# split mongo: 3 queries, regardless of thread response size.
(ModuleStoreEnum.Type.split, True, 1, 3, 3, 17, 5),
(ModuleStoreEnum.Type.split, True, 50, 3, 3, 17, 5),
)
@ddt.unpack
def test_number_of_mongo_queries(
self,
default_store,
enterprise_enabled,
num_thread_responses,
num_uncached_mongo_calls,
num_cached_mongo_calls,
num_uncached_sql_queries,
num_cached_sql_queries,
mock_request
):
with modulestore().default_store(default_store):
course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
student = UserFactory.create()
CourseEnrollmentFactory.create(user=student, course_id=course.id)
test_thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=course, text="dummy content", thread_id=test_thread_id, num_thread_responses=num_thread_responses
)
request = RequestFactory().get(
"dummy_url",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = student
def call_single_thread():
"""
Call single_thread and assert that it returns what we expect.
"""
with patch.dict("django.conf.settings.FEATURES", dict(ENABLE_ENTERPRISE_INTEGRATION=enterprise_enabled)):
response = views.single_thread(
request,
text_type(course.id),
"dummy_discussion_id",
test_thread_id
)
self.assertEquals(response.status_code, 200)
self.assertEquals(len(json.loads(response.content)["content"]["children"]), num_thread_responses)
# Test uncached first, then cached now that the cache is warm.
cached_calls = [
[num_uncached_mongo_calls, num_uncached_sql_queries],
[num_cached_mongo_calls, num_cached_sql_queries],
]
for expected_mongo_calls, expected_sql_queries in cached_calls:
with self.assertNumQueries(expected_sql_queries, table_blacklist=QUERY_COUNT_TABLE_BLACKLIST):
with check_mongo_calls(expected_mongo_calls):
call_single_thread()
@patch('requests.request', autospec=True)
class SingleCohortedThreadTestCase(CohortedTestCase):
shard = 4
def _create_mock_cohorted_thread(self, mock_request):
self.mock_text = "dummy content"
self.mock_thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=self.course, text=self.mock_text,
thread_id=self.mock_thread_id,
group_id=self.student_cohort.id,
commentable_id="cohorted_topic",
)
def test_ajax(self, mock_request):
self._create_mock_cohorted_thread(mock_request)
request = RequestFactory().get(
"dummy_url",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = self.student
response = views.single_thread(
request,
text_type(self.course.id),
"cohorted_topic",
self.mock_thread_id
)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEquals(
response_data["content"],
make_mock_thread_data(
course=self.course,
commentable_id="cohorted_topic",
text=self.mock_text,
thread_id=self.mock_thread_id,
num_children=1,
group_id=self.student_cohort.id,
group_name=self.student_cohort.name,
is_commentable_divided=True,
)
)
def test_html(self, mock_request):
self._create_mock_cohorted_thread(mock_request)
self.client.login(username=self.student.username, password='test')
response = self.client.get(
reverse('single_thread', kwargs={
'course_id': unicode(self.course.id),
'discussion_id': "cohorted_topic",
'thread_id': self.mock_thread_id,
})
)
self.assertEquals(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
html = response.content
# Verify that the group name is correctly included in the HTML
self.assertRegexpMatches(html, r'"group_name": "student_cohort"')
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class SingleThreadAccessTestCase(CohortedTestCase):
shard = 4
def call_view(self, mock_request, commentable_id, user, group_id, thread_group_id=None, pass_group_id=True):
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy context", thread_id=thread_id, group_id=thread_group_id
)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().get(
"dummy_url",
data=request_data,
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = user
return views.single_thread(
request,
text_type(self.course.id),
commentable_id,
thread_id
)
def test_student_non_cohorted(self, mock_request):
resp = self.call_view(mock_request, "non_cohorted_topic", self.student, self.student_cohort.id)
self.assertEqual(resp.status_code, 200)
def test_student_same_cohort(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
thread_group_id=self.student_cohort.id
)
self.assertEqual(resp.status_code, 200)
# this test ensures that a thread response from the cs with group_id: null
# behaves the same as a thread response without a group_id (see: TNL-444)
def test_student_global_thread_in_cohorted_topic(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
thread_group_id=None
)
self.assertEqual(resp.status_code, 200)
def test_student_different_cohort(self, mock_request):
self.assertRaises(
Http404,
lambda: self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
thread_group_id=self.moderator_cohort.id
)
)
def test_moderator_non_cohorted(self, mock_request):
resp = self.call_view(mock_request, "non_cohorted_topic", self.moderator, self.moderator_cohort.id)
self.assertEqual(resp.status_code, 200)
def test_moderator_same_cohort(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.moderator,
self.moderator_cohort.id,
thread_group_id=self.moderator_cohort.id
)
self.assertEqual(resp.status_code, 200)
def test_moderator_different_cohort(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.moderator,
self.moderator_cohort.id,
thread_group_id=self.student_cohort.id
)
self.assertEqual(resp.status_code, 200)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class SingleThreadGroupIdTestCase(CohortedTestCase, GroupIdAssertionMixin):
shard = 4
cs_endpoint = "/threads/dummy_thread_id"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy context", group_id=self.student_cohort.id
)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
headers = {}
if is_ajax:
headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
self.client.login(username=user.username, password='test')
return self.client.get(
reverse('single_thread', args=[unicode(self.course.id), commentable_id, "dummy_thread_id"]),
data=request_data,
**headers
)
def test_group_info_in_html_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=False
)
self._assert_html_response_contains_group_info(response)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=True
)
self._assert_json_response_contains_group_info(
response, lambda d: d['content']
)
@patch('requests.request', autospec=True)
class ForumFormDiscussionContentGroupTestCase(ForumsEnableMixin, ContentGroupTestCase):
"""
Tests that the `forum_form_discussion` API works with different content groups.
Discussion modules are set up in the ContentGroupTestCase class, i.e.
alpha_module => alpha_group_discussion => alpha_cohort => alpha_user/community_ta
beta_module => beta_group_discussion => beta_cohort => beta_user
"""
shard = 4
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ForumFormDiscussionContentGroupTestCase, self).setUp()
self.thread_list = [
{"thread_id": "test_general_thread_id"},
{"thread_id": "test_global_group_thread_id", "commentable_id": self.global_module.discussion_id},
{"thread_id": "test_alpha_group_thread_id", "group_id": self.alpha_module.group_access[0][0], "commentable_id": self.alpha_module.discussion_id}, # pylint: disable=line-too-long
{"thread_id": "test_beta_group_thread_id", "group_id": self.beta_module.group_access[0][0], "commentable_id": self.beta_module.discussion_id} # pylint: disable=line-too-long
]
def assert_has_access(self, response, expected_discussion_threads):
"""
Verify that users have access to the threads in their assigned
cohorts and in non-cohorted modules.
"""
discussion_data = json.loads(response.content)['discussion_data']
self.assertEqual(len(discussion_data), expected_discussion_threads)
def call_view(self, mock_request, user):
mock_request.side_effect = make_mock_request_impl(
course=self.course,
text="dummy content",
thread_list=self.thread_list
)
self.client.login(username=user.username, password='test')
return self.client.get(
reverse("forum_form_discussion", args=[unicode(self.course.id)]),
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
def test_community_ta_user(self, mock_request):
"""
Verify that community_ta user has access to all threads regardless
of cohort.
"""
response = self.call_view(
mock_request,
self.community_ta
)
self.assert_has_access(response, 4)
def test_alpha_cohort_user(self, mock_request):
"""
Verify that alpha_user has access to alpha_cohort and non-cohorted
threads.
"""
response = self.call_view(
mock_request,
self.alpha_user
)
self.assert_has_access(response, 3)
def test_beta_cohort_user(self, mock_request):
"""
Verify that beta_user has access to beta_cohort and non-cohorted
threads.
"""
response = self.call_view(
mock_request,
self.beta_user
)
self.assert_has_access(response, 3)
@patch('requests.request', autospec=True)
class SingleThreadContentGroupTestCase(ForumsEnableMixin, UrlResetMixin, ContentGroupTestCase):
shard = 4
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(SingleThreadContentGroupTestCase, self).setUp()
def assert_can_access(self, user, discussion_id, thread_id, should_have_access):
"""
Verify that a user has access to a thread within a given
discussion_id when should_have_access is True, otherwise
verify that the user does not have access to that thread.
"""
def call_single_thread():
self.client.login(username=user.username, password='test')
return self.client.get(
reverse('single_thread', args=[unicode(self.course.id), discussion_id, thread_id])
)
if should_have_access:
self.assertEqual(call_single_thread().status_code, 200)
else:
self.assertEqual(call_single_thread().status_code, 404)
def test_staff_user(self, mock_request):
"""
Verify that the staff user can access threads in the alpha,
beta, and global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_xblock in [self.alpha_module, self.beta_module, self.global_module]:
self.assert_can_access(self.staff_user, discussion_xblock.discussion_id, thread_id, True)
def test_alpha_user(self, mock_request):
"""
Verify that the alpha user can access threads in the alpha and
global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_xblock in [self.alpha_module, self.global_module]:
self.assert_can_access(self.alpha_user, discussion_xblock.discussion_id, thread_id, True)
self.assert_can_access(self.alpha_user, self.beta_module.discussion_id, thread_id, False)
def test_beta_user(self, mock_request):
"""
Verify that the beta user can access threads in the beta and
global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_xblock in [self.beta_module, self.global_module]:
self.assert_can_access(self.beta_user, discussion_xblock.discussion_id, thread_id, True)
self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)
def test_non_cohorted_user(self, mock_request):
"""
Verify that the non-cohorted user can access threads in just the
global discussion module.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
self.assert_can_access(self.non_cohorted_user, self.global_module.discussion_id, thread_id, True)
self.assert_can_access(self.non_cohorted_user, self.alpha_module.discussion_id, thread_id, False)
self.assert_can_access(self.non_cohorted_user, self.beta_module.discussion_id, thread_id, False)
def test_course_context_respected(self, mock_request):
"""
Verify that course threads go through discussion_category_id_access method.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy content", thread_id=thread_id
)
# Beta user does not have access to alpha_module.
self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)
def test_standalone_context_respected(self, mock_request):
"""
Verify that standalone threads don't go through discussion_category_id_access method.
"""
# For this rather pathological test, we are assigning the alpha module discussion_id (commentable_id)
# to a team so that we can verify that standalone threads don't go through discussion_category_id_access.
thread_id = "test_thread_id"
CourseTeamFactory(
name="A team",
course_id=self.course.id,
topic_id='topic_id',
discussion_topic_id=self.alpha_module.discussion_id
)
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy content", thread_id=thread_id,
commentable_id=self.alpha_module.discussion_id
)
# If a thread returns context other than "course", the access check is not done, and the beta user
# can see the alpha discussion module.
self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, True)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionContextTestCase(ForumsEnableMixin, ModuleStoreTestCase):
shard = 4
def setUp(self):
super(InlineDiscussionContextTestCase, self).setUp()
self.course = CourseFactory.create()
CourseEnrollmentFactory(user=self.user, course_id=self.course.id)
self.discussion_topic_id = "dummy_topic"
self.team = CourseTeamFactory(
name="A team",
course_id=self.course.id,
topic_id='topic_id',
discussion_topic_id=self.discussion_topic_id
)
self.team.add_user(self.user)
def test_context_can_be_standalone(self, mock_request):
mock_request.side_effect = make_mock_request_impl(
course=self.course,
text="dummy text",
commentable_id=self.discussion_topic_id
)
request = RequestFactory().get("dummy_url")
request.user = self.user
response = views.inline_discussion(
request,
unicode(self.course.id),
self.discussion_topic_id,
)
json_response = json.loads(response.content)
self.assertEqual(json_response['discussion_data'][0]['context'], ThreadContext.STANDALONE)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionGroupIdTestCase(
CohortedTestCase,
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
):
shard = 4
cs_endpoint = "/threads"
def setUp(self):
super(InlineDiscussionGroupIdTestCase, self).setUp()
self.cohorted_commentable_id = 'cohorted_topic'
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
kwargs = {'commentable_id': self.cohorted_commentable_id}
if group_id:
# avoid causing a server error when the LMS chokes attempting
# to find a group name for the group_id, when we're testing with
# an invalid one.
try:
CourseUserGroup.objects.get(id=group_id)
kwargs['group_id'] = group_id
except CourseUserGroup.DoesNotExist:
pass
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().get(
"dummy_url",
data=request_data
)
request.user = user
return views.inline_discussion(
request,
text_type(self.course.id),
commentable_id
)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
self.cohorted_commentable_id,
self.student,
self.student_cohort.id
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ForumFormDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
shard = 4
cs_endpoint = "/threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
kwargs = {}
if group_id:
kwargs['group_id'] = group_id
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
headers = {}
if is_ajax:
headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
self.client.login(username=user.username, password='test')
return self.client.get(
reverse("forum_form_discussion", args=[unicode(self.course.id)]),
data=request_data,
**headers
)
def test_group_info_in_html_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id
)
self._assert_html_response_contains_group_info(response)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=True
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class UserProfileDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
shard = 4
cs_endpoint = "/active_threads"
def call_view_for_profiled_user(
self, mock_request, requesting_user, profiled_user, group_id, pass_group_id, is_ajax=False
):
"""
Calls "user_profile" view method on behalf of "requesting_user" to get information about
the user "profiled_user".
"""
kwargs = {}
if group_id:
kwargs['group_id'] = group_id
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
headers = {}
if is_ajax:
headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
self.client.login(username=requesting_user.username, password='test')
return self.client.get(
reverse('user_profile', args=[unicode(self.course.id), profiled_user.id]),
data=request_data,
**headers
)
def call_view(self, mock_request, _commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
return self.call_view_for_profiled_user(
mock_request, user, user, group_id, pass_group_id=pass_group_id, is_ajax=is_ajax
)
def test_group_info_in_html_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=False
)
self._assert_html_response_contains_group_info(response)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=True
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
def _test_group_id_passed_to_user_profile(
self, mock_request, expect_group_id_in_request, requesting_user, profiled_user, group_id, pass_group_id
):
"""
Helper method for testing whether or not group_id was passed to the user_profile request.
"""
def get_params_from_user_info_call(for_specific_course):
"""
Returns the request parameters for the user info call with either course_id specified or not,
depending on value of 'for_specific_course'.
"""
# There will be 3 calls from user_profile. One has the cs_endpoint "active_threads", and it is already
# tested. The other 2 calls are for user info; one of those calls is for general information about the user,
# and it does not specify a course_id. The other call does specify a course_id, and if the caller did not
# have discussion moderator privileges, it should also contain a group_id.
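# For illustration (hypothetical values): the course-scoped user-info call
# carries params like {"course_id": "<course>", "group_id": <cohort id>},
# while the general one omits "course_id" (and therefore "group_id").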
for r_call in mock_request.call_args_list:
if not r_call[0][1].endswith(self.cs_endpoint):
params = r_call[1]["params"]
has_course_id = "course_id" in params
if (for_specific_course and has_course_id) or (not for_specific_course and not has_course_id):
return params
self.fail(
"Did not find appropriate user_profile call for "
"'for_specific_course'={}".format(for_specific_course)
)
mock_request.reset_mock()
self.call_view_for_profiled_user(
mock_request,
requesting_user,
profiled_user,
group_id,
pass_group_id=pass_group_id,
is_ajax=False
)
# Should never have a group_id if course_id was not included in the request.
params_without_course_id = get_params_from_user_info_call(False)
self.assertNotIn("group_id", params_without_course_id)
params_with_course_id = get_params_from_user_info_call(True)
if expect_group_id_in_request:
self.assertIn("group_id", params_with_course_id)
self.assertEqual(group_id, params_with_course_id["group_id"])
else:
self.assertNotIn("group_id", params_with_course_id)
def test_group_id_passed_to_user_profile_student(self, mock_request):
"""
Test that the group id is always included when requesting user profile information for a particular
course if the requester does not have discussion moderation privileges.
"""
def verify_group_id_always_present(profiled_user, pass_group_id):
"""
Helper method to verify that group_id is always present for student in course
(non-privileged user).
"""
self._test_group_id_passed_to_user_profile(
mock_request, True, self.student, profiled_user, self.student_cohort.id, pass_group_id
)
# In all these test cases, the requesting_user is the student (non-privileged user).
# The profile returned on behalf of the student is for the profiled_user.
verify_group_id_always_present(profiled_user=self.student, pass_group_id=True)
verify_group_id_always_present(profiled_user=self.student, pass_group_id=False)
verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=True)
verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=False)
def test_group_id_user_profile_moderator(self, mock_request):
"""
Test that, when a privileged user requests user profile information for a
particular course and user, the group id is included only if it is explicitly passed in.
"""
def verify_group_id_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
"""
Helper method to verify that group_id is present.
"""
self._test_group_id_passed_to_user_profile(
mock_request, True, self.moderator, profiled_user, requested_cohort.id, pass_group_id
)
def verify_group_id_not_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
"""
Helper method to verify that group_id is not present.
"""
self._test_group_id_passed_to_user_profile(
mock_request, False, self.moderator, profiled_user, requested_cohort.id, pass_group_id
)
# In all these test cases, the requesting_user is the moderator (privileged user).
# If the group_id is explicitly passed, it will be present in the request.
verify_group_id_present(profiled_user=self.student, pass_group_id=True)
verify_group_id_present(profiled_user=self.moderator, pass_group_id=True)
verify_group_id_present(
profiled_user=self.student, pass_group_id=True, requested_cohort=self.student_cohort
)
# If the group_id is not explicitly passed, it will not be present because the requesting_user
# has discussion moderator privileges.
verify_group_id_not_present(profiled_user=self.student, pass_group_id=False)
verify_group_id_not_present(profiled_user=self.moderator, pass_group_id=False)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class FollowedThreadsDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
shard = 4
cs_endpoint = "/subscribed_threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
kwargs = {}
if group_id:
kwargs['group_id'] = group_id
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().get(
"dummy_url",
data=request_data,
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = user
return views.followed_threads(
request,
text_type(self.course.id),
user.id
)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionTestCase(ForumsEnableMixin, ModuleStoreTestCase):
shard = 4
def setUp(self):
super(InlineDiscussionTestCase, self).setUp()
self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
self.discussion1 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion1",
display_name='Discussion1',
discussion_category="Chapter",
discussion_target="Discussion1"
)
def send_request(self, mock_request, params=None):
"""
Creates and returns a request with params set, and configures
mock_request to return appropriate values.
"""
request = RequestFactory().get("dummy_url", params if params else {})
request.user = self.student
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy content", commentable_id=self.discussion1.discussion_id
)
return views.inline_discussion(
request, text_type(self.course.id), self.discussion1.discussion_id
)
def test_context(self, mock_request):
team = CourseTeamFactory(
name='Team Name',
topic_id='A topic',
course_id=self.course.id,
discussion_topic_id=self.discussion1.discussion_id
)
team.add_user(self.student)
response = self.send_request(mock_request)
self.assertEqual(mock_request.call_args[1]['params']['context'], ThreadContext.STANDALONE)
@patch('requests.request', autospec=True)
class UserProfileTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
shard = 4
TEST_THREAD_TEXT = 'userprofile-test-text'
TEST_THREAD_ID = 'userprofile-test-thread-id'
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UserProfileTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
self.profiled_user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory.create(user=self.profiled_user, course_id=self.course.id)
def get_response(self, mock_request, params, **headers):
mock_request.side_effect = make_mock_request_impl(
course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
)
self.client.login(username=self.student.username, password='test')
response = self.client.get(
reverse('user_profile', kwargs={
'course_id': unicode(self.course.id),
'user_id': self.profiled_user.id,
}),
data=params,
**headers
)
mock_request.assert_any_call(
"get",
StringEndsWithMatcher('/users/{}/active_threads'.format(self.profiled_user.id)),
data=None,
params=PartialDictMatcher({
"course_id": text_type(self.course.id),
"page": params.get("page", 1),
"per_page": views.THREADS_PER_PAGE
}),
headers=ANY,
timeout=ANY
)
return response
def check_html(self, mock_request, **params):
response = self.get_response(mock_request, params)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
html = response.content
self.assertRegexpMatches(html, r'data-page="1"')
self.assertRegexpMatches(html, r'data-num-pages="1"')
self.assertRegexpMatches(html, r'<span class="discussion-count">1</span> discussion started')
self.assertRegexpMatches(html, r'<span class="discussion-count">2</span> comments')
self.assertRegexpMatches(html, r'&#39;id&#39;: &#39;{}&#39;'.format(self.TEST_THREAD_ID))
self.assertRegexpMatches(html, r'&#39;title&#39;: &#39;{}&#39;'.format(self.TEST_THREAD_TEXT))
self.assertRegexpMatches(html, r'&#39;body&#39;: &#39;{}&#39;'.format(self.TEST_THREAD_TEXT))
self.assertRegexpMatches(html, r'&#39;username&#39;: u&#39;{}&#39;'.format(self.student.username))
def check_ajax(self, mock_request, **params):
response = self.get_response(mock_request, params, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json; charset=utf-8')
response_data = json.loads(response.content)
self.assertEqual(
sorted(response_data.keys()),
["annotated_content_info", "discussion_data", "num_pages", "page"]
)
self.assertEqual(len(response_data['discussion_data']), 1)
self.assertEqual(response_data["page"], 1)
self.assertEqual(response_data["num_pages"], 1)
self.assertEqual(response_data['discussion_data'][0]['id'], self.TEST_THREAD_ID)
self.assertEqual(response_data['discussion_data'][0]['title'], self.TEST_THREAD_TEXT)
self.assertEqual(response_data['discussion_data'][0]['body'], self.TEST_THREAD_TEXT)
def test_html(self, mock_request):
self.check_html(mock_request)
def test_ajax(self, mock_request):
self.check_ajax(mock_request)
def test_404_non_enrolled_user(self, __):
"""
Test that when a student tries to visit an un-enrolled student's discussion profile,
the system raises Http404.
"""
unenrolled_user = UserFactory.create()
request = RequestFactory().get("dummy_url")
request.user = self.student
with self.assertRaises(Http404):
views.user_profile(
request,
text_type(self.course.id),
unenrolled_user.id
)
def test_404_profiled_user(self, mock_request):
request = RequestFactory().get("dummy_url")
request.user = self.student
with self.assertRaises(Http404):
views.user_profile(
request,
text_type(self.course.id),
-999
)
def test_404_course(self, mock_request):
request = RequestFactory().get("dummy_url")
request.user = self.student
with self.assertRaises(Http404):
views.user_profile(
request,
"non/existent/course",
self.profiled_user.id
)
def test_post(self, mock_request):
mock_request.side_effect = make_mock_request_impl(
course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
)
request = RequestFactory().post("dummy_url")
request.user = self.student
response = views.user_profile(
request,
text_type(self.course.id),
self.profiled_user.id
)
self.assertEqual(response.status_code, 405)
@patch('requests.request', autospec=True)
class CommentsServiceRequestHeadersTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
shard = 4
CREATE_USER = False
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Invoke UrlResetMixin setUp
super(CommentsServiceRequestHeadersTestCase, self).setUp()
username = "foo"
password = "bar"
self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
self.student = UserFactory.create(username=username, password=password)
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
self.assertTrue(
self.client.login(username=username, password=password)
)
self.addCleanup(translation.deactivate)
def assert_all_calls_have_header(self, mock_request, key, value):
expected = call(
ANY, # method
ANY, # url
data=ANY,
params=ANY,
headers=PartialDictMatcher({key: value}),
timeout=ANY
)
for actual in mock_request.call_args_list:
self.assertEqual(expected, actual)
def test_accept_language(self, mock_request):
lang = "eo"
text = "dummy content"
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
self.client.get(
reverse(
"single_thread",
kwargs={
"course_id": text_type(self.course.id),
"discussion_id": "dummy_discussion_id",
"thread_id": thread_id,
}
),
HTTP_ACCEPT_LANGUAGE=lang,
)
self.assert_all_calls_have_header(mock_request, "Accept-Language", lang)
@override_settings(COMMENTS_SERVICE_KEY="test_api_key")
def test_api_key(self, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id="dummy")
self.client.get(
reverse(
"forum_form_discussion",
kwargs={"course_id": text_type(self.course.id)}
),
)
self.assert_all_calls_have_header(mock_request, "X-Edx-Api-Key", "test_api_key")
class InlineDiscussionUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
shard = 4
@classmethod
def setUpClass(cls):
with super(InlineDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(InlineDiscussionUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
def setUp(self):
super(InlineDiscussionUnicodeTestCase, self).setUp()
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
response = views.inline_discussion(
request, text_type(self.course.id), self.course.discussion_topics['General']['id']
)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class ForumFormDiscussionUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
shard = 4
@classmethod
def setUpClass(cls):
with super(ForumFormDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(ForumFormDiscussionUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
def setUp(self):
super(ForumFormDiscussionUnicodeTestCase, self).setUp()
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.forum_form_discussion(request, text_type(self.course.id))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
@ddt.ddt
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ForumDiscussionXSSTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
shard = 4
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ForumDiscussionXSSTestCase, self).setUp()
username = "foo"
password = "bar"
self.course = CourseFactory.create()
self.student = UserFactory.create(username=username, password=password)
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
self.assertTrue(self.client.login(username=username, password=password))
@ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
@patch('student.models.cc.User.from_django_user')
def test_forum_discussion_xss_prevent(self, malicious_code, mock_user, mock_req): # pylint: disable=unused-argument
"""
Test that XSS attack is prevented
"""
mock_user.return_value.to_dict.return_value = {}
reverse_url = "%s%s" % (reverse(
"forum_form_discussion",
kwargs={"course_id": unicode(self.course.id)}), '/forum_form_discussion')
# Test that malicious code does not appear in html
url = "%s?%s=%s" % (reverse_url, 'sort_key', malicious_code)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn(malicious_code, resp.content)
@ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
@patch('student.models.cc.User.from_django_user')
@patch('student.models.cc.User.active_threads')
def test_forum_user_profile_xss_prevent(self, malicious_code, mock_threads, mock_from_django_user, mock_request):
"""
Test that XSS attack is prevented
"""
mock_threads.return_value = [], 1, 1
mock_from_django_user.return_value.to_dict.return_value = {
'upvoted_ids': [],
'downvoted_ids': [],
'subscribed_thread_ids': []
}
mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
url = reverse('user_profile',
kwargs={'course_id': unicode(self.course.id), 'user_id': str(self.student.id)})
# Test that malicious code does not appear in html
url_string = "%s?%s=%s" % (url, 'page', malicious_code)
resp = self.client.get(url_string)
self.assertEqual(resp.status_code, 200)
self.assertNotIn(malicious_code, resp.content)
class ForumDiscussionSearchUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
shard = 4
@classmethod
def setUpClass(cls):
with super(ForumDiscussionSearchUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(ForumDiscussionSearchUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
def setUp(self):
super(ForumDiscussionSearchUnicodeTestCase, self).setUp()
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
data = {
"ajax": 1,
"text": text,
}
request = RequestFactory().get("dummy_url", data)
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.forum_form_discussion(request, text_type(self.course.id))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class SingleThreadUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
shard = 4
@classmethod
def setUpClass(cls):
with super(SingleThreadUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create(discussion_topics={'dummy_discussion_id': {'id': 'dummy_discussion_id'}})
@classmethod
def setUpTestData(cls):
super(SingleThreadUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
def setUp(self):
super(SingleThreadUnicodeTestCase, self).setUp()
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.single_thread(request, text_type(self.course.id), "dummy_discussion_id", thread_id)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["content"]["title"], text)
self.assertEqual(response_data["content"]["body"], text)
class UserProfileUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
shard = 4
@classmethod
def setUpClass(cls):
with super(UserProfileUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(UserProfileUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
def setUp(self):
super(UserProfileUnicodeTestCase, self).setUp()
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.user_profile(request, text_type(self.course.id), str(self.student.id))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class FollowedThreadsUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
shard = 4
@classmethod
def setUpClass(cls):
with super(FollowedThreadsUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(FollowedThreadsUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
def setUp(self):
super(FollowedThreadsUnicodeTestCase, self).setUp()
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.followed_threads(request, text_type(self.course.id), str(self.student.id))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class EnrollmentTestCase(ForumsEnableMixin, ModuleStoreTestCase):
"""
Tests for the behavior of views depending on whether the student is
enrolled in the course.
"""
shard = 4
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(EnrollmentTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def test_unenrolled(self, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
request = RequestFactory().get('dummy_url')
request.user = self.student
with self.assertRaises(CourseAccessRedirect):
views.forum_form_discussion(request, course_id=text_type(self.course.id))
@patch('requests.request', autospec=True)
class EnterpriseConsentTestCase(EnterpriseTestConsentRequired, ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
"""
Ensure that the Enterprise Data Consent redirects are in place only when consent is required.
"""
shard = 4
CREATE_USER = False
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Invoke UrlResetMixin setUp
super(EnterpriseConsentTestCase, self).setUp()
username = "foo"
password = "bar"
self.discussion_id = 'dummy_discussion_id'
self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': self.discussion_id}})
self.student = UserFactory.create(username=username, password=password)
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
self.assertTrue(
self.client.login(username=username, password=password)
)
self.addCleanup(translation.deactivate)
@patch('openedx.features.enterprise_support.api.enterprise_customer_for_request')
def test_consent_required(self, mock_enterprise_customer_for_request, mock_request):
"""
Test that enterprise data sharing consent is required when enabled for the various discussion views.
"""
# ENT-924: Temporary solution to replace sensitive SSO usernames.
mock_enterprise_customer_for_request.return_value = None
thread_id = 'dummy'
course_id = unicode(self.course.id)
mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy', thread_id=thread_id)
for url in (
reverse('forum_form_discussion',
kwargs=dict(course_id=course_id)),
reverse('single_thread',
kwargs=dict(course_id=course_id, discussion_id=self.discussion_id, thread_id=thread_id)),
):
self.verify_consent_required(self.client, url)
class DividedDiscussionsTestCase(CohortViewsTestCase):
shard = 4
def create_divided_discussions(self):
"""
Set up a divided discussion in the system, complete with all the fixings
"""
divided_inline_discussions = ['Topic A']
divided_course_wide_discussions = ["Topic B"]
divided_discussions = divided_inline_discussions + divided_course_wide_discussions
# inline discussion
ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id=topic_name_to_id(self.course, "Topic A"),
discussion_category="Chapter",
discussion_target="Discussion",
start=datetime.now()
)
# course-wide discussion
discussion_topics = {
"Topic B": {"id": "Topic B"},
}
config_course_cohorts(
self.course,
is_cohorted=True,
)
config_course_discussions(
self.course,
discussion_topics=discussion_topics,
divided_discussions=divided_discussions
)
return divided_inline_discussions, divided_course_wide_discussions
class CourseDiscussionTopicsTestCase(DividedDiscussionsTestCase):
"""
Tests the `discussion_topics` view.
"""
shard = 4
def test_non_staff(self):
"""
Verify that we cannot access divide_discussion_topics if we're a non-staff user.
"""
self._verify_non_staff_cannot_access(views.discussion_topics, "GET", [unicode(self.course.id)])
def test_get_discussion_topics(self):
"""
Verify that discussion_topics is working for HTTP GET.
"""
# create inline & course-wide discussion to verify the different map.
self.create_divided_discussions()
response = self.get_handler(self.course, handler=views.discussion_topics)
start_date = response['inline_discussions']['subcategories']['Chapter']['start_date']
expected_response = {
"course_wide_discussions": {
'children': [['Topic B', TYPE_ENTRY]],
'entries': {
'Topic B': {
'sort_key': 'A',
'is_divided': True,
'id': topic_name_to_id(self.course, "Topic B"),
'start_date': response['course_wide_discussions']['entries']['Topic B']['start_date']
}
}
},
"inline_discussions": {
'subcategories': {
'Chapter': {
'subcategories': {},
'children': [['Discussion', TYPE_ENTRY]],
'entries': {
'Discussion': {
'sort_key': None,
'is_divided': True,
'id': topic_name_to_id(self.course, "Topic A"),
'start_date': start_date
}
},
'sort_key': 'Chapter',
'start_date': start_date
}
},
'children': [['Chapter', TYPE_SUBCATEGORY]]
}
}
self.assertEqual(response, expected_response)
class CourseDiscussionsHandlerTestCase(DividedDiscussionsTestCase):
"""
Tests the course_discussions_settings_handler.
"""
shard = 4
def get_expected_response(self):
"""
Returns the static response dict.
"""
return {
u'always_divide_inline_discussions': False,
u'divided_inline_discussions': [],
u'divided_course_wide_discussions': [],
u'id': 1,
u'division_scheme': u'cohort',
u'available_division_schemes': [u'cohort']
}
def test_non_staff(self):
"""
Verify that we cannot access course_discussions_settings_handler if we're a non-staff user.
"""
self._verify_non_staff_cannot_access(
course_discussions_settings_handler, "GET", [unicode(self.course.id)]
)
self._verify_non_staff_cannot_access(
course_discussions_settings_handler, "PATCH", [unicode(self.course.id)]
)
def test_update_always_divide_inline_discussion_settings(self):
"""
Verify that course_discussions_settings_handler is working for always_divide_inline_discussions via HTTP PATCH.
"""
config_course_cohorts(self.course, is_cohorted=True)
response = self.get_handler(self.course, handler=course_discussions_settings_handler)
expected_response = self.get_expected_response()
self.assertEqual(response, expected_response)
expected_response['always_divide_inline_discussions'] = True
response = self.patch_handler(
self.course, data=expected_response, handler=course_discussions_settings_handler
)
self.assertEqual(response, expected_response)
def test_update_course_wide_discussion_settings(self):
"""
Verify that course_discussions_settings_handler is working for divided_course_wide_discussions via HTTP PATCH.
"""
# course-wide discussion
discussion_topics = {
"Topic B": {"id": "Topic B"},
}
config_course_cohorts(self.course, is_cohorted=True)
config_course_discussions(self.course, discussion_topics=discussion_topics)
response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
expected_response = self.get_expected_response()
self.assertEqual(response, expected_response)
expected_response['divided_course_wide_discussions'] = [topic_name_to_id(self.course, "Topic B")]
response = self.patch_handler(
self.course, data=expected_response, handler=views.course_discussions_settings_handler
)
self.assertEqual(response, expected_response)
def test_update_inline_discussion_settings(self):
"""
Verify that course_discussions_settings_handler is working for divided_inline_discussions via HTTP PATCH.
"""
config_course_cohorts(self.course, is_cohorted=True)
response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
expected_response = self.get_expected_response()
self.assertEqual(response, expected_response)
RequestCache.clear_all_namespaces()
now = datetime.now()
# inline discussion
ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="Topic_A",
discussion_category="Chapter",
discussion_target="Discussion",
start=now
)
expected_response['divided_inline_discussions'] = ["Topic_A"]
response = self.patch_handler(
self.course, data=expected_response, handler=views.course_discussions_settings_handler
)
self.assertEqual(response, expected_response)
def test_get_settings(self):
"""
Verify that course_discussions_settings_handler is working for HTTP GET.
"""
divided_inline_discussions, divided_course_wide_discussions = self.create_divided_discussions()
response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
expected_response = self.get_expected_response()
expected_response['divided_inline_discussions'] = [topic_name_to_id(self.course, name)
for name in divided_inline_discussions]
expected_response['divided_course_wide_discussions'] = [topic_name_to_id(self.course, name)
for name in divided_course_wide_discussions]
self.assertEqual(response, expected_response)
def test_update_settings_with_invalid_field_data_type(self):
"""
Verify that course_discussions_settings_handler returns HTTP 400 if the field data type is incorrect.
"""
config_course_cohorts(self.course, is_cohorted=True)
response = self.patch_handler(
self.course,
data={'always_divide_inline_discussions': ''},
expected_response_code=400,
handler=views.course_discussions_settings_handler
)
self.assertEqual(
"Incorrect field type for `{}`. Type must be `{}`".format('always_divide_inline_discussions', bool.__name__),
response.get("error")
)
def test_available_schemes(self):
# Cohorts disabled, single enrollment mode.
config_course_cohorts(self.course, is_cohorted=False)
response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
expected_response = self.get_expected_response()
expected_response['available_division_schemes'] = []
self.assertEqual(response, expected_response)
# Add 2 enrollment modes
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
expected_response['available_division_schemes'] = [CourseDiscussionSettings.ENROLLMENT_TRACK]
self.assertEqual(response, expected_response)
# Enable cohorts
config_course_cohorts(self.course, is_cohorted=True)
response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
expected_response['available_division_schemes'] = [
CourseDiscussionSettings.COHORT, CourseDiscussionSettings.ENROLLMENT_TRACK
]
self.assertEqual(response, expected_response)
class DefaultTopicIdGetterTestCase(ModuleStoreTestCase):
"""
Tests the `_get_discussion_default_topic_id` helper.
"""
shard = 4
def test_no_default_topic(self):
discussion_topics = {
'dummy discussion': {
'id': 'dummy_discussion_id',
},
}
course = CourseFactory.create(discussion_topics=discussion_topics)
expected_id = None
result = _get_discussion_default_topic_id(course)
self.assertEqual(expected_id, result)
def test_default_topic_id(self):
discussion_topics = {
'dummy discussion': {
'id': 'dummy_discussion_id',
},
'another discussion': {
'id': 'another_discussion_id',
'default': True,
},
}
course = CourseFactory.create(discussion_topics=discussion_topics)
expected_id = 'another_discussion_id'
result = _get_discussion_default_topic_id(course)
self.assertEqual(expected_id, result)
class ThreadViewedEventTestCase(EventTestMixin, ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
"""
Forum thread views are expected to launch analytics events. Test these here.
"""
shard = 4
CATEGORY_ID = 'i4x-edx-discussion-id'
CATEGORY_NAME = 'Discussion 1'
PARENT_CATEGORY_NAME = 'Chapter 1'
DUMMY_THREAD_ID = 'dummythreadids'
DUMMY_TITLE = 'Dummy title'
DUMMY_URL = 'https://example.com/dummy/url/'
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ThreadViewedEventTestCase, self).setUp('eventtracking.tracker')
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
PASSWORD = 'test'
self.student = UserFactory.create(password=PASSWORD)
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
self.staff = UserFactory.create(is_staff=True)
UserBasedRole(user=self.staff, role=CourseStaffRole.ROLE).add_course(self.course.id)
self.category = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id=self.CATEGORY_ID,
discussion_category=self.PARENT_CATEGORY_NAME,
discussion_target=self.CATEGORY_NAME,
)
self.team = CourseTeamFactory.create(
name='Team 1',
course_id=self.course.id,
topic_id='arbitrary-topic-id',
discussion_topic_id=self.category.discussion_id,
)
CourseTeamMembershipFactory.create(team=self.team, user=self.student)
self.client.login(username=self.student.username, password=PASSWORD)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
@patch('lms.lib.comment_client.utils.perform_request')
def test_thread_viewed_event(self, mock_perform_request):
mock_perform_request.side_effect = make_mock_perform_request_impl(
course=self.course,
text=self.DUMMY_TITLE,
thread_id=self.DUMMY_THREAD_ID,
commentable_id=self.category.discussion_id,
)
url = '/courses/{0}/discussion/forum/{1}/threads/{2}'.format(
unicode(self.course.id),
self.category.discussion_id,
self.DUMMY_THREAD_ID
)
self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
expected_event = {
'id': self.DUMMY_THREAD_ID,
'title': self.DUMMY_TITLE,
'commentable_id': self.category.discussion_id,
'category_id': self.category.discussion_id,
'category_name': self.category.discussion_target,
'user_forums_roles': [FORUM_ROLE_STUDENT],
'user_course_roles': [],
'target_username': self.student.username,
'team_id': self.team.id,
'url': self.DUMMY_URL,
}
expected_event_items = expected_event.items()
self.assert_event_emission_count('edx.forum.thread.viewed', 1)
_, event = self.get_latest_call_args()
event_items = event.items()
self.assertTrue(all(kv_pair in event_items for kv_pair in expected_event_items))
|
ahmedaljazzar/edx-platform
|
lms/djangoapps/discussion/tests/test_views.py
|
Python
|
agpl-3.0
| 85,111
|
[
"VisIt"
] |
4ffe5b95ee6a5ac0835d835d776251265a7d2c6f736560394d448d160427573d
|
""" Figs sub module to generate figures"""
# Global variable that gives us imports status
FIGS = True
try:
import cclib
from cclib import parser
import numpy as np
import matplotlib.pyplot as plt
except ImportError:
FIGS = False
def figs(opts):
"""
Determine type of output from opts or file
Call appropriate function
"""
if type(opts.fname) == str:
# Assuming ccinput is a filename
data = parser.ccopen(opts.fname).parse()
else:
data = opts.fname
assert type(data) == parser.data.ccData_optdone_bool \
or type(data) == parser.data.ccData
# TODO: determine what kind of job (opt, sp, freq)
# auto = automatically determine
# or get from opts.job
if opts.job == 'auto':
# TODO: automatically determine the job type; fall back to a geometry
# optimization for now
_opt(data)
elif opts.job == 'opt':
_opt(data)
elif opts.job == 'sp':
_sp(data)
elif opts.job == 'vib':
print(opts.job, "not yet implemented")
else:
print(opts.job, "not yet implemented")
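# Illustrative usage sketch (hypothetical filename; `opts` just needs
# `fname` and `job` attributes, as used above):
#
# from argparse import Namespace
# figs(Namespace(fname='calc.log', job='opt'))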
def _sp(data):
"""
Generate plots of SCF convergence criteria vs. SCF cycles
:param data: ccdata object
:returns: TODO
"""
# TODO scfenergies, scfvalues, scftargets vs. scf cycles
print("\n\n")
#print("Optimization Converged: ", data.optdone)
# scfvalues is a list with one (cycles, targets) array per SCF;
# use the first SCF's values so each criterion is tracked per cycle
criteria = [0, 0, 0]
criteria[0] = [x[0] for x in data.scfvalues[0]]
criteria[1] = [x[1] for x in data.scfvalues[0]]
criteria[2] = [x[2] for x in data.scfvalues[0]]
idx = np.arange(len(criteria[0]))
# Plot Geometry Optimization Criteria for Convergence over opt cycles
plt.plot(idx, criteria[0], label='Criteria 1')
plt.plot(idx, criteria[1], label='Criteria 2')
plt.plot(idx, criteria[2], label='Criteria 3')
# Plot target criteria for convergence (scftargets holds one row of
# targets per SCF; draw a line for each target of the first SCF)
for target in data.scftargets[0]:
plt.axhline(y=target)
plt.yscale('log')
plt.title("SCF Convergence Analysis")
plt.xlabel("SCF Cycle")
plt.legend()
print(idx, criteria, data.scftargets)
plt.show()
# idx = np.arange(len(data.scfenergies))
# plt.plot(idx, data.scfenergies, label='SCF Energy (eV)')
# plt.show
def _opt(data):
""" Generate plots of convergence criteria,
and energy vs. optimization cycles
:param data: ccdata object
:returns: TODO
"""
print("\n\n")
#print("Optimization Converged: ", data.optdone)
print("Optimization Targets:")
print("Gradient: ", data.geotargets[0])
print("Displacement: ", data.geotargets[1])
print("Energy Change: ", data.geotargets[2])
criteria = [0, 0, 0]
criteria[0] = [x[0] for x in data.geovalues]
criteria[1] = [x[1] for x in data.geovalues]
criteria[2] = [x[2] for x in data.geovalues]
idx = np.arange(len(criteria[0]))
print("Optimization Final Values:")
print("Gradient: ", criteria[0][-1])
print("Displacement: ", criteria[1][-1])
print("Energy Change: ", criteria[2][-1])
# Plot Geometry Optimization Criteria for Convergence over opt cycles
plt.plot(idx, criteria[0], label='Gradient', color='red')
plt.plot(idx, criteria[1], label='Displacement', color='green')
plt.plot(idx, criteria[2], label='Energy Change', color='blue')
# Plot target criteria for convergence
plt.axhline(y=data.geotargets[0], color='red')
plt.axhline(y=data.geotargets[1], color='green')
plt.axhline(y=data.geotargets[2], color='blue')
plt.yscale('log')
plt.title("Optimization Convergence Analysis")
plt.xlabel("Optimization Cycle")
plt.legend()
plt.show()
idx = np.arange(len(data.scfenergies))
plt.plot(idx, data.scfenergies, label='SCF Energy (eV)')
plt.show()
def main(opts):
""" Main function for figs """
if FIGS:
figs(opts)
else:
raise ImportError("""Unable to import all libraries for figs
numpy
matplotlib
cclib""")
|
ben-albrecht/qcl
|
qcl/figs.py
|
Python
|
mit
| 4,021
|
[
"cclib"
] |
eb82a304912a23bb36ba752a2e4541419d25615ea893bd7aaae9418ffcc33f60
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# Based on the work of Dave Peticolas for the P4poll
# Changed to svn (using xml.dom.minidom) by Niklaus Giger
# Hacked beyond recognition by Brian Warner
from __future__ import absolute_import
from __future__ import print_function
from future.moves.urllib.parse import quote_plus as urlquote_plus
from future.utils import text_type
import os
import xml.dom.minidom
from twisted.internet import defer
from twisted.internet import utils
from twisted.python import log
from buildbot import util
from buildbot.changes import base
from buildbot.util import bytes2NativeString
from buildbot.util import bytes2unicode
# these split_file_* functions are available for use as values to the
# split_file= argument.
def split_file_alwaystrunk(path):
return dict(path=path)
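# e.g. split_file_alwaystrunk("subdir/file.c") -> {'path': 'subdir/file.c'}
# (every path is treated as being on the default branch)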
def split_file_branches(path):
# turn "trunk/subdir/file.c" into (None, "subdir/file.c")
# and "trunk/subdir/" into (None, "subdir/")
# and "trunk/" into (None, "")
# and "branches/1.5.x/subdir/file.c" into ("branches/1.5.x", "subdir/file.c")
# and "branches/1.5.x/subdir/" into ("branches/1.5.x", "subdir/")
# and "branches/1.5.x/" into ("branches/1.5.x", "")
pieces = path.split('/')
if len(pieces) > 1 and pieces[0] == 'trunk':
return (None, '/'.join(pieces[1:]))
elif len(pieces) > 2 and pieces[0] == 'branches':
return ('/'.join(pieces[0:2]), '/'.join(pieces[2:]))
else:
return None
def split_file_projects_branches(path):
# turn projectname/trunk/subdir/file.c into dict(project=projectname,
# branch=trunk, path=subdir/file.c)
if "/" not in path:
return None
project, path = path.split("/", 1)
f = split_file_branches(path)
if f:
info = dict(project=project, path=f[1])
if f[0]:
info['branch'] = f[0]
return info
return f
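# For illustration (behavior implied by the rules above):
# split_file_projects_branches("proj/trunk/subdir/file.c")
# -> {'project': 'proj', 'path': 'subdir/file.c'}
# split_file_projects_branches("proj/tags/1.0/file.c")
# -> None (neither trunk/ nor branches/, so the path is ignored)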
class SVNPoller(base.PollingChangeSource, util.ComparableMixin):
"""
Poll a Subversion repository for changes and submit them to the change
master.
"""
compare_attrs = ("repourl", "split_file",
"svnuser", "svnpasswd", "project",
"pollInterval", "histmax",
"svnbin", "category", "cachepath", "pollAtLaunch")
parent = None # filled in when we're added
last_change = None
loop = None
def __init__(self, repourl, split_file=None,
svnuser=None, svnpasswd=None,
pollInterval=10 * 60, histmax=100,
svnbin='svn', revlinktmpl='', category=None,
project='', cachepath=None, pollinterval=-2,
extra_args=None, name=None, pollAtLaunch=False):
# for backward compatibility; the parameter used to be spelled with 'i'
if pollinterval != -2:
pollInterval = pollinterval
if name is None:
name = repourl
base.PollingChangeSource.__init__(self, name=name,
pollInterval=pollInterval,
pollAtLaunch=pollAtLaunch)
if repourl.endswith("/"):
repourl = repourl[:-1] # strip the trailing slash
self.repourl = repourl
self.extra_args = extra_args
self.split_file = split_file or split_file_alwaystrunk
self.svnuser = svnuser
self.svnpasswd = svnpasswd
self.revlinktmpl = revlinktmpl
# include environment variables required for ssh-agent auth
self.environ = os.environ.copy()
self.svnbin = svnbin
self.histmax = histmax
self._prefix = None
self.category = category if callable(
category) else util.ascii2unicode(category)
self.project = util.ascii2unicode(project)
self.cachepath = cachepath
if self.cachepath and os.path.exists(self.cachepath):
try:
with open(self.cachepath, "r") as f:
self.last_change = int(f.read().strip())
log.msg("SVNPoller: SVNPoller(%s) setting last_change to %s" % (
self.repourl, self.last_change))
# try writing it, too
with open(self.cachepath, "w") as f:
f.write(str(self.last_change))
except Exception:
self.cachepath = None
log.msg(("SVNPoller: SVNPoller(%s) cache file corrupt or unwriteable; " +
"skipping and not using") % self.repourl)
log.err()
def describe(self):
return "SVNPoller: watching %s" % self.repourl
def poll(self):
# Our return value is only used for unit testing.
# we need to figure out the repository root, so we can figure out
# repository-relative pathnames later. Each REPOURL is in the form
# (ROOT)/(PROJECT)/(BRANCH)/(FILEPATH), where (ROOT) is something
# like svn://svn.twistedmatrix.com/svn/Twisted (i.e. there is a
# physical repository at /svn/Twisted on that host), (PROJECT) is
# something like Projects/Twisted (i.e. within the repository's
# internal namespace, everything under Projects/Twisted/ has
# something to do with Twisted, but these directory names do not
# actually appear on the repository host), (BRANCH) is something like
# "trunk" or "branches/2.0.x", and (FILEPATH) is a tree-relative
# filename like "twisted/internet/defer.py".
# our self.repourl attribute contains (ROOT)/(PROJECT) combined
# together in a way that we can't separate without svn's help. If the
# user is not using the split_file= argument, then self.repourl might
# be (ROOT)/(PROJECT)/(BRANCH) . In any case, the filenames we will
# get back from 'svn log' will be of the form
# (PROJECT)/(BRANCH)/(FILEPATH), but we want to be able to remove
# that (PROJECT) prefix from them. To do this without requiring the
# user to tell us how repourl is split into ROOT and PROJECT, we do an
# 'svn info --xml' command at startup. This command will include a
# <root> element that tells us ROOT. We then strip this prefix from
# self.repourl to determine PROJECT, and then later we strip the
# PROJECT prefix from the filenames reported by 'svn log --xml' to
# get a (BRANCH)/(FILEPATH) that can be passed to split_file() to
# turn into separate BRANCH and FILEPATH values.
# whew.
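# A concrete illustration of the above, using the example URL from this
# comment: with repourl = svn://svn.twistedmatrix.com/svn/Twisted/Projects/Twisted
# and `svn info --xml` reporting root = svn://svn.twistedmatrix.com/svn/Twisted,
# the prefix works out to "Projects/Twisted", which is later stripped
# from every path reported by 'svn log --xml'.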
if self.project:
log.msg("SVNPoller: polling " + self.project)
else:
log.msg("SVNPoller: polling")
d = defer.succeed(None)
if not self._prefix:
d.addCallback(lambda _: self.get_prefix())
@d.addCallback
def set_prefix(prefix):
self._prefix = prefix
d.addCallback(self.get_logs)
d.addCallback(self.parse_logs)
d.addCallback(self.get_new_logentries)
d.addCallback(self.create_changes)
d.addCallback(self.submit_changes)
d.addCallback(self.finished_ok)
# eat errors
d.addErrback(log.err, 'SVNPoller: Error while polling')
return d
def getProcessOutput(self, args):
# this exists so we can override it during the unit tests
d = utils.getProcessOutput(self.svnbin, args, self.environ)
return d
def get_prefix(self):
args = ["info", "--xml", "--non-interactive", self.repourl]
if self.svnuser:
args.append("--username=%s" % self.svnuser)
if self.svnpasswd is not None:
args.append("--password=%s" % self.svnpasswd)
if self.extra_args:
args.extend(self.extra_args)
d = self.getProcessOutput(args)
@d.addCallback
def determine_prefix(output):
try:
doc = xml.dom.minidom.parseString(output)
except xml.parsers.expat.ExpatError:
log.msg("SVNPoller: SVNPoller.get_prefix: ExpatError in '%s'"
% output)
raise
rootnodes = doc.getElementsByTagName("root")
if not rootnodes:
# this happens if the URL we gave was already the root. In this
# case, our prefix is empty.
self._prefix = ""
return self._prefix
rootnode = rootnodes[0]
root = "".join([c.data for c in rootnode.childNodes])
# root will be a unicode string
if not self.repourl.startswith(root):
log.msg(format="Got root %(root)r from `svn info`, but it is "
"not a prefix of the configured repourl",
repourl=self.repourl, root=root)
raise RuntimeError("Configured repourl doesn't match svn root")
prefix = self.repourl[len(root):]
if prefix.startswith("/"):
prefix = prefix[1:]
log.msg("SVNPoller: repourl=%s, root=%s, so prefix=%s" %
(self.repourl, root, prefix))
return prefix
return d
def get_logs(self, _):
args = []
args.extend(["log", "--xml", "--verbose", "--non-interactive"])
if self.svnuser:
args.extend(["--username=%s" % self.svnuser])
if self.svnpasswd is not None:
args.extend(["--password=%s" % self.svnpasswd])
if self.extra_args:
args.extend(self.extra_args)
args.extend(["--limit=%d" % (self.histmax), self.repourl])
d = self.getProcessOutput(args)
return d
def parse_logs(self, output):
# parse the XML output, return a list of <logentry> nodes
try:
doc = xml.dom.minidom.parseString(output)
except xml.parsers.expat.ExpatError:
log.msg(
"SVNPoller: SVNPoller.parse_logs: ExpatError in '%s'" % output)
raise
logentries = doc.getElementsByTagName("logentry")
return logentries
def get_new_logentries(self, logentries):
last_change = old_last_change = self.last_change
# given a list of logentries, calculate new_last_change, and
# new_logentries, where new_logentries contains only the ones after
# last_change
new_last_change = None
new_logentries = []
if logentries:
new_last_change = int(logentries[0].getAttribute("revision"))
if last_change is None:
# if this is the first time we've been run, ignore any changes
# that occurred before now. This prevents a build at every
# startup.
log.msg('SVNPoller: starting at change %s' % new_last_change)
elif last_change == new_last_change:
# an unmodified repository will hit this case
log.msg('SVNPoller: no changes')
else:
for el in logentries:
if last_change == int(el.getAttribute("revision")):
break
new_logentries.append(el)
new_logentries.reverse() # return oldest first
self.last_change = new_last_change
log.msg('SVNPoller: _process_changes %s .. %s' %
(old_last_change, new_last_change))
return new_logentries
def _get_text(self, element, tag_name):
try:
child_nodes = element.getElementsByTagName(tag_name)[0].childNodes
text = "".join([t.data for t in child_nodes])
except IndexError:
text = "unknown"
return text
def _transform_path(self, path):
if not path.startswith(self._prefix):
log.msg(format="SVNPoller: ignoring path '%(path)s' which doesn't"
"start with prefix '%(prefix)s'",
path=path, prefix=self._prefix)
return
relative_path = path[len(self._prefix):]
if relative_path.startswith("/"):
relative_path = relative_path[1:]
where = self.split_file(relative_path)
# 'where' is either None, (branch, final_path) or a dict
if not where:
return
if isinstance(where, tuple):
where = dict(branch=where[0], path=where[1])
return where
def create_changes(self, new_logentries):
changes = []
for el in new_logentries:
revision = text_type(el.getAttribute("revision"))
revlink = u''
if self.revlinktmpl and revision:
revlink = self.revlinktmpl % urlquote_plus(revision)
revlink = text_type(revlink)
log.msg("Adding change revision %s" % (revision,))
author = self._get_text(el, "author")
comments = self._get_text(el, "msg")
# there is a "date" field, but it provides localtime in the
# repository's timezone, whereas we care about buildmaster's
# localtime (since this will get used to position the boxes on
# the Waterfall display, etc). So ignore the date field, and
# addChange will fill in with the current time
branches = {}
try:
pathlist = el.getElementsByTagName("paths")[0]
except IndexError: # weird, we got an empty revision
log.msg("ignoring commit with no paths")
continue
for p in pathlist.getElementsByTagName("path"):
kind = p.getAttribute("kind")
action = p.getAttribute("action")
path = "".join([t.data for t in p.childNodes])
# Convert the path from unicode to bytes
path = path.encode("ascii")
# Convert path from bytes to native string. Needed for Python 3.
path = bytes2NativeString(path, "ascii")
if path.startswith("/"):
path = path[1:]
if kind == "dir" and not path.endswith("/"):
path += "/"
where = self._transform_path(path)
# if 'where' is None, the file was outside any project that
# we care about and we should ignore it
if where:
branch = where.get("branch", None)
filename = where["path"]
if branch not in branches:
branches[branch] = {
'files': [], 'number_of_directories': 0}
if filename == "":
# root directory of branch
branches[branch]['files'].append(filename)
branches[branch]['number_of_directories'] += 1
elif filename.endswith("/"):
# subdirectory of branch
branches[branch]['files'].append(filename[:-1])
branches[branch]['number_of_directories'] += 1
else:
branches[branch]['files'].append(filename)
if "action" not in branches[branch]:
branches[branch]['action'] = action
for key in ("repository", "project", "codebase"):
if key in where:
branches[branch][key] = where[key]
for branch in branches:
action = branches[branch]['action']
files = branches[branch]['files']
number_of_directories_changed = branches[
branch]['number_of_directories']
number_of_files_changed = len(files)
if action == u'D' and number_of_directories_changed == 1 and number_of_files_changed == 1 and files[0] == '':
log.msg("Ignoring deletion of branch '%s'" % branch)
else:
chdict = dict(
author=author,
# weakly assume filenames are utf-8
files=[bytes2unicode(f, 'utf-8', 'replace') for f in files],
comments=comments,
revision=revision,
branch=util.ascii2unicode(branch),
revlink=revlink,
category=self.category,
repository=util.ascii2unicode(
branches[branch].get('repository', self.repourl)),
project=util.ascii2unicode(
branches[branch].get('project', self.project)),
codebase=util.ascii2unicode(
branches[branch].get('codebase', None)))
changes.append(chdict)
return changes
@defer.inlineCallbacks
def submit_changes(self, changes):
for chdict in changes:
yield self.master.data.updates.addChange(src=u'svn', **chdict)
def finished_ok(self, res):
if self.cachepath:
with open(self.cachepath, "w") as f:
f.write(str(self.last_change))
log.msg("SVNPoller: finished polling %s" % res)
return res
|
Lekensteyn/buildbot
|
master/buildbot/changes/svnpoller.py
|
Python
|
gpl-2.0
| 17,985
|
[
"Brian"
] |
5d2a50cb6111902bd0b67b9e00ec07473e4c8fbec255d6ac34d4a70ec339ded5
|
"""
==============================================
Window functions (:mod:`scipy.signal.windows`)
==============================================
The suite of window functions for filtering and spectral estimation.
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
dpss -- Discrete prolate spheroidal sequences
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_cosine -- Generalized Cosine window
general_gaussian -- Generalized Gaussian window
general_hamming -- Generalized Hamming window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
tukey -- Tukey window
"""
from .windows import *
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'general_cosine',
'general_hamming', 'chebwin', 'slepian', 'cosine', 'hann',
'exponential', 'tukey', 'get_window', 'dpss']
|
gfyoung/scipy
|
scipy/signal/windows/__init__.py
|
Python
|
bsd-3-clause
| 1,778
|
[
"Gaussian"
] |
e93260a49a2caf68486f5fded927ed54c4cd30e87132627ee0968f49d5b8c675
|
from __future__ import print_function, division
from sympy.core.basic import C
from sympy.core.singleton import S
from sympy.core.function import Function
from sympy.core import Add
from sympy.core.evalf import get_integer_part, PrecisionExhausted
###############################################################################
######################### FLOOR and CEILING FUNCTIONS #########################
###############################################################################
class RoundFunction(Function):
"""The base class for rounding functions."""
@classmethod
def eval(cls, arg):
if arg.is_integer:
return arg
if arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
i = C.im(arg)
if not i.has(S.ImaginaryUnit):
return cls(i)*S.ImaginaryUnit
return cls(arg, evaluate=False)
v = cls._eval_number(arg)
if v is not None:
return v
# Integral, numerical, symbolic part
ipart = npart = spart = S.Zero
# Extract integral (or complex integral) terms
terms = Add.make_args(arg)
for t in terms:
if t.is_integer or (t.is_imaginary and C.im(t).is_integer):
ipart += t
elif t.has(C.Symbol):
spart += t
else:
npart += t
if not (npart or spart):
return ipart
# Evaluate npart numerically if independent of spart
if npart and (
not spart or
npart.is_real and (spart.is_imaginary or (S.ImaginaryUnit*spart).is_real) or
npart.is_imaginary and spart.is_real):
try:
re, im = get_integer_part(
npart, cls._dir, {}, return_ints=True)
ipart += C.Integer(re) + C.Integer(im)*S.ImaginaryUnit
npart = S.Zero
except (PrecisionExhausted, NotImplementedError):
pass
spart += npart
if not spart:
return ipart
elif spart.is_imaginary or (S.ImaginaryUnit*spart).is_real:
return ipart + cls(C.im(spart), evaluate=False)*S.ImaginaryUnit
else:
return ipart + cls(spart, evaluate=False)
def _eval_is_finite(self):
return self.args[0].is_finite
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_integer(self):
return self.args[0].is_real
class floor(RoundFunction):
"""
Floor is a univariate function which returns the largest integer
value not greater than its argument. However this implementation
generalizes floor to complex numbers.
    More information can be found in "Concrete mathematics" by Graham,
    p. 87, or at http://mathworld.wolfram.com/FloorFunction.html.
>>> from sympy import floor, E, I, Float, Rational
>>> floor(17)
17
>>> floor(Rational(23, 10))
2
>>> floor(2*E)
5
>>> floor(-Float(0.567))
-1
>>> floor(-I/2)
-I
See Also
========
ceiling
"""
_dir = -1
@classmethod
def _eval_number(cls, arg):
if arg.is_Number:
if arg.is_Rational:
return C.Integer(arg.p // arg.q)
elif arg.is_Float:
return C.Integer(int(arg.floor()))
else:
return arg
if arg.is_NumberSymbol:
return arg.approximation_interval(C.Integer)[0]
def _eval_nseries(self, x, n, logx):
r = self.subs(x, 0)
args = self.args[0]
args0 = args.subs(x, 0)
if args0 == r:
direction = (args - args0).leadterm(x)[0]
if direction.is_positive:
return r
else:
return r - 1
else:
return r
class ceiling(RoundFunction):
"""
Ceiling is a univariate function which returns the smallest integer
value not less than its argument. Ceiling function is generalized
in this implementation to complex numbers.
    More information can be found in "Concrete mathematics" by Graham,
    p. 87, or at http://mathworld.wolfram.com/CeilingFunction.html.
>>> from sympy import ceiling, E, I, Float, Rational
>>> ceiling(17)
17
>>> ceiling(Rational(23, 10))
3
>>> ceiling(2*E)
6
>>> ceiling(-Float(0.567))
0
>>> ceiling(I/2)
I
See Also
========
floor
"""
_dir = 1
@classmethod
def _eval_number(cls, arg):
if arg.is_Number:
if arg.is_Rational:
return -C.Integer(-arg.p // arg.q)
elif arg.is_Float:
return C.Integer(int(arg.ceiling()))
else:
return arg
if arg.is_NumberSymbol:
return arg.approximation_interval(C.Integer)[1]
def _eval_nseries(self, x, n, logx):
r = self.subs(x, 0)
args = self.args[0]
args0 = args.subs(x, 0)
if args0 == r:
direction = (args - args0).leadterm(x)[0]
if direction.is_positive:
return r + 1
else:
return r
else:
return r
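# A minimal sketch of the evaluation logic above, assuming SymPy is importable:
# exact integer (or imaginary-integer) terms are extracted, purely numeric
# terms are rounded via evalf, and a symbolic remainder stays wrapped.
if __name__ == "__main__":
    from sympy import Symbol, E, I
    x = Symbol('x')
    print(floor(x + 2))    # floor(x) + 2: the integer term is pulled out
    print(floor(E + 2*I))  # 2 + 2*I: numeric real part rounded, imaginary part exact
    print(ceiling(x))      # stays symbolic for a general x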
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/sympy/functions/elementary/integers.py
|
Python
|
mit
| 5,301
|
[
"VisIt"
] |
2416643732b3b373232987a3613f70ca21f60007689bba571fe1e85e90c9709b
|
#!/usr/bin/env python
'''
Based on:
Zenke, Friedemann, Everton J. Agnes, and Wulfram Gerstner.
"Diverse Synaptic Plasticity Mechanisms Orchestrated to Form and Retrieve Memories in Spiking Neural Networks."
Nature Communications 6 (April 21, 2015).
Part of Zenke's rule embedded in modified Brunel 2000 / Ostojic 2014 network
author: Aditya Gilra, Jun 2016.
in Brian2rc3 for CAMP 2016.
'''
#import modules and functions to be used
from brian2 import * # importing brian also does:
# 'from pylab import *' which imports:
# matplot like commands into the namespace, further
# also can use np. for numpy and mpl. for matplotlib
from data_utils import *
stand_alone = True
if stand_alone: set_device('cpp_standalone', build_on_run=False)
else:
#prefs.codegen.target = 'numpy'
#prefs.codegen.target = 'weave'
prefs.codegen.target = 'cython'
import random
import time
np.random.seed(100) # set seed for reproducibility of simulations
random.seed(100) # set seed for reproducibility of simulations
# ###########################################
# Simulation parameters
# ###########################################
simdt = 0.1*ms
simtime = 10*second
defaultclock.dt = simdt # set Brian's sim time step
dt = simdt/second # convert to value in seconds
# ###########################################
# Neuron model
# ###########################################
# equation: dv/dt=(1/taum)*(-(v-el))
# with spike when v>vt, reset to vr
vt = 20.*mV # Spiking threshold
taum = 20.*ms # Membrane time constant
vr = 10.*mV # Reset potential
muext0 = 24*mV # external input to each neuron
taur = 0.5*ms # Refractory period
taudelay = 0.75*ms # synaptic delay
eqs_neurons='''
muext : volt
dv/dt=-v/taum + muext/taum : volt
'''
# ###########################################
# Network parameters: numbers
# ###########################################
N = 4096+1024 # Total number of neurons
fexc = 0.8 # Fraction of exc neurons
NE = int(fexc*N) # Number of excitatory cells
NI = N-NE # Number of inhibitory cells
# ###########################################
# Network parameters: synapses
# ###########################################
rescale = 2 # rescale C and J to maintain total input
C = 1000/rescale # Number of incoming connections on each neuron (exc or inh)
J = 0.01*mV*rescale # exc strength is J (in mV as we add to voltage)
# Critical J is ~ 0.45 mV in paper for N = 10000, C = 1000
g = 5.0 # -gJ is the inh strength. For exc-inh balance g >~ f/(1-f) = 4
# ###########################################
# Network parameters: synaptic plasticity
# ###########################################
wmax = 10. # hard bound on synaptic weight
Apre_tau = 20*ms # STDP Apre LTP time constant; tauplus
Apost_tau = 20*ms # STDP Apost LTD time constant; tauminus
Apre0 = 1.0 # incr in Apre, on pre-spikes; Aplus for LTP
# at spike coincidence, delta w = -Apre0*eta
Apost0 = 1.0 # incr in Apost on post-spikes; Aminus for LTD
eta = 5e-2 # learning rate
Apostslow0 = 1.0 # incr in Apostslow on post spike
Apostslow_tau = 100*ms
stdp_eqns = ''' wsyn : 1
dApre/dt=-Apre/Apre_tau : 1 (event-driven)
dApost/dt=-Apost/Apost_tau : 1 (event-driven)
dApostslow/dt=-Apostslow/Apostslow_tau : 1 (event-driven) '''
ratemid = 50*Hz
rate0 = 80*Hz
delta = 0.3
beta = Apostslow_tau/Apostslow0/(ratemid+rate0)
# heterosynaptic plasticity strength parameter
Apost0 = Apre0*Apre_tau/Apost_tau*(1+beta*Apostslow0**2*ratemid*rate0)
post_eqns = '''Apost += Apost0
wsyn += eta*Apre*(1 + Apostslow - beta*(Apostslow/Apostslow_tau)**2)
Apostslow+=Apostslow0
wsyn=clip(wsyn,0,inf)'''
pre_eqns = '''Apre += Apre0
wsyn += -Apost*eta + eta*delta
wsyn=clip(wsyn,0,inf)
v+=wsyn*J'''
def dwbydt(r):
return eta*(Apre0*Apre_tau/second - Apost0*Apost_tau/second)*r**2 + \
eta*Apre0*Apre_tau/second * Apostslow0*Apostslow_tau/second*r**3 -\
eta*Apre0*Apre_tau/second * beta*Hz**2 * Apostslow0**2 * r**4 + \
eta*delta*r
figure()
rrange = arange(0,90,0.1)
plot(rrange,dwbydt(rrange))
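# A minimal check, assuming the parameter values above: the zero crossings of
# dwbydt(r) are the fixed points of the mean weight drift, so bracket them by
# looking for sign changes of the sampled drift on the same rate grid.
drift = asarray(dwbydt(rrange))
fixed_points = rrange[where(diff(sign(drift)) != 0)[0]]
print 'approximate fixed-point rates (Hz):', fixed_points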
# ###########################################
# Initialize neuron (sub)groups
# ###########################################
P=NeuronGroup(N,model=eqs_neurons,\
threshold='v>=vt',reset='v=vr',refractory=taur,method='euler')
P.v = uniform(0.,vt/mV,N)*mV
PE = P[:NE]
PI = P[NE:]
# ###########################################
# Connecting the network
# ###########################################
sparseness = C/float(N)
# E to E connections
#conEE = Synapses(PE,PE,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conEE = Synapses(PE,PE,stdp_eqns,\
on_pre=pre_eqns,on_post=post_eqns,\
method='euler')
#conEE.connect(condition='i!=j',p=sparseness)
# need exact connection indices for weight monitor in standalone mode
conEE_idxs_pre = []
conEE_idxs_post = []
Ce = int(fexc*C)
for k in range(NE):
conEE_idxs_pre.extend(Ce*[k])
idxs = range(NE)
idxs.remove(k) # no autapses i.e. no self-connections
l = np.random.permutation(idxs)[:Ce]
conEE_idxs_post.extend(l)
conEE_idxs_assembly = where(array(conEE_idxs_post)[:Ce*400]<400)[0]
conEE_idxs_cross = where(array(conEE_idxs_post)[:Ce*400]>400)[0]
conEE_idxs_bgnd = where(array(conEE_idxs_post)[Ce*400:]>400)[0]
conEE.connect(i=conEE_idxs_pre,j=conEE_idxs_post)
conEE.delay = taudelay
conEE.wsyn = 1.
# E to I connections
conIE = Synapses(PE,PI,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conIE.connect(p=sparseness)
conIE.delay = taudelay
conIE.wsyn = 1
# I to E connections
conEI = Synapses(PI,PE,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conEI.connect(p=sparseness)
conEI.delay = taudelay
conEI.wsyn = -g
# I to I connections
conII = Synapses(PI,PI,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conII.connect(condition='i!=j',p=sparseness)
conII.delay = taudelay
conII.wsyn = -g
# ###########################################
# Stimuli
# ###########################################
P.muext = muext0
## 400 neurons (~10%) receive stimulus current to increase firing
#Pstim = P[:400]
#Pstim.muext = muext0 + 7*mV
# ###########################################
# Setting up monitors
# ###########################################
Nmon = N
sm = SpikeMonitor(P)
# Population monitor
popm = PopulationRateMonitor(P)
# voltage monitor
sm_vm = StateMonitor(P,'v',record=range(10)+range(NE,NE+10))
# weights monitor
wm = StateMonitor(conEE,'wsyn', record=range(Ce*NE), dt=simtime/20.)
# ###########################################
# Simulate
# ###########################################
print "Setup complete, running for",simtime,"at dt =",dt,"s."
t1 = time.time()
run(simtime,report='text')
device.build(directory='output', compile=True, run=True, debug=False)
print 'inittime + runtime, t = ', time.time() - t1
#print "For g,J =",g,J,"mean exc rate =",\
# sm_e.num_spikes/float(NE)/(simtime/second),'Hz.'
#print "For g,J =",g,J,"mean inh rate =",\
# sm_i.num_spikes/float(NI)/(simtime/second),'Hz.'
# ###########################################
# Make plots
# ###########################################
# always convert spikemon.t and spikemon.i to array-s before indexing
# spikemon.i[] indexing is extremely slow!
spiket = array(sm.t/second) # take spiketimes of all neurons
spikei = array(sm.i)
fig = figure()
# raster plot
subplot(231)
plot(sm.t,sm.i,',')
title(str(N)+" exc & inh neurons")
xlim([simtime/second-1,simtime/second])
xlabel("")
print "plotting firing rates"
subplot(232)
tau=50e-3
sigma = tau/2.
# firing rates
timeseries = arange(0,simtime/second+dt,dt)
rate = np.zeros(int(simtime/simdt))
for nrni in range(400):
rate += rate_from_spiketrain(spiket,spikei,simtime/second,sigma,dt,nrni)
plot(timeseries[:len(rate)],rate/400.,'r')
rate = np.zeros(int(simtime/simdt))
for nrni in range(400,800):
rate += rate_from_spiketrain(spiket,spikei,simtime/second,sigma,dt,nrni)
plot(timeseries[:len(rate)],rate/400.,'b')
title("exc rates: assembly (r), bgnd (b)")
ylabel("Hz")
ylim(0,300)
subplot(233)
hist(wm.wsyn[:,-1],bins=500,edgecolor='none')
xlabel('weight')
ylabel('count')
subplot(235)
num_to_plot = 10
for nrni in range(NE,NE+num_to_plot):
rate = rate_from_spiketrain(spiket,spikei,simtime/second,sigma,dt,nrni)
plot(timeseries[:len(rate)],rate)
#print mean(rate),len(sm_i[nrni])
#rates.append(rate)
title(str(num_to_plot)+" inh rates")
ylim(0,300)
#print "Mean rate = ",mean(rates)
xlabel("time (s)")
ylabel("Hz")
print "plotting weights"
subplot(236)
plot(wm.t/second,mean(wm.wsyn[conEE_idxs_assembly,:],axis=0),color='r')
plot(wm.t/second,mean(wm.wsyn[conEE_idxs_cross,:],axis=0),color='m')
plot(wm.t/second,mean(wm.wsyn[conEE_idxs_bgnd,:],axis=0),color='b')
title("assembly weights (cross=m)")
ylabel("arb")
xlabel("time (s)")
print conEE.wsyn
fig.tight_layout()
show()
|
h-mayorquin/camp_india_2016
|
tutorials/LTPinnetworks2/Step3e_Zenke_etal_2014.py
|
Python
|
mit
| 9,216
|
[
"Brian",
"NEURON"
] |
135d6309e862134f36ca1efae097647f6d08e12cf0a73cd7abba67239f296348
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from metaphone.metaphone import doublemetaphone
class MetaphoneTestCase(unittest.TestCase):
"""
"""
def test_single_result(self):
result = doublemetaphone(u"aubrey")
self.assertEquals(result, ('APR', ''))
def test_double_result(self):
result = doublemetaphone(u"richard")
self.assertEquals(result, ('RXRT', 'RKRT'))
def test_general_word_list(self):
result = doublemetaphone('Jose')
self.assertEquals(result, ('HS', ''))
result = doublemetaphone('cambrillo')
self.assertEquals(result, ('KMPRL', 'KMPR'))
result = doublemetaphone('otto')
self.assertEquals(result, ('AT', ''))
result = doublemetaphone('aubrey')
self.assertEquals(result, ('APR', ''))
result = doublemetaphone('maurice')
self.assertEquals(result, ('MRS', ''))
result = doublemetaphone('auto')
self.assertEquals(result, ('AT', ''))
result = doublemetaphone('maisey')
self.assertEquals(result, ('MS', ''))
result = doublemetaphone('catherine')
self.assertEquals(result, ('K0RN', 'KTRN'))
result = doublemetaphone('geoff')
self.assertEquals(result, ('JF', 'KF'))
result = doublemetaphone('Chile')
self.assertEquals(result, ('XL', ''))
result = doublemetaphone('katherine')
self.assertEquals(result, ('K0RN', 'KTRN'))
result = doublemetaphone('steven')
self.assertEquals(result, ('STFN', ''))
result = doublemetaphone('zhang')
self.assertEquals(result, ('JNK', ''))
result = doublemetaphone('bob')
self.assertEquals(result, ('PP', ''))
result = doublemetaphone('ray')
self.assertEquals(result, ('R', ''))
result = doublemetaphone('Tux')
self.assertEquals(result, ('TKS', ''))
result = doublemetaphone('bryan')
self.assertEquals(result, ('PRN', ''))
result = doublemetaphone('bryce')
self.assertEquals(result, ('PRS', ''))
result = doublemetaphone('Rapelje')
self.assertEquals(result, ('RPL', ''))
result = doublemetaphone('richard')
self.assertEquals(result, ('RXRT', 'RKRT'))
result = doublemetaphone('solilijs')
self.assertEquals(result, ('SLLS', ''))
result = doublemetaphone('Dallas')
self.assertEquals(result, ('TLS', ''))
result = doublemetaphone('Schwein')
self.assertEquals(result, ('XN', 'XFN'))
result = doublemetaphone('dave')
self.assertEquals(result, ('TF', ''))
result = doublemetaphone('eric')
self.assertEquals(result, ('ARK', ''))
result = doublemetaphone('Parachute')
self.assertEquals(result, ('PRKT', ''))
result = doublemetaphone('brian')
self.assertEquals(result, ('PRN', ''))
result = doublemetaphone('randy')
self.assertEquals(result, ('RNT', ''))
result = doublemetaphone('Through')
self.assertEquals(result, ('0R', 'TR'))
result = doublemetaphone('Nowhere')
self.assertEquals(result, ('NR', ''))
result = doublemetaphone('heidi')
self.assertEquals(result, ('HT', ''))
result = doublemetaphone('Arnow')
self.assertEquals(result, ('ARN', 'ARNF'))
result = doublemetaphone('Thumbail')
self.assertEquals(result, ('0MPL', 'TMPL'))
def test_homophones(self):
self.assertEqual(
doublemetaphone(u"tolled"),
doublemetaphone(u"told"))
self.assertEqual(
doublemetaphone(u"katherine"),
doublemetaphone(u"catherine"))
self.assertEqual(
doublemetaphone(u"brian"),
doublemetaphone(u"bryan"))
def test_similar_names(self):
result = doublemetaphone("Bartoš")
self.assertEquals(result, ('PRTS', ''))
result = doublemetaphone(u"Bartosz")
self.assertEquals(result, ('PRTS', 'PRTX'))
result = doublemetaphone(u"Bartosch")
self.assertEquals(result, ('PRTX', ''))
result = doublemetaphone(u"Bartos")
self.assertEquals(result, ('PRTS', ''))
result = set(doublemetaphone(u"Jablonski")).intersection(
doublemetaphone(u"Yablonsky"))
self.assertEquals(list(result), ['APLNSK'])
result = set(doublemetaphone(u"Smith")).intersection(
doublemetaphone(u"Schmidt"))
self.assertEquals(list(result), ['XMT'])
def test_non_english_unicode(self):
result = doublemetaphone("andestādītu")
self.assertEquals(result, ('ANTSTTT', ''))
def test_c_cedilla(self):
result = doublemetaphone("français")
self.assertEquals(result, ('FRNS', 'FRNSS'))
result = doublemetaphone("garçon")
self.assertEquals(result, ('KRSN', ''))
result = doublemetaphone("leçon")
self.assertEquals(result, ('LSN', ''))
def test_various_german(self):
result = doublemetaphone("ach")
self.assertEquals(result, ("AK", ""))
result = doublemetaphone("bacher")
self.assertEquals(result, ("PKR", ""))
result = doublemetaphone("macher")
self.assertEquals(result, ("MKR", ""))
def test_various_italian(self):
result = doublemetaphone("bacci")
self.assertEquals(result, ("PX", ""))
result = doublemetaphone("bertucci")
self.assertEquals(result, ("PRTX", ""))
result = doublemetaphone("bellocchio")
self.assertEquals(result, ("PLX", ""))
result = doublemetaphone("bacchus")
self.assertEquals(result, ("PKS", ""))
result = doublemetaphone("focaccia")
self.assertEquals(result, ("FKX", ""))
result = doublemetaphone("chianti")
self.assertEquals(result, ("KNT", ""))
result = doublemetaphone("tagliaro")
self.assertEquals(result, ("TKLR", "TLR"))
result = doublemetaphone("biaggi")
self.assertEquals(result, ("PJ", "PK"))
def test_various_spanish(self):
result = doublemetaphone("bajador")
self.assertEquals(result, ("PJTR", "PHTR"))
result = doublemetaphone("cabrillo")
self.assertEquals(result, ("KPRL", "KPR"))
result = doublemetaphone("gallegos")
self.assertEquals(result, ("KLKS", "KKS"))
result = doublemetaphone("San Jacinto")
self.assertEquals(result, ("SNHSNT", ""))
def test_various_french(self):
result = doublemetaphone("rogier")
self.assertEquals(result, ("RJ", "RJR"))
result = doublemetaphone("breaux")
self.assertEquals(result, ("PR", ""))
def test_various_slavic(self):
result = doublemetaphone("Wewski")
self.assertEquals(result, ("ASK", "FFSK"))
def test_various_chinese(self):
result = doublemetaphone("zhao")
self.assertEquals(result, ("J", ""))
def test_dutch_origin(self):
result = doublemetaphone("school")
self.assertEquals(result, ("SKL", ""))
result = doublemetaphone("schooner")
self.assertEquals(result, ("SKNR", ""))
result = doublemetaphone("schermerhorn")
self.assertEquals(result, ("XRMRRN", "SKRMRRN"))
result = doublemetaphone("schenker")
self.assertEquals(result, ("XNKR", "SKNKR"))
def test_ch_words(self):
result = doublemetaphone("Charac")
self.assertEquals(result, ("KRK", ""))
result = doublemetaphone("Charis")
self.assertEquals(result, ("KRS", ""))
result = doublemetaphone("chord")
self.assertEquals(result, ("KRT", ""))
result = doublemetaphone("Chym")
self.assertEquals(result, ("KM", ""))
result = doublemetaphone("Chia")
self.assertEquals(result, ("K", ""))
result = doublemetaphone("chem")
self.assertEquals(result, ("KM", ""))
result = doublemetaphone("chore")
self.assertEquals(result, ("XR", ""))
result = doublemetaphone("orchestra")
self.assertEquals(result, ("ARKSTR", ""))
result = doublemetaphone("architect")
self.assertEquals(result, ("ARKTKT", ""))
result = doublemetaphone("orchid")
self.assertEquals(result, ("ARKT", ""))
def test_cc_words(self):
result = doublemetaphone("accident")
self.assertEquals(result, ("AKSTNT", ""))
result = doublemetaphone("accede")
self.assertEquals(result, ("AKST", ""))
result = doublemetaphone("succeed")
self.assertEquals(result, ("SKST", ""))
def test_mc_words(self):
result = doublemetaphone("mac caffrey")
self.assertEquals(result, ("MKFR", ""))
result = doublemetaphone("mac gregor")
self.assertEquals(result, ("MKRKR", ""))
result = doublemetaphone("mc crae")
self.assertEquals(result, ("MKR", ""))
result = doublemetaphone("mcclain")
self.assertEquals(result, ("MKLN", ""))
def test_gh_words(self):
result = doublemetaphone("laugh")
self.assertEquals(result, ("LF", ""))
result = doublemetaphone("cough")
self.assertEquals(result, ("KF", ""))
result = doublemetaphone("rough")
self.assertEquals(result, ("RF", ""))
def test_g3_words(self):
result = doublemetaphone("gya")
self.assertEquals(result, ("K", "J"))
result = doublemetaphone("ges")
self.assertEquals(result, ("KS", "JS"))
result = doublemetaphone("gep")
self.assertEquals(result, ("KP", "JP"))
result = doublemetaphone("geb")
self.assertEquals(result, ("KP", "JP"))
result = doublemetaphone("gel")
self.assertEquals(result, ("KL", "JL"))
result = doublemetaphone("gey")
self.assertEquals(result, ("K", "J"))
result = doublemetaphone("gib")
self.assertEquals(result, ("KP", "JP"))
result = doublemetaphone("gil")
self.assertEquals(result, ("KL", "JL"))
result = doublemetaphone("gin")
self.assertEquals(result, ("KN", "JN"))
result = doublemetaphone("gie")
self.assertEquals(result, ("K", "J"))
result = doublemetaphone("gei")
self.assertEquals(result, ("K", "J"))
result = doublemetaphone("ger")
self.assertEquals(result, ("KR", "JR"))
result = doublemetaphone("danger")
self.assertEquals(result, ("TNJR", "TNKR"))
result = doublemetaphone("manager")
self.assertEquals(result, ("MNKR", "MNJR"))
result = doublemetaphone("dowager")
self.assertEquals(result, ("TKR", "TJR"))
def test_pb_words(self):
result = doublemetaphone("Campbell")
self.assertEquals(result, ("KMPL", ""))
result = doublemetaphone("raspberry")
self.assertEquals(result, ("RSPR", ""))
def test_th_words(self):
result = doublemetaphone("Thomas")
self.assertEquals(result, ("TMS", ""))
result = doublemetaphone("Thames")
self.assertEquals(result, ("TMS", ""))
|
oubiwann/metaphone
|
metaphone/tests/test_metaphone.py
|
Python
|
bsd-3-clause
| 11,123
|
[
"Brian"
] |
edcbe1685121d2c654641b8f4dcb43d9a83a953b82830557a56f3fddd1727e17
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Test module for the H5MD interface.
"""
import os
import sys
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd # pylint: disable=import-error
import h5py # h5py has to be imported *after* espressomd (MPI)
from espressomd.interactions import Virtual
npart = 26
class CommonTests(ut.TestCase):
"""
Class that holds common test methods.
"""
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
    # avoid placing particles outside of the main box, otherwise particle
    # positions are folded in the core when writing out and we cannot directly
    # compare the positions in the dataset with where the particles were set.
    # One would need to unfold the positions from the hdf5 file.
box_l = npart / 2.0
system.box_l = [box_l, box_l, box_l]
system.cell_system.skin = 0.4
system.time_step = 0.01
for i in range(npart):
system.part.add(id=i, pos=np.array(3 * [i], dtype=float),
v=np.array([1.0, 2.0, 3.0]), type=23)
if espressomd.has_features(['MASS']):
system.part[i].mass = 2.3
if espressomd.has_features(['EXTERNAL_FORCES']):
system.part[i].ext_force = [0.1, 0.2, 0.3]
vb = Virtual()
system.bonded_inter.add(vb)
for i in range(npart - 1):
system.part[i].add_bond((vb, i + 1))
system.integrator.run(steps=0)
@classmethod
def setUpClass(cls):
if os.path.isfile('test.h5'):
os.remove('test.h5')
cls.py_file = cls.py_pos = cls.py_vel = cls.py_f = cls.py_id = cls.py_img = None
def test_metadata(self):
"""Test if the H5MD metadata has been written properly."""
self.assertEqual(self.py_file['h5md'].attrs['version'][0], 1)
self.assertEqual(self.py_file['h5md'].attrs['version'][1], 1)
self.assertIn('creator', self.py_file['h5md'])
self.assertIn('name', self.py_file['h5md/creator'].attrs)
self.assertIn('version', self.py_file['h5md/creator'].attrs)
self.assertEqual(
self.py_file['h5md/creator'].attrs['name'][:], b'ESPResSo')
self.assertIn('author', self.py_file['h5md'])
self.assertIn('name', self.py_file['h5md/author'].attrs)
def test_pos(self):
"""Test if positions have been written properly."""
self.assertTrue(np.allclose(
np.array([3 * [float(i) % self.box_l] for i in range(npart)]),
np.array([x for (_, x) in sorted(zip(self.py_id, self.py_pos))])))
def test_img(self):
"""Test if images have been written properly."""
images = np.append(np.zeros((int(npart / 2), 3)),
np.ones((int(npart / 2), 3)))
images = images.reshape(npart, 3)
self.assertTrue((np.allclose(np.array(
[x for (_, x) in sorted(zip(self.py_id, self.py_img))]), images)))
def test_vel(self):
"""Test if velocities have been written properly."""
self.assertTrue(np.allclose(
np.array([[1.0, 2.0, 3.0] for _ in range(npart)]),
np.array([x for (_, x) in sorted(zip(self.py_id, self.py_vel))])),
msg="Velocities not written correctly by H5md!")
@utx.skipIfMissingFeatures(['EXTERNAL_FORCES'])
def test_f(self):
"""Test if forces have been written properly."""
self.assertTrue(np.allclose(
np.array([[0.1, 0.2, 0.3] for _ in range(npart)]),
np.array([x for (_, x) in sorted(zip(self.py_id, self.py_f))])),
msg="Forces not written correctly by H5md!")
def test_bonds(self):
"""Test if bonds have been written properly."""
self.assertEqual(len(self.py_bonds), npart - 1)
for i in range(npart - 1):
bond = [x for x in self.py_bonds if x[0] == i][0]
self.assertEqual(bond[0], i + 0)
self.assertEqual(bond[1], i + 1)
@utx.skipIfMissingFeatures(['H5MD'])
class H5mdTestOrdered(CommonTests):
"""
Test the core implementation of writing hdf5 files if written ordered.
"""
@classmethod
def setUpClass(cls):
write_ordered = True
from espressomd.io.writer import h5md # pylint: disable=import-error
h5 = h5md.H5md(
filename="test.h5",
write_pos=True,
write_vel=True,
write_force=True,
write_species=True,
write_mass=True,
write_ordered=write_ordered)
h5.write()
h5.flush()
h5.close()
cls.py_file = h5py.File("test.h5", 'r')
cls.py_pos = cls.py_file['particles/atoms/position/value'][0]
cls.py_img = cls.py_file['particles/atoms/image/value'][0]
cls.py_vel = cls.py_file['particles/atoms/velocity/value'][0]
cls.py_f = cls.py_file['particles/atoms/force/value'][0]
cls.py_id = cls.py_file['particles/atoms/id/value'][0]
cls.py_bonds = cls.py_file['connectivity/atoms']
@classmethod
def tearDownClass(cls):
os.remove("test.h5")
def test_ids(self):
"""Test if ids have been written properly."""
self.assertTrue(np.allclose(np.array(range(npart)), self.py_id),
msg="ids incorrectly ordered and written by H5md!")
@utx.skipIfMissingFeatures(['H5MD'])
class H5mdTestUnordered(CommonTests):
"""
Test the core implementation of writing hdf5 files if written un-ordered.
"""
@classmethod
def setUpClass(cls):
write_ordered = False
from espressomd.io.writer import h5md # pylint: disable=import-error
h5 = h5md.H5md(
filename="test.h5",
write_pos=True,
write_vel=True,
write_force=True,
write_species=True,
write_mass=True,
write_ordered=write_ordered)
h5.write()
h5.flush()
h5.close()
cls.py_file = h5py.File("test.h5", 'r')
cls.py_pos = cls.py_file['particles/atoms/position/value'][0]
cls.py_img = cls.py_file['particles/atoms/image/value'][0]
cls.py_vel = cls.py_file['particles/atoms/velocity/value'][0]
cls.py_f = cls.py_file['particles/atoms/force/value'][0]
cls.py_id = cls.py_file['particles/atoms/id/value'][0]
cls.py_bonds = cls.py_file['connectivity/atoms']
@classmethod
def tearDownClass(cls):
os.remove("test.h5")
if __name__ == "__main__":
suite = ut.TestSuite()
suite.addTests(ut.TestLoader().loadTestsFromTestCase(H5mdTestUnordered))
suite.addTests(ut.TestLoader().loadTestsFromTestCase(H5mdTestOrdered))
result = ut.TextTestRunner(verbosity=4).run(suite)
sys.exit(not result.wasSuccessful())
|
mkuron/espresso
|
testsuite/python/h5md.py
|
Python
|
gpl-3.0
| 7,413
|
[
"ESPResSo"
] |
394d44dc4b899c2262ecd36fcc6a32ab3744d7b8d84ef446ca0179152d7587de
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import cookielib
import glob
import inspect
import logging
import httplib
import os
import random
import re
import socket
import string
import sys
import tempfile
import threading
import time
import urllib2
import urlparse
import lib.controller.checks
import lib.core.common
import lib.core.threads
import lib.core.convert
import lib.request.connect
import lib.utils.search
from lib.controller.checks import checkConnection
from lib.core.common import Backend
from lib.core.common import boldifyMessage
from lib.core.common import checkFile
from lib.core.common import dataToStdout
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSafeExString
from lib.core.common import extractRegexResult
from lib.core.common import filterStringValue
from lib.core.common import findPageForms
from lib.core.common import getConsoleWidth
from lib.core.common import getFileItems
from lib.core.common import getFileType
from lib.core.common import getUnicode
from lib.core.common import isListLike
from lib.core.common import normalizePath
from lib.core.common import ntToPosixSlashes
from lib.core.common import openFile
from lib.core.common import parseTargetDirect
from lib.core.common import parseTargetUrl
from lib.core.common import paths
from lib.core.common import randomStr
from lib.core.common import readCachedFileContent
from lib.core.common import readInput
from lib.core.common import resetCookieJar
from lib.core.common import runningAsAdmin
from lib.core.common import safeExpandUser
from lib.core.common import setOptimize
from lib.core.common import setPaths
from lib.core.common import singleTimeWarnMessage
from lib.core.common import UnicodeRawConfigParser
from lib.core.common import urldecode
from lib.core.convert import base64unpickle
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import mergedOptions
from lib.core.data import queries
from lib.core.datatype import AttribDict
from lib.core.datatype import InjectionDict
from lib.core.defaults import defaults
from lib.core.dicts import DBMS_DICT
from lib.core.dicts import DUMP_REPLACEMENTS
from lib.core.enums import ADJUST_TIME_DELAY
from lib.core.enums import AUTH_TYPE
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import DUMP_FORMAT
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import MOBILES
from lib.core.enums import OPTION_TYPE
from lib.core.enums import PAYLOAD
from lib.core.enums import PRIORITY
from lib.core.enums import PROXY_TYPE
from lib.core.enums import REFLECTIVE_COUNTER
from lib.core.enums import WIZARD
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapInstallationException
from lib.core.exception import SqlmapMissingDependence
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapMissingPrivileges
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapSyntaxException
from lib.core.exception import SqlmapSystemException
from lib.core.exception import SqlmapUnsupportedDBMSException
from lib.core.exception import SqlmapUserQuitException
from lib.core.log import FORMATTER
from lib.core.optiondict import optDict
from lib.core.settings import BURP_REQUEST_REGEX
from lib.core.settings import BURP_XML_HISTORY_REGEX
from lib.core.settings import CODECS_LIST_PAGE
from lib.core.settings import CRAWL_EXCLUDE_EXTENSIONS
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DBMS_ALIASES
from lib.core.settings import DEFAULT_PAGE_ENCODING
from lib.core.settings import DEFAULT_TOR_HTTP_PORTS
from lib.core.settings import DEFAULT_TOR_SOCKS_PORT
from lib.core.settings import DUMMY_URL
from lib.core.settings import IGNORE_SAVE_OPTIONS
from lib.core.settings import INJECT_HERE_MARK
from lib.core.settings import IS_WIN
from lib.core.settings import KB_CHARS_BOUNDARY_CHAR
from lib.core.settings import KB_CHARS_LOW_FREQUENCY_ALPHABET
from lib.core.settings import LOCALHOST
from lib.core.settings import MAX_CONNECT_RETRIES
from lib.core.settings import MAX_NUMBER_OF_THREADS
from lib.core.settings import NULL
from lib.core.settings import PARAMETER_SPLITTING_REGEX
from lib.core.settings import PRECONNECT_CANDIDATE_TIMEOUT
from lib.core.settings import PROBLEMATIC_CUSTOM_INJECTION_PATTERNS
from lib.core.settings import SITE
from lib.core.settings import SOCKET_PRE_CONNECT_QUEUE_SIZE
from lib.core.settings import SQLMAP_ENVIRONMENT_PREFIX
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import SUPPORTED_OS
from lib.core.settings import TIME_DELAY_CANDIDATES
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import UNION_CHAR_REGEX
from lib.core.settings import UNKNOWN_DBMS_VERSION
from lib.core.settings import URI_INJECTABLE_REGEX
from lib.core.settings import VERSION_STRING
from lib.core.settings import WEBSCARAB_SPLITTER
from lib.core.threads import getCurrentThreadData
from lib.core.threads import setDaemon
from lib.core.update import update
from lib.parse.configfile import configFileParser
from lib.parse.payloads import loadBoundaries
from lib.parse.payloads import loadPayloads
from lib.parse.sitemap import parseSitemap
from lib.request.basic import checkCharEncoding
from lib.request.connect import Connect as Request
from lib.request.dns import DNSServer
from lib.request.basicauthhandler import SmartHTTPBasicAuthHandler
from lib.request.httpshandler import HTTPSHandler
from lib.request.pkihandler import HTTPSPKIAuthHandler
from lib.request.rangehandler import HTTPRangeHandler
from lib.request.redirecthandler import SmartRedirectHandler
from lib.request.templates import getPageTemplate
from lib.utils.crawler import crawl
from lib.utils.deps import checkDependencies
from lib.utils.search import search
from lib.utils.purge import purge
from thirdparty.keepalive import keepalive
from thirdparty.oset.pyoset import oset
from thirdparty.socks import socks
from xml.etree.ElementTree import ElementTree
authHandler = urllib2.BaseHandler()
httpsHandler = HTTPSHandler()
keepAliveHandler = keepalive.HTTPHandler()
proxyHandler = urllib2.ProxyHandler()
redirectHandler = SmartRedirectHandler()
rangeHandler = HTTPRangeHandler()
def _feedTargetsDict(reqFile, addedTargetUrls):
"""
    Parses WebScarab and Burp logs and adds results to the target URL list
"""
def _parseWebScarabLog(content):
"""
        Parses WebScarab logs (POST method not supported)
"""
reqResList = content.split(WEBSCARAB_SPLITTER)
for request in reqResList:
url = extractRegexResult(r"URL: (?P<result>.+?)\n", request, re.I)
method = extractRegexResult(r"METHOD: (?P<result>.+?)\n", request, re.I)
cookie = extractRegexResult(r"COOKIE: (?P<result>.+?)\n", request, re.I)
if not method or not url:
logger.debug("not a valid WebScarab log data")
continue
if method.upper() == HTTPMETHOD.POST:
warnMsg = "POST requests from WebScarab logs aren't supported "
warnMsg += "as their body content is stored in separate files. "
warnMsg += "Nevertheless you can use -r to load them individually."
logger.warning(warnMsg)
continue
if not(conf.scope and not re.search(conf.scope, url, re.I)):
if not kb.targets or url not in addedTargetUrls:
kb.targets.add((url, method, None, cookie, None))
addedTargetUrls.add(url)
def _parseBurpLog(content):
"""
        Parses Burp logs
"""
if not re.search(BURP_REQUEST_REGEX, content, re.I | re.S):
if re.search(BURP_XML_HISTORY_REGEX, content, re.I | re.S):
reqResList = []
for match in re.finditer(BURP_XML_HISTORY_REGEX, content, re.I | re.S):
port, request = match.groups()
request = request.decode("base64")
_ = re.search(r"%s:.+" % re.escape(HTTP_HEADER.HOST), request)
if _:
host = _.group(0).strip()
if not re.search(r":\d+\Z", host):
request = request.replace(host, "%s:%d" % (host, int(port)))
reqResList.append(request)
else:
reqResList = [content]
else:
reqResList = re.finditer(BURP_REQUEST_REGEX, content, re.I | re.S)
for match in reqResList:
request = match if isinstance(match, basestring) else match.group(0)
request = re.sub(r"\A[^\w]+", "", request)
schemePort = re.search(r"(http[\w]*)\:\/\/.*?\:([\d]+).+?={10,}", request, re.I | re.S)
if schemePort:
scheme = schemePort.group(1)
port = schemePort.group(2)
else:
scheme, port = None, None
if not re.search(r"^[\n]*(%s).*?\sHTTP\/" % "|".join(getPublicTypeMembers(HTTPMETHOD, True)), request, re.I | re.M):
continue
if re.search(r"^[\n]*%s.*?\.(%s)\sHTTP\/" % (HTTPMETHOD.GET, "|".join(CRAWL_EXCLUDE_EXTENSIONS)), request, re.I | re.M):
continue
getPostReq = False
url = None
host = None
method = None
data = None
cookie = None
params = False
newline = None
lines = request.split('\n')
headers = []
for index in xrange(len(lines)):
line = lines[index]
if not line.strip() and index == len(lines) - 1:
break
newline = "\r\n" if line.endswith('\r') else '\n'
line = line.strip('\r')
match = re.search(r"\A(%s) (.+) HTTP/[\d.]+\Z" % "|".join(getPublicTypeMembers(HTTPMETHOD, True)), line) if not method else None
if len(line.strip()) == 0 and method and method != HTTPMETHOD.GET and data is None:
data = ""
params = True
elif match:
method = match.group(1)
url = match.group(2)
if any(_ in line for _ in ('?', '=', CUSTOM_INJECTION_MARK_CHAR)):
params = True
getPostReq = True
# POST parameters
elif data is not None and params:
data += "%s%s" % (line, newline)
# GET parameters
elif "?" in line and "=" in line and ": " not in line:
params = True
# Headers
elif re.search(r"\A\S+:", line):
key, value = line.split(":", 1)
value = value.strip().replace("\r", "").replace("\n", "")
# Cookie and Host headers
if key.upper() == HTTP_HEADER.COOKIE.upper():
cookie = value
elif key.upper() == HTTP_HEADER.HOST.upper():
if '://' in value:
scheme, value = value.split('://')[:2]
splitValue = value.split(":")
host = splitValue[0]
if len(splitValue) > 1:
port = filterStringValue(splitValue[1], "[0-9]")
                # Avoid adding a static Content-Length header to
                # headers; treat the following lines as
                # POSTed data
if key.upper() == HTTP_HEADER.CONTENT_LENGTH.upper():
params = True
# Avoid proxy and connection type related headers
elif key not in (HTTP_HEADER.PROXY_CONNECTION, HTTP_HEADER.CONNECTION):
headers.append((getUnicode(key), getUnicode(value)))
if CUSTOM_INJECTION_MARK_CHAR in re.sub(PROBLEMATIC_CUSTOM_INJECTION_PATTERNS, "", value or ""):
params = True
data = data.rstrip("\r\n") if data else data
if getPostReq and (params or cookie):
if not port and isinstance(scheme, basestring) and scheme.lower() == "https":
port = "443"
elif not scheme and port == "443":
scheme = "https"
if conf.forceSSL:
scheme = "https"
port = port or "443"
if not host:
errMsg = "invalid format of a request file"
raise SqlmapSyntaxException, errMsg
if not url.startswith("http"):
url = "%s://%s:%s%s" % (scheme or "http", host, port or "80", url)
scheme = None
port = None
if not(conf.scope and not re.search(conf.scope, url, re.I)):
if not kb.targets or url not in addedTargetUrls:
kb.targets.add((url, conf.method or method, data, cookie, tuple(headers)))
addedTargetUrls.add(url)
checkFile(reqFile)
try:
with openFile(reqFile, "rb") as f:
content = f.read()
except (IOError, OSError, MemoryError), ex:
errMsg = "something went wrong while trying "
errMsg += "to read the content of file '%s' ('%s')" % (reqFile, getSafeExString(ex))
raise SqlmapSystemException(errMsg)
if conf.scope:
logger.info("using regular expression '%s' for filtering targets" % conf.scope)
_parseBurpLog(content)
_parseWebScarabLog(content)
if not addedTargetUrls:
errMsg = "unable to find usable request(s) "
errMsg += "in provided file ('%s')" % reqFile
raise SqlmapGenericException(errMsg)
def _loadQueries():
"""
Loads queries from 'xml/queries.xml' file.
"""
def iterate(node, retVal=None):
class DictObject(object):
def __init__(self):
self.__dict__ = {}
def __contains__(self, name):
return name in self.__dict__
if retVal is None:
retVal = DictObject()
for child in node.findall("*"):
instance = DictObject()
retVal.__dict__[child.tag] = instance
if child.attrib:
instance.__dict__.update(child.attrib)
else:
iterate(child, instance)
return retVal
tree = ElementTree()
try:
tree.parse(paths.QUERIES_XML)
except Exception, ex:
errMsg = "something appears to be wrong with "
errMsg += "the file '%s' ('%s'). Please make " % (paths.QUERIES_XML, getSafeExString(ex))
errMsg += "sure that you haven't made any changes to it"
raise SqlmapInstallationException, errMsg
for node in tree.findall("*"):
queries[node.attrib['value']] = iterate(node)
def _setMultipleTargets():
"""
Define a configuration parameter if we are running in multiple target
mode.
"""
initialTargetsCount = len(kb.targets)
addedTargetUrls = set()
if not conf.logFile:
return
debugMsg = "parsing targets list from '%s'" % conf.logFile
logger.debug(debugMsg)
if not os.path.exists(conf.logFile):
errMsg = "the specified list of targets does not exist"
raise SqlmapFilePathException(errMsg)
if os.path.isfile(conf.logFile):
_feedTargetsDict(conf.logFile, addedTargetUrls)
elif os.path.isdir(conf.logFile):
files = os.listdir(conf.logFile)
files.sort()
for reqFile in files:
if not re.search("([\d]+)\-request", reqFile):
continue
_feedTargetsDict(os.path.join(conf.logFile, reqFile), addedTargetUrls)
else:
errMsg = "the specified list of targets is not a file "
errMsg += "nor a directory"
raise SqlmapFilePathException(errMsg)
updatedTargetsCount = len(kb.targets)
if updatedTargetsCount > initialTargetsCount:
infoMsg = "sqlmap parsed %d " % (updatedTargetsCount - initialTargetsCount)
infoMsg += "(parameter unique) requests from the "
infoMsg += "targets list ready to be tested"
logger.info(infoMsg)
def _adjustLoggingFormatter():
"""
    Solves the problem of line deletion caused by overlapping logging messages
and retrieved data info in inference mode
"""
if hasattr(FORMATTER, '_format'):
return
def format(record):
message = FORMATTER._format(record)
message = boldifyMessage(message)
if kb.get("prependFlag"):
message = "\n%s" % message
kb.prependFlag = False
return message
FORMATTER._format = FORMATTER.format
FORMATTER.format = format
def _setRequestFromFile():
"""
    This function checks whether the HTTP request should be read from a supplied
    text file, parses it and saves the information into the knowledge base.
"""
if not conf.requestFile:
return
addedTargetUrls = set()
conf.requestFile = safeExpandUser(conf.requestFile)
infoMsg = "parsing HTTP request from '%s'" % conf.requestFile
logger.info(infoMsg)
if not os.path.isfile(conf.requestFile):
errMsg = "the specified HTTP request file "
errMsg += "does not exist"
raise SqlmapFilePathException(errMsg)
_feedTargetsDict(conf.requestFile, addedTargetUrls)
def _setCrawler():
if not conf.crawlDepth:
return
if not any((conf.bulkFile, conf.sitemapUrl)):
crawl(conf.url)
else:
if conf.bulkFile:
targets = getFileItems(conf.bulkFile)
else:
targets = parseSitemap(conf.sitemapUrl)
for i in xrange(len(targets)):
try:
target = targets[i]
crawl(target)
if conf.verbose in (1, 2):
status = "%d/%d links visited (%d%%)" % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
except Exception, ex:
errMsg = "problem occurred while crawling at '%s' ('%s')" % (target, getSafeExString(ex))
logger.error(errMsg)
def _doSearch():
"""
This function performs search dorking, parses results
and saves the testable hosts into the knowledge base.
"""
if not conf.googleDork:
return
kb.data.onlyGETs = None
def retrieve():
links = search(conf.googleDork)
if not links:
errMsg = "unable to find results for your "
errMsg += "search dork expression"
raise SqlmapGenericException(errMsg)
for link in links:
link = urldecode(link)
if re.search(r"(.*?)\?(.+)", link):
kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
elif re.search(URI_INJECTABLE_REGEX, link, re.I):
if kb.data.onlyGETs is None and conf.data is None and not conf.googleDork:
message = "do you want to scan only results containing GET parameters? [Y/n] "
test = readInput(message, default="Y")
kb.data.onlyGETs = test.lower() != 'n'
if not kb.data.onlyGETs or conf.googleDork:
kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
return links
while True:
links = retrieve()
if kb.targets:
infoMsg = "sqlmap got %d results for your " % len(links)
infoMsg += "search dork expression, "
if len(links) == len(kb.targets):
infoMsg += "all "
else:
infoMsg += "%d " % len(kb.targets)
infoMsg += "of them are testable targets"
logger.info(infoMsg)
break
else:
message = "sqlmap got %d results " % len(links)
message += "for your search dork expression, but none of them "
message += "have GET parameters to test for SQL injection. "
message += "Do you want to skip to the next result page? [Y/n]"
test = readInput(message, default="Y")
if test[0] in ("n", "N"):
raise SqlmapSilentQuitException
else:
conf.googlePage += 1
def _setBulkMultipleTargets():
if not conf.bulkFile:
return
conf.bulkFile = safeExpandUser(conf.bulkFile)
infoMsg = "parsing multiple targets list from '%s'" % conf.bulkFile
logger.info(infoMsg)
if not os.path.isfile(conf.bulkFile):
errMsg = "the specified bulk file "
errMsg += "does not exist"
raise SqlmapFilePathException(errMsg)
found = False
for line in getFileItems(conf.bulkFile):
if re.match(r"[^ ]+\?(.+)", line, re.I) or CUSTOM_INJECTION_MARK_CHAR in line:
found = True
kb.targets.add((line.strip(), conf.method, conf.data, conf.cookie, None))
if not found and not conf.forms and not conf.crawlDepth:
warnMsg = "no usable links found (with GET parameters)"
logger.warn(warnMsg)
def _setSitemapTargets():
if not conf.sitemapUrl:
return
infoMsg = "parsing sitemap '%s'" % conf.sitemapUrl
logger.info(infoMsg)
found = False
for item in parseSitemap(conf.sitemapUrl):
if re.match(r"[^ ]+\?(.+)", item, re.I):
found = True
kb.targets.add((item.strip(), None, None, None, None))
if not found and not conf.forms and not conf.crawlDepth:
warnMsg = "no usable links found (with GET parameters)"
logger.warn(warnMsg)
def _findPageForms():
if not conf.forms or conf.crawlDepth:
return
if conf.url and not checkConnection():
return
infoMsg = "searching for forms"
logger.info(infoMsg)
if not any((conf.bulkFile, conf.googleDork, conf.sitemapUrl)):
page, _ = Request.queryPage(content=True)
findPageForms(page, conf.url, True, True)
else:
if conf.bulkFile:
targets = getFileItems(conf.bulkFile)
elif conf.sitemapUrl:
targets = parseSitemap(conf.sitemapUrl)
elif conf.googleDork:
targets = [_[0] for _ in kb.targets]
kb.targets.clear()
for i in xrange(len(targets)):
try:
target = targets[i]
page, _, _ = Request.getPage(url=target.strip(), crawling=True, raise404=False)
findPageForms(page, target, False, True)
if conf.verbose in (1, 2):
status = '%d/%d links visited (%d%%)' % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
except KeyboardInterrupt:
break
except Exception, ex:
errMsg = "problem occurred while searching for forms at '%s' ('%s')" % (target, getSafeExString(ex))
logger.error(errMsg)
def _setDBMSAuthentication():
"""
Check and set the DBMS authentication credentials to run statements as
another user, not the session user
"""
if not conf.dbmsCred:
return
debugMsg = "setting the DBMS authentication credentials"
logger.debug(debugMsg)
match = re.search("^(.+?):(.*?)$", conf.dbmsCred)
if not match:
errMsg = "DBMS authentication credentials value must be in format "
errMsg += "username:password"
raise SqlmapSyntaxException(errMsg)
conf.dbmsUsername = match.group(1)
conf.dbmsPassword = match.group(2)
def _setMetasploit():
if not conf.osPwn and not conf.osSmb and not conf.osBof:
return
debugMsg = "setting the takeover out-of-band functionality"
logger.debug(debugMsg)
msfEnvPathExists = False
if IS_WIN:
try:
import win32file
except ImportError:
errMsg = "sqlmap requires third-party module 'pywin32' "
errMsg += "in order to use Metasploit functionalities on "
errMsg += "Windows. You can download it from "
errMsg += "'http://sourceforge.net/projects/pywin32/files/pywin32/'"
raise SqlmapMissingDependence(errMsg)
if not conf.msfPath:
def _(key, value):
retVal = None
try:
from _winreg import ConnectRegistry, OpenKey, QueryValueEx, HKEY_LOCAL_MACHINE
_ = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
_ = OpenKey(_, key)
retVal = QueryValueEx(_, value)[0]
except:
logger.debug("unable to identify Metasploit installation path via registry key")
return retVal
conf.msfPath = _(r"SOFTWARE\Rapid7\Metasploit", "Location")
if conf.msfPath:
conf.msfPath = os.path.join(conf.msfPath, "msf3")
if conf.osSmb:
isAdmin = runningAsAdmin()
if not isAdmin:
errMsg = "you need to run sqlmap as an administrator "
errMsg += "if you want to perform a SMB relay attack because "
errMsg += "it will need to listen on a user-specified SMB "
errMsg += "TCP port for incoming connection attempts"
raise SqlmapMissingPrivileges(errMsg)
if conf.msfPath:
for path in (conf.msfPath, os.path.join(conf.msfPath, "bin")):
if any(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfcli", "msfconsole")):
msfEnvPathExists = True
if all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfvenom",)):
kb.oldMsf = False
elif all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfencode", "msfpayload")):
kb.oldMsf = True
else:
msfEnvPathExists = False
conf.msfPath = path
break
if msfEnvPathExists:
debugMsg = "provided Metasploit Framework path "
debugMsg += "'%s' is valid" % conf.msfPath
logger.debug(debugMsg)
else:
warnMsg = "the provided Metasploit Framework path "
warnMsg += "'%s' is not valid. The cause could " % conf.msfPath
warnMsg += "be that the path does not exists or that one "
warnMsg += "or more of the needed Metasploit executables "
warnMsg += "within msfcli, msfconsole, msfencode and "
warnMsg += "msfpayload do not exist"
logger.warn(warnMsg)
else:
warnMsg = "you did not provide the local path where Metasploit "
warnMsg += "Framework is installed"
logger.warn(warnMsg)
if not msfEnvPathExists:
warnMsg = "sqlmap is going to look for Metasploit Framework "
warnMsg += "installation inside the environment path(s)"
logger.warn(warnMsg)
envPaths = os.environ.get("PATH", "").split(";" if IS_WIN else ":")
for envPath in envPaths:
envPath = envPath.replace(";", "")
if any(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfcli", "msfconsole")):
msfEnvPathExists = True
if all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfvenom",)):
kb.oldMsf = False
elif all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfencode", "msfpayload")):
kb.oldMsf = True
else:
msfEnvPathExists = False
if msfEnvPathExists:
infoMsg = "Metasploit Framework has been found "
infoMsg += "installed in the '%s' path" % envPath
logger.info(infoMsg)
conf.msfPath = envPath
break
if not msfEnvPathExists:
errMsg = "unable to locate Metasploit Framework installation. "
errMsg += "You can get it at 'http://www.metasploit.com/download/'"
raise SqlmapFilePathException(errMsg)
def _setWriteFile():
if not conf.wFile:
return
debugMsg = "setting the write file functionality"
logger.debug(debugMsg)
if not os.path.exists(conf.wFile):
errMsg = "the provided local file '%s' does not exist" % conf.wFile
raise SqlmapFilePathException(errMsg)
if not conf.dFile:
errMsg = "you did not provide the back-end DBMS absolute path "
errMsg += "where you want to write the local file '%s'" % conf.wFile
raise SqlmapMissingMandatoryOptionException(errMsg)
conf.wFileType = getFileType(conf.wFile)
def _setOS():
"""
Force the back-end DBMS operating system option.
"""
if not conf.os:
return
if conf.os.lower() not in SUPPORTED_OS:
errMsg = "you provided an unsupported back-end DBMS operating "
errMsg += "system. The supported DBMS operating systems for OS "
errMsg += "and file system access are %s. " % ', '.join([o.capitalize() for o in SUPPORTED_OS])
errMsg += "If you do not know the back-end DBMS underlying OS, "
errMsg += "do not provide it and sqlmap will fingerprint it for "
errMsg += "you."
raise SqlmapUnsupportedDBMSException(errMsg)
debugMsg = "forcing back-end DBMS operating system to user defined "
debugMsg += "value '%s'" % conf.os
logger.debug(debugMsg)
Backend.setOs(conf.os)
def _setTechnique():
validTechniques = sorted(getPublicTypeMembers(PAYLOAD.TECHNIQUE), key=lambda x: x[1])
validLetters = [_[0][0].upper() for _ in validTechniques]
if conf.tech and isinstance(conf.tech, basestring):
_ = []
for letter in conf.tech.upper():
if letter not in validLetters:
errMsg = "value for --technique must be a string composed "
errMsg += "by the letters %s. Refer to the " % ", ".join(validLetters)
errMsg += "user's manual for details"
raise SqlmapSyntaxException(errMsg)
for validTech, validInt in validTechniques:
if letter == validTech[0]:
_.append(validInt)
break
conf.tech = _
def _setDBMS():
"""
Force the back-end DBMS option.
"""
if not conf.dbms:
return
debugMsg = "forcing back-end DBMS to user defined value"
logger.debug(debugMsg)
conf.dbms = conf.dbms.lower()
regex = re.search("%s ([\d\.]+)" % ("(%s)" % "|".join([alias for alias in SUPPORTED_DBMS])), conf.dbms, re.I)
if regex:
conf.dbms = regex.group(1)
Backend.setVersion(regex.group(2))
if conf.dbms not in SUPPORTED_DBMS:
errMsg = "you provided an unsupported back-end database management "
errMsg += "system. Supported DBMSes are as follows: %s. " % ', '.join(sorted(_ for _ in DBMS_DICT))
errMsg += "If you do not know the back-end DBMS, do not provide "
errMsg += "it and sqlmap will fingerprint it for you."
raise SqlmapUnsupportedDBMSException(errMsg)
for dbms, aliases in DBMS_ALIASES:
if conf.dbms in aliases:
conf.dbms = dbms
break
def _setTamperingFunctions():
"""
Loads tampering functions from given script(s)
"""
if conf.tamper:
last_priority = PRIORITY.HIGHEST
check_priority = True
resolve_priorities = False
priorities = []
for tfile in re.split(PARAMETER_SPLITTING_REGEX, conf.tamper):
found = False
tfile = tfile.strip()
if not tfile:
continue
elif os.path.exists(os.path.join(paths.SQLMAP_TAMPER_PATH, tfile if tfile.endswith('.py') else "%s.py" % tfile)):
tfile = os.path.join(paths.SQLMAP_TAMPER_PATH, tfile if tfile.endswith('.py') else "%s.py" % tfile)
elif not os.path.exists(tfile):
errMsg = "tamper script '%s' does not exist" % tfile
raise SqlmapFilePathException(errMsg)
elif not tfile.endswith('.py'):
errMsg = "tamper script '%s' should have an extension '.py'" % tfile
raise SqlmapSyntaxException(errMsg)
dirname, filename = os.path.split(tfile)
dirname = os.path.abspath(dirname)
infoMsg = "loading tamper script '%s'" % filename[:-3]
logger.info(infoMsg)
if not os.path.exists(os.path.join(dirname, '__init__.py')):
errMsg = "make sure that there is an empty file '__init__.py' "
errMsg += "inside of tamper scripts directory '%s'" % dirname
raise SqlmapGenericException(errMsg)
if dirname not in sys.path:
sys.path.insert(0, dirname)
try:
module = __import__(filename[:-3].encode(sys.getfilesystemencoding() or UNICODE_ENCODING))
except (ImportError, SyntaxError), ex:
raise SqlmapSyntaxException("cannot import tamper script '%s' (%s)" % (filename[:-3], getSafeExString(ex)))
priority = PRIORITY.NORMAL if not hasattr(module, '__priority__') else module.__priority__
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "tamper" and inspect.getargspec(function).args and inspect.getargspec(function).keywords == "kwargs":
found = True
kb.tamperFunctions.append(function)
function.func_name = module.__name__
if check_priority and priority > last_priority:
message = "it appears that you might have mixed "
message += "the order of tamper scripts. "
message += "Do you want to auto resolve this? [Y/n/q] "
test = readInput(message, default="Y")
if not test or test[0] in ("y", "Y"):
resolve_priorities = True
elif test[0] in ("n", "N"):
resolve_priorities = False
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
check_priority = False
priorities.append((priority, function))
last_priority = priority
break
elif name == "dependencies":
function()
if not found:
errMsg = "missing function 'tamper(payload, **kwargs)' "
errMsg += "in tamper script '%s'" % tfile
raise SqlmapGenericException(errMsg)
if kb.tamperFunctions and len(kb.tamperFunctions) > 3:
warnMsg = "using too many tamper scripts is usually not "
warnMsg += "a good idea"
logger.warning(warnMsg)
if resolve_priorities and priorities:
priorities.sort(reverse=True)
kb.tamperFunctions = []
for _, function in priorities:
kb.tamperFunctions.append(function)
def _setWafFunctions():
"""
Loads WAF/IDS/IPS detecting functions from script(s)
"""
if conf.identifyWaf:
for found in glob.glob(os.path.join(paths.SQLMAP_WAF_PATH, "*.py")):
dirname, filename = os.path.split(found)
dirname = os.path.abspath(dirname)
if filename == "__init__.py":
continue
debugMsg = "loading WAF script '%s'" % filename[:-3]
logger.debug(debugMsg)
if dirname not in sys.path:
sys.path.insert(0, dirname)
try:
if filename[:-3] in sys.modules:
del sys.modules[filename[:-3]]
module = __import__(filename[:-3])
except ImportError, msg:
raise SqlmapSyntaxException("cannot import WAF script '%s' (%s)" % (filename[:-3], msg))
_ = dict(inspect.getmembers(module))
if "detect" not in _:
errMsg = "missing function 'detect(get_page)' "
errMsg += "in WAF script '%s'" % found
raise SqlmapGenericException(errMsg)
else:
kb.wafFunctions.append((_["detect"], _.get("__product__", filename[:-3])))
kb.wafFunctions = sorted(kb.wafFunctions, key=lambda _: "generic" in _[1].lower())
def _setThreads():
if not isinstance(conf.threads, int) or conf.threads <= 0:
conf.threads = 1
def _setDNSCache():
"""
Makes a cached version of socket._getaddrinfo to avoid subsequent DNS requests.
"""
def _getaddrinfo(*args, **kwargs):
if args in kb.cache:
return kb.cache[args]
else:
kb.cache[args] = socket._getaddrinfo(*args, **kwargs)
return kb.cache[args]
if not hasattr(socket, "_getaddrinfo"):
socket._getaddrinfo = socket.getaddrinfo
socket.getaddrinfo = _getaddrinfo
def _setSocketPreConnect():
"""
Makes a pre-connect version of socket.connect
"""
if conf.disablePrecon:
return
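# Background worker: for each (family, type, proto, address) key seen by
# connect() below, keep up to SOCKET_PRE_CONNECT_QUEUE_SIZE already-connected
# sockets ready so that real requests can skip the TCP handshake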
def _():
while kb.threadContinue and not conf.disablePrecon:
try:
for key in socket._ready:
if len(socket._ready[key]) < SOCKET_PRE_CONNECT_QUEUE_SIZE:
family, type, proto, address = key
s = socket.socket(family, type, proto)
s._connect(address)
with kb.locks.socket:
socket._ready[key].append((s._sock, time.time()))
except KeyboardInterrupt:
break
except:
pass
finally:
time.sleep(0.01)
def connect(self, address):
found = False
key = (self.family, self.type, self.proto, address)
with kb.locks.socket:
if key not in socket._ready:
socket._ready[key] = []
while len(socket._ready[key]) > 0:
candidate, created = socket._ready[key].pop(0)
if (time.time() - created) < PRECONNECT_CANDIDATE_TIMEOUT:
self._sock = candidate
found = True
break
else:
try:
candidate.close()
except socket.error:
pass
if not found:
self._connect(address)
if not hasattr(socket.socket, "_connect"):
socket._ready = {}
socket.socket._connect = socket.socket.connect
socket.socket.connect = connect
thread = threading.Thread(target=_)
setDaemon(thread)
thread.start()
def _setHTTPHandlers():
"""
Check and set the HTTP/SOCKS proxy for all HTTP requests.
"""
global proxyHandler
for _ in ("http", "https"):
if hasattr(proxyHandler, "%s_open" % _):
delattr(proxyHandler, "%s_open" % _)
if conf.proxyList is not None:
if not conf.proxyList:
errMsg = "list of usable proxies is exhausted"
raise SqlmapNoneDataException(errMsg)
conf.proxy = conf.proxyList[0]
conf.proxyList = conf.proxyList[1:]
infoMsg = "loading proxy '%s' from a supplied proxy list file" % conf.proxy
logger.info(infoMsg)
elif not conf.proxy:
if conf.hostname in ("localhost", "127.0.0.1") or conf.ignoreProxy:
proxyHandler.proxies = {}
if conf.proxy:
debugMsg = "setting the HTTP/SOCKS proxy for all HTTP requests"
logger.debug(debugMsg)
try:
_ = urlparse.urlsplit(conf.proxy)
except Exception, ex:
errMsg = "invalid proxy address '%s' ('%s')" % (conf.proxy, getSafeExString(ex))
raise SqlmapSyntaxException, errMsg
hostnamePort = _.netloc.split(":")
scheme = _.scheme.upper()
hostname = hostnamePort[0]
port = None
username = None
password = None
if len(hostnamePort) == 2:
try:
port = int(hostnamePort[1])
except:
pass # drops into the next check block
if not all((scheme, hasattr(PROXY_TYPE, scheme), hostname, port)):
errMsg = "proxy value must be in format '(%s)://address:port'" % "|".join(_[0].lower() for _ in getPublicTypeMembers(PROXY_TYPE))
raise SqlmapSyntaxException(errMsg)
if conf.proxyCred:
_ = re.search("^(.*?):(.*?)$", conf.proxyCred)
if not _:
errMsg = "proxy authentication credentials "
errMsg += "value must be in format username:password"
raise SqlmapSyntaxException(errMsg)
else:
username = _.group(1)
password = _.group(2)
if scheme in (PROXY_TYPE.SOCKS4, PROXY_TYPE.SOCKS5):
proxyHandler.proxies = {}
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5 if scheme == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4, hostname, port, username=username, password=password)
socks.wrapmodule(urllib2)
else:
socks.unwrapmodule(urllib2)
if conf.proxyCred:
# Reference: http://stackoverflow.com/questions/34079/how-to-specify-an-authenticated-proxy-for-a-python-http-connection
proxyString = "%s@" % conf.proxyCred
else:
proxyString = ""
proxyString += "%s:%d" % (hostname, port)
proxyHandler.proxies = {"http": proxyString, "https": proxyString}
proxyHandler.__init__(proxyHandler.proxies)
debugMsg = "creating HTTP requests opener object"
logger.debug(debugMsg)
handlers = filter(None, [proxyHandler if proxyHandler.proxies else None, authHandler, redirectHandler, rangeHandler, httpsHandler])
if not conf.dropSetCookie:
if not conf.loadCookies:
conf.cj = cookielib.CookieJar()
else:
conf.cj = cookielib.MozillaCookieJar()
resetCookieJar(conf.cj)
handlers.append(urllib2.HTTPCookieProcessor(conf.cj))
# Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html
if conf.keepAlive:
warnMsg = "persistent HTTP(s) connections, Keep-Alive, has "
warnMsg += "been disabled because of its incompatibility "
if conf.proxy:
warnMsg += "with HTTP(s) proxy"
logger.warn(warnMsg)
elif conf.authType:
warnMsg += "with authentication methods"
logger.warn(warnMsg)
else:
handlers.append(keepAliveHandler)
opener = urllib2.build_opener(*handlers)
urllib2.install_opener(opener)
def _setSafeVisit():
"""
Check and set the safe visit options.
"""
if not any((conf.safeUrl, conf.safeReqFile)):
return
if conf.safeReqFile:
checkFile(conf.safeReqFile)
raw = readCachedFileContent(conf.safeReqFile)
match = re.search(r"\A([A-Z]+) ([^ ]+) HTTP/[0-9.]+\Z", raw[:raw.find('\n')])
if match:
kb.safeReq.method = match.group(1)
kb.safeReq.url = match.group(2)
kb.safeReq.headers = {}
for line in raw[raw.find('\n') + 1:].split('\n'):
line = line.strip()
if line and ':' in line:
key, value = line.split(':', 1)
value = value.strip()
kb.safeReq.headers[key] = value
if key == HTTP_HEADER.HOST:
if not value.startswith("http"):
scheme = "http"
if value.endswith(":443"):
scheme = "https"
value = "%s://%s" % (scheme, value)
kb.safeReq.url = urlparse.urljoin(value, kb.safeReq.url)
else:
break
post = None
if '\r\n\r\n' in raw:
post = raw[raw.find('\r\n\r\n') + 4:]
elif '\n\n' in raw:
post = raw[raw.find('\n\n') + 2:]
if post and post.strip():
kb.safeReq.post = post
else:
kb.safeReq.post = None
else:
errMsg = "invalid format of a safe request file"
raise SqlmapSyntaxException, errMsg
else:
if not re.search("^http[s]*://", conf.safeUrl):
if ":443/" in conf.safeUrl:
conf.safeUrl = "https://" + conf.safeUrl
else:
conf.safeUrl = "http://" + conf.safeUrl
if conf.safeFreq <= 0:
errMsg = "please provide a valid value (>0) for safe frequency (--safe-freq) while using safe visit features"
raise SqlmapSyntaxException(errMsg)
def _setPrefixSuffix():
if conf.prefix is not None and conf.suffix is not None:
# Create a custom boundary object for user's supplied prefix
# and suffix
boundary = AttribDict()
boundary.level = 1
boundary.clause = [0]
boundary.where = [1, 2, 3]
boundary.prefix = conf.prefix
boundary.suffix = conf.suffix
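# Guess the payload type (ptype) from the supplied suffix, following the
# usual sqlmap boundary convention: 1 - unescaped numeric, 2 - single quoted
# string, 3 - LIKE single quoted, 4 - double quoted string, 5 - LIKE double quoted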
if " like" in boundary.suffix.lower():
if "'" in boundary.suffix.lower():
boundary.ptype = 3
elif '"' in boundary.suffix.lower():
boundary.ptype = 5
elif "'" in boundary.suffix:
boundary.ptype = 2
elif '"' in boundary.suffix:
boundary.ptype = 4
else:
boundary.ptype = 1
# user who provides --prefix/--suffix does not want other boundaries
# to be tested for
conf.boundaries = [boundary]
def _setAuthCred():
"""
Adds authentication credentials (if any) for current target to the password manager
(used by connection handler)
"""
if kb.passwordMgr and all(_ is not None for _ in (conf.scheme, conf.hostname, conf.port, conf.authUsername, conf.authPassword)):
kb.passwordMgr.add_password(None, "%s://%s:%d" % (conf.scheme, conf.hostname, conf.port), conf.authUsername, conf.authPassword)
def _setHTTPAuthentication():
"""
Check and set the HTTP(s) authentication method (Basic, Digest, NTLM or PKI),
username and password for first three methods, or PEM private key file for
PKI authentication
"""
global authHandler
if not conf.authType and not conf.authCred and not conf.authFile:
return
if conf.authFile and not conf.authType:
conf.authType = AUTH_TYPE.PKI
elif conf.authType and not conf.authCred and not conf.authFile:
errMsg = "you specified the HTTP authentication type, but "
errMsg += "did not provide the credentials"
raise SqlmapSyntaxException(errMsg)
elif not conf.authType and conf.authCred:
errMsg = "you specified the HTTP authentication credentials, "
errMsg += "but did not provide the type"
raise SqlmapSyntaxException(errMsg)
elif (conf.authType or "").lower() not in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST, AUTH_TYPE.NTLM, AUTH_TYPE.PKI):
errMsg = "HTTP authentication type value must be "
errMsg += "Basic, Digest, NTLM or PKI"
raise SqlmapSyntaxException(errMsg)
if not conf.authFile:
debugMsg = "setting the HTTP authentication type and credentials"
logger.debug(debugMsg)
aTypeLower = conf.authType.lower()
if aTypeLower in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST):
regExp = "^(.*?):(.*?)$"
errMsg = "HTTP %s authentication credentials " % aTypeLower
errMsg += "value must be in format 'username:password'"
elif aTypeLower == AUTH_TYPE.NTLM:
regExp = "^(.*\\\\.*):(.*?)$"
errMsg = "HTTP NTLM authentication credentials value must "
errMsg += "be in format 'DOMAIN\username:password'"
elif aTypeLower == AUTH_TYPE.PKI:
errMsg = "HTTP PKI authentication require "
errMsg += "usage of option `--auth-pki`"
raise SqlmapSyntaxException(errMsg)
aCredRegExp = re.search(regExp, conf.authCred)
if not aCredRegExp:
raise SqlmapSyntaxException(errMsg)
conf.authUsername = aCredRegExp.group(1)
conf.authPassword = aCredRegExp.group(2)
kb.passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
_setAuthCred()
if aTypeLower == AUTH_TYPE.BASIC:
authHandler = SmartHTTPBasicAuthHandler(kb.passwordMgr)
elif aTypeLower == AUTH_TYPE.DIGEST:
authHandler = urllib2.HTTPDigestAuthHandler(kb.passwordMgr)
elif aTypeLower == AUTH_TYPE.NTLM:
try:
from ntlm import HTTPNtlmAuthHandler
except ImportError:
errMsg = "sqlmap requires Python NTLM third-party library "
errMsg += "in order to authenticate via NTLM, "
errMsg += "http://code.google.com/p/python-ntlm/"
raise SqlmapMissingDependence(errMsg)
authHandler = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(kb.passwordMgr)
else:
debugMsg = "setting the HTTP(s) authentication PEM private key"
logger.debug(debugMsg)
_ = safeExpandUser(conf.authFile)
checkFile(_)
authHandler = HTTPSPKIAuthHandler(_)
def _setHTTPExtraHeaders():
if conf.headers:
debugMsg = "setting extra HTTP headers"
logger.debug(debugMsg)
conf.headers = conf.headers.split("\n") if "\n" in conf.headers else conf.headers.split("\\n")
for headerValue in conf.headers:
if not headerValue.strip():
continue
if headerValue.count(':') >= 1:
header, value = (_.lstrip() for _ in headerValue.split(":", 1))
if header and value:
conf.httpHeaders.append((header, value))
else:
errMsg = "invalid header value: %s. Valid header format is 'name:value'" % repr(headerValue).lstrip('u')
raise SqlmapSyntaxException(errMsg)
elif not conf.requestFile and len(conf.httpHeaders or []) < 2:
conf.httpHeaders.append((HTTP_HEADER.ACCEPT_LANGUAGE, "en-us,en;q=0.5"))
if not conf.charset:
conf.httpHeaders.append((HTTP_HEADER.ACCEPT_CHARSET, "ISO-8859-15,utf-8;q=0.7,*;q=0.7"))
else:
conf.httpHeaders.append((HTTP_HEADER.ACCEPT_CHARSET, "%s;q=0.7,*;q=0.1" % conf.charset))
# Invalidating any caching mechanism in between
# Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
conf.httpHeaders.append((HTTP_HEADER.CACHE_CONTROL, "no-cache,no-store"))
conf.httpHeaders.append((HTTP_HEADER.PRAGMA, "no-cache"))
def _defaultHTTPUserAgent():
"""
@return: default sqlmap HTTP User-Agent header
@rtype: C{str}
"""
return "%s (%s)" % (VERSION_STRING, SITE)
# Firefox 3 running on Ubuntu 9.04 updated at April 2009
#return "Mozilla/5.0 (X11; U; Linux i686; en-GB; rv:1.9.0.9) Gecko/2009042113 Ubuntu/9.04 (jaunty) Firefox/3.0.9"
# Internet Explorer 7.0 running on Windows 2003 Service Pack 2 english
# updated at March 2009
#return "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)"
def _setHTTPUserAgent():
"""
Set the HTTP User-Agent header.
Depending on the user options it can be:
* The default sqlmap string
* A default value read as user option
* A random value read from a list of User-Agent headers in a
file chosen as user option
"""
if conf.mobile:
message = "which smartphone do you want sqlmap to imitate "
message += "through HTTP User-Agent header?\n"
items = sorted(getPublicTypeMembers(MOBILES, True))
for count in xrange(len(items)):
item = items[count]
message += "[%d] %s%s\n" % (count + 1, item[0], " (default)" if item == MOBILES.IPHONE else "")
test = readInput(message.rstrip('\n'), default=items.index(MOBILES.IPHONE) + 1)
try:
item = items[int(test) - 1]
except:
item = MOBILES.IPHONE
conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, item[1]))
elif conf.agent:
debugMsg = "setting the HTTP User-Agent header"
logger.debug(debugMsg)
conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, conf.agent))
elif not conf.randomAgent:
_ = True
for header, _ in conf.httpHeaders:
if header == HTTP_HEADER.USER_AGENT:
_ = False
break
if _:
conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, _defaultHTTPUserAgent()))
else:
if not kb.userAgents:
debugMsg = "loading random HTTP User-Agent header(s) from "
debugMsg += "file '%s'" % paths.USER_AGENTS
logger.debug(debugMsg)
try:
kb.userAgents = getFileItems(paths.USER_AGENTS)
except IOError:
warnMsg = "unable to read HTTP User-Agent header "
warnMsg += "file '%s'" % paths.USER_AGENTS
logger.warn(warnMsg)
conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, _defaultHTTPUserAgent()))
return
userAgent = random.sample(kb.userAgents or [_defaultHTTPUserAgent()], 1)[0]
infoMsg = "fetched random HTTP User-Agent header from "
infoMsg += "file '%s': '%s'" % (paths.USER_AGENTS, userAgent)
logger.info(infoMsg)
conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, userAgent))
def _setHTTPReferer():
"""
Set the HTTP Referer
"""
if conf.referer:
debugMsg = "setting the HTTP Referer header"
logger.debug(debugMsg)
conf.httpHeaders.append((HTTP_HEADER.REFERER, conf.referer))
def _setHTTPHost():
"""
Set the HTTP Host
"""
if conf.host:
debugMsg = "setting the HTTP Host header"
logger.debug(debugMsg)
conf.httpHeaders.append((HTTP_HEADER.HOST, conf.host))
def _setHTTPCookies():
"""
Set the HTTP Cookie header
"""
if conf.cookie:
debugMsg = "setting the HTTP Cookie header"
logger.debug(debugMsg)
conf.httpHeaders.append((HTTP_HEADER.COOKIE, conf.cookie))
def _setHTTPTimeout():
"""
Set the HTTP timeout
"""
if conf.timeout:
debugMsg = "setting the HTTP timeout"
logger.debug(debugMsg)
conf.timeout = float(conf.timeout)
if conf.timeout < 3.0:
warnMsg = "the minimum HTTP timeout is 3 seconds, sqlmap "
warnMsg += "will going to reset it"
logger.warn(warnMsg)
conf.timeout = 3.0
else:
conf.timeout = 30.0
socket.setdefaulttimeout(conf.timeout)
def _checkDependencies():
"""
Checks for missing dependencies.
"""
if conf.dependencies:
checkDependencies()
def _createTemporaryDirectory():
"""
Creates temporary directory for this run.
"""
if conf.tmpDir:
try:
if not os.path.isdir(conf.tmpDir):
os.makedirs(conf.tmpDir)
_ = os.path.join(conf.tmpDir, randomStr())
open(_, "w+b").close()
os.remove(_)
tempfile.tempdir = conf.tmpDir
warnMsg = "using '%s' as the temporary directory" % conf.tmpDir
logger.warn(warnMsg)
except (OSError, IOError), ex:
errMsg = "there has been a problem while accessing "
errMsg += "temporary directory location(s) ('%s')" % getSafeExString(ex)
raise SqlmapSystemException, errMsg
else:
try:
if not os.path.isdir(tempfile.gettempdir()):
os.makedirs(tempfile.gettempdir())
except IOError, ex:
errMsg = "there has been a problem while accessing "
errMsg += "system's temporary directory location(s) ('%s'). Please " % getSafeExString(ex)
errMsg += "make sure that there is enough disk space left. If problem persists, "
errMsg += "try to set environment variable 'TEMP' to a location "
errMsg += "writeable by the current user"
raise SqlmapSystemException, errMsg
if "sqlmap" not in (tempfile.tempdir or "") or conf.tmpDir and tempfile.tempdir == conf.tmpDir:
tempfile.tempdir = tempfile.mkdtemp(prefix="sqlmap", suffix=str(os.getpid()))
kb.tempDir = tempfile.tempdir
if not os.path.isdir(tempfile.tempdir):
os.makedirs(tempfile.tempdir)
def _cleanupOptions():
"""
Cleanup configuration attributes.
"""
debugMsg = "cleaning up configuration parameters"
logger.debug(debugMsg)
width = getConsoleWidth()
if conf.eta:
conf.progressWidth = width - 26
else:
conf.progressWidth = width - 46
for key, value in conf.items():
if value and any(key.endswith(_) for _ in ("Path", "File", "Dir")):
conf[key] = safeExpandUser(value)
if conf.testParameter:
conf.testParameter = urldecode(conf.testParameter)
conf.testParameter = conf.testParameter.replace(" ", "")
conf.testParameter = re.split(PARAMETER_SPLITTING_REGEX, conf.testParameter)
else:
conf.testParameter = []
if conf.agent:
conf.agent = re.sub(r"[\r\n]", "", conf.agent)
if conf.user:
conf.user = conf.user.replace(" ", "")
if conf.rParam:
conf.rParam = conf.rParam.replace(" ", "")
conf.rParam = re.split(PARAMETER_SPLITTING_REGEX, conf.rParam)
else:
conf.rParam = []
if conf.paramDel and '\\' in conf.paramDel:
conf.paramDel = conf.paramDel.decode("string_escape")
if conf.skip:
conf.skip = conf.skip.replace(" ", "")
conf.skip = re.split(PARAMETER_SPLITTING_REGEX, conf.skip)
else:
conf.skip = []
if conf.cookie:
conf.cookie = re.sub(r"[\r\n]", "", conf.cookie)
if conf.delay:
conf.delay = float(conf.delay)
if conf.rFile:
conf.rFile = ntToPosixSlashes(normalizePath(conf.rFile))
if conf.wFile:
conf.wFile = ntToPosixSlashes(normalizePath(conf.wFile))
if conf.dFile:
conf.dFile = ntToPosixSlashes(normalizePath(conf.dFile))
if conf.sitemapUrl and not conf.sitemapUrl.lower().startswith("http"):
conf.sitemapUrl = "http%s://%s" % ('s' if conf.forceSSL else '', conf.sitemapUrl)
if conf.msfPath:
conf.msfPath = ntToPosixSlashes(normalizePath(conf.msfPath))
if conf.tmpPath:
conf.tmpPath = ntToPosixSlashes(normalizePath(conf.tmpPath))
if any((conf.googleDork, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.forms, conf.crawlDepth)):
conf.multipleTargets = True
if conf.optimize:
setOptimize()
if conf.data:
conf.data = re.sub(INJECT_HERE_MARK.replace(" ", r"[^A-Za-z]*"), CUSTOM_INJECTION_MARK_CHAR, conf.data, flags=re.I)  # 'flags' keyword, not the positional 'count' slot
if conf.url:
conf.url = re.sub(INJECT_HERE_MARK.replace(" ", r"[^A-Za-z]*"), CUSTOM_INJECTION_MARK_CHAR, conf.url, flags=re.I)  # 'flags' keyword, not the positional 'count' slot
if conf.os:
conf.os = conf.os.capitalize()
if conf.dbms:
conf.dbms = conf.dbms.capitalize()
if conf.testFilter:
conf.testFilter = conf.testFilter.strip('*+')
conf.testFilter = re.sub(r"([^.])([*+])", "\g<1>.\g<2>", conf.testFilter)
try:
re.compile(conf.testFilter)
except re.error:
conf.testFilter = re.escape(conf.testFilter)
if conf.testSkip:
conf.testSkip = conf.testSkip.strip('*+')
conf.testSkip = re.sub(r"([^.])([*+])", "\g<1>.\g<2>", conf.testSkip)
try:
re.compile(conf.testSkip)
except re.error:
conf.testSkip = re.escape(conf.testSkip)
if "timeSec" not in kb.explicitSettings:
if conf.tor:
conf.timeSec = 2 * conf.timeSec
kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE
warnMsg = "increasing default value for "
warnMsg += "option '--time-sec' to %d because " % conf.timeSec
warnMsg += "switch '--tor' was provided"
logger.warn(warnMsg)
else:
kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE
if conf.retries:
conf.retries = min(conf.retries, MAX_CONNECT_RETRIES)
if conf.code:
conf.code = int(conf.code)
if conf.csvDel:
conf.csvDel = conf.csvDel.decode("string_escape") # e.g. '\\t' -> '\t'
if conf.torPort and isinstance(conf.torPort, basestring) and conf.torPort.isdigit():
conf.torPort = int(conf.torPort)
if conf.torType:
conf.torType = conf.torType.upper()
if conf.outputDir:
paths.SQLMAP_OUTPUT_PATH = os.path.realpath(os.path.expanduser(conf.outputDir))
setPaths()
if conf.string:
try:
conf.string = conf.string.decode("unicode_escape")
except:
charset = string.whitespace.replace(" ", "")
for _ in charset:
conf.string = conf.string.replace(_.encode("string_escape"), _)
if conf.getAll:
map(lambda x: conf.__setitem__(x, True), WIZARD.ALL)
if conf.noCast:
for _ in DUMP_REPLACEMENTS.keys():
del DUMP_REPLACEMENTS[_]
if conf.dumpFormat:
conf.dumpFormat = conf.dumpFormat.upper()
if conf.torType:
conf.torType = conf.torType.upper()
if conf.col:
conf.col = re.sub(r"\s*,\s*", ",", conf.col)
if conf.excludeCol:
conf.excludeCol = re.sub(r"\s*,\s*", ",", conf.excludeCol)
if conf.binaryFields:
conf.binaryFields = re.sub(r"\s*,\s*", ",", conf.binaryFields)
threadData = getCurrentThreadData()
threadData.reset()
def _dirtyPatches():
"""
Place for "dirty" Python related patches
"""
httplib._MAXLINE = 1 * 1024 * 1024 # to accept overly long result lines (e.g. SQLi results in HTTP header responses)
def _purgeOutput():
"""
Safely removes (purges) output directory.
"""
if conf.purgeOutput:
purge(paths.SQLMAP_OUTPUT_PATH)
def _setConfAttributes():
"""
This function sets some needed attributes in the configuration
singleton.
"""
debugMsg = "initializing the configuration"
logger.debug(debugMsg)
conf.authUsername = None
conf.authPassword = None
conf.boundaries = []
conf.cj = None
conf.dbmsConnector = None
conf.dbmsHandler = None
conf.dnsServer = None
conf.dumpPath = None
conf.hashDB = None
conf.hashDBFile = None
conf.httpHeaders = []
conf.hostname = None
conf.ipv6 = False
conf.multipleTargets = False
conf.outputPath = None
conf.paramDict = {}
conf.parameters = {}
conf.path = None
conf.port = None
conf.proxyList = None
conf.resultsFilename = None
conf.resultsFP = None
conf.scheme = None
conf.tests = []
conf.trafficFP = None
conf.wFileType = None
def _setKnowledgeBaseAttributes(flushAll=True):
"""
This function sets some needed attributes in the knowledge base
singleton.
"""
debugMsg = "initializing the knowledge base"
logger.debug(debugMsg)
kb.absFilePaths = set()
kb.adjustTimeDelay = None
kb.alerted = False
kb.alwaysRefresh = None
kb.arch = None
kb.authHeader = None
kb.bannerFp = AttribDict()
kb.binaryField = False
kb.brute = AttribDict({"tables": [], "columns": []})
kb.bruteMode = False
kb.cache = AttribDict()
kb.cache.content = {}
kb.cache.regex = {}
kb.cache.stdev = {}
kb.captchaDetected = None
kb.chars = AttribDict()
kb.chars.delimiter = randomStr(length=6, lowercase=True)
kb.chars.start = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
kb.chars.stop = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
kb.chars.at, kb.chars.space, kb.chars.dollar, kb.chars.hash_ = ("%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, _, KB_CHARS_BOUNDARY_CHAR) for _ in randomStr(length=4, lowercase=True))
kb.columnExistsChoice = None
kb.commonOutputs = None
kb.cookieEncodeChoice = None
kb.counters = {}
kb.data = AttribDict()
kb.dataOutputFlag = False
# Active back-end DBMS fingerprint
kb.dbms = None
kb.dbmsVersion = [UNKNOWN_DBMS_VERSION]
kb.delayCandidates = TIME_DELAY_CANDIDATES * [0]
kb.dep = None
kb.dnsMode = False
kb.dnsTest = None
kb.docRoot = None
kb.dumpColumns = None
kb.dumpTable = None
kb.dumpKeyboardInterrupt = False
kb.dynamicMarkings = []
kb.dynamicParameter = False
kb.endDetection = False
kb.explicitSettings = set()
kb.extendTests = None
kb.errorChunkLength = None
kb.errorIsNone = True
kb.falsePositives = []
kb.fileReadMode = False
kb.followSitemapRecursion = None
kb.forcedDbms = None
kb.forcePartialUnion = False
kb.forceWhere = None
kb.futileUnion = None
kb.headersFp = {}
kb.heuristicDbms = None
kb.heuristicMode = False
kb.heuristicPage = False
kb.heuristicTest = None
kb.hintValue = None
kb.htmlFp = []
kb.httpErrorCodes = {}
kb.inferenceMode = False
kb.ignoreCasted = None
kb.ignoreNotFound = False
kb.ignoreTimeout = False
kb.injection = InjectionDict()
kb.injections = []
kb.laggingChecked = False
kb.lastParserStatus = None
kb.locks = AttribDict()
for _ in ("cache", "count", "index", "io", "limit", "log", "socket", "redirect", "request", "value"):
kb.locks[_] = threading.Lock()
kb.matchRatio = None
kb.maxConnectionsFlag = False
kb.mergeCookies = None
kb.multiThreadMode = False
kb.negativeLogic = False
kb.nullConnection = None
kb.oldMsf = None
kb.orderByColumns = None
kb.originalCode = None
kb.originalPage = None
kb.originalPageTime = None
kb.originalTimeDelay = None
kb.originalUrls = dict()
# Back-end DBMS underlying operating system fingerprint via banner (-b)
# parsing
kb.os = None
kb.osVersion = None
kb.osSP = None
kb.pageCompress = True
kb.pageTemplate = None
kb.pageTemplates = dict()
kb.pageEncoding = DEFAULT_PAGE_ENCODING
kb.pageStable = None
kb.partRun = None
kb.permissionFlag = False
kb.postHint = None
kb.postSpaceToPlus = False
kb.postUrlEncode = True
kb.prependFlag = False
kb.processResponseCounter = 0
kb.previousMethod = None
kb.processUserMarks = None
kb.proxyAuthHeader = None
kb.queryCounter = 0
kb.redirectChoice = None
kb.reflectiveMechanism = True
kb.reflectiveCounters = {REFLECTIVE_COUNTER.MISS: 0, REFLECTIVE_COUNTER.HIT: 0}
kb.requestCounter = 0
kb.resendPostOnRedirect = None
kb.resolutionDbms = None
kb.responseTimes = {}
kb.responseTimeMode = None
kb.responseTimePayload = None
kb.resumeValues = True
kb.rowXmlMode = False
kb.safeCharEncode = False
kb.safeReq = AttribDict()
kb.singleLogFlags = set()
kb.skipSeqMatcher = False
kb.reduceTests = None
kb.tlsSNI = {}
kb.stickyDBMS = False
kb.stickyLevel = None
kb.storeCrawlingChoice = None
kb.storeHashesChoice = None
kb.suppressResumeInfo = False
kb.tableFrom = None
kb.technique = None
kb.tempDir = None
kb.testMode = False
kb.testOnlyCustom = False
kb.testQueryCount = 0
kb.testType = None
kb.threadContinue = True
kb.threadException = False
kb.tableExistsChoice = None
kb.timeValidCharsRun = 0
kb.uChar = NULL
kb.unionDuplicates = False
kb.xpCmdshellAvailable = False
if flushAll:
kb.headerPaths = {}
kb.keywords = set(getFileItems(paths.SQL_KEYWORDS))
kb.passwordMgr = None
kb.skipVulnHost = None
kb.tamperFunctions = []
kb.targets = oset()
kb.testedParams = set()
kb.userAgents = None
kb.vainRun = True
kb.vulnHosts = set()
kb.wafFunctions = []
kb.wordlists = None
def _useWizardInterface():
"""
Presents simple wizard interface for beginner users
"""
if not conf.wizard:
return
logger.info("starting wizard interface")
while not conf.url:
message = "Please enter full target URL (-u): "
conf.url = readInput(message, default=None)
message = "%s data (--data) [Enter for None]: " % ((conf.method if conf.method != HTTPMETHOD.GET else conf.method) or HTTPMETHOD.POST)
conf.data = readInput(message, default=None)
if not (filter(lambda _: '=' in unicode(_), (conf.url, conf.data)) or '*' in conf.url):
warnMsg = "no GET and/or %s parameter(s) found for testing " % ((conf.method if conf.method != HTTPMETHOD.GET else conf.method) or HTTPMETHOD.POST)
warnMsg += "(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). "
if not conf.crawlDepth and not conf.forms:
warnMsg += "Will search for forms"
conf.forms = True
logger.warn(warnMsg)
choice = None
while choice is None or choice not in ("", "1", "2", "3"):
message = "Injection difficulty (--level/--risk). Please choose:\n"
message += "[1] Normal (default)\n[2] Medium\n[3] Hard"
choice = readInput(message, default='1')
if choice == '2':
conf.risk = 2
conf.level = 3
elif choice == '3':
conf.risk = 3
conf.level = 5
else:
conf.risk = 1
conf.level = 1
if not conf.getAll:
choice = None
while choice is None or choice not in ("", "1", "2", "3"):
message = "Enumeration (--banner/--current-user/etc). Please choose:\n"
message += "[1] Basic (default)\n[2] Intermediate\n[3] All"
choice = readInput(message, default='1')
if choice == '2':
map(lambda x: conf.__setitem__(x, True), WIZARD.INTERMEDIATE)
elif choice == '3':
map(lambda x: conf.__setitem__(x, True), WIZARD.ALL)
else:
map(lambda x: conf.__setitem__(x, True), WIZARD.BASIC)
logger.debug("muting sqlmap.. it will do the magic for you")
conf.verbose = 0
conf.batch = True
conf.threads = 4
dataToStdout("\nsqlmap is running, please wait..\n\n")
def _saveConfig():
"""
Saves the command line options to a sqlmap configuration INI file.
"""
if not conf.saveConfig:
return
debugMsg = "saving command line options to a sqlmap configuration INI file"
logger.debug(debugMsg)
config = UnicodeRawConfigParser()
userOpts = {}
for family in optDict.keys():
userOpts[family] = []
for option, value in conf.items():
for family, optionData in optDict.items():
if option in optionData:
userOpts[family].append((option, value, optionData[option]))
for family, optionData in userOpts.items():
config.add_section(family)
optionData.sort()
for option, value, datatype in optionData:
if datatype and isListLike(datatype):
datatype = datatype[0]
if option in IGNORE_SAVE_OPTIONS:
continue
if value is None:
if datatype == OPTION_TYPE.BOOLEAN:
value = "False"
elif datatype in (OPTION_TYPE.INTEGER, OPTION_TYPE.FLOAT):
if option in defaults:
value = str(defaults[option])
else:
value = "0"
elif datatype == OPTION_TYPE.STRING:
value = ""
if isinstance(value, basestring):
value = value.replace("\n", "\n ")
config.set(family, option, value)
confFP = openFile(conf.saveConfig, "wb")
try:
config.write(confFP)
except IOError, ex:
errMsg = "something went wrong while trying "
errMsg += "to write to the configuration file '%s' ('%s')" % (conf.saveConfig, getSafeExString(ex))
raise SqlmapSystemException(errMsg)
infoMsg = "saved command line options to the configuration file '%s'" % conf.saveConfig
logger.info(infoMsg)
def setVerbosity():
"""
This function sets the verbosity of sqlmap output messages.
"""
if conf.verbose is None:
conf.verbose = 1
conf.verbose = int(conf.verbose)
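# Verbosity mapping: 0 ERROR, 1 INFO, 2 DEBUG, 3 PAYLOAD, 4 TRAFFIC_OUT,
# >= 5 TRAFFIC_IN (capped at 2 when '--eta' is used)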
if conf.verbose == 0:
logger.setLevel(logging.ERROR)
elif conf.verbose == 1:
logger.setLevel(logging.INFO)
elif conf.verbose > 2 and conf.eta:
conf.verbose = 2
logger.setLevel(logging.DEBUG)
elif conf.verbose == 2:
logger.setLevel(logging.DEBUG)
elif conf.verbose == 3:
logger.setLevel(CUSTOM_LOGGING.PAYLOAD)
elif conf.verbose == 4:
logger.setLevel(CUSTOM_LOGGING.TRAFFIC_OUT)
elif conf.verbose >= 5:
logger.setLevel(CUSTOM_LOGGING.TRAFFIC_IN)
def _normalizeOptions(inputOptions):
"""
Sets proper option types
"""
types_ = {}
for group in optDict.keys():
types_.update(optDict[group])
for key in inputOptions:
if key in types_:
value = inputOptions[key]
if value is None:
continue
type_ = types_[key]
if type_ and isinstance(type_, tuple):
type_ = type_[0]
if type_ == OPTION_TYPE.BOOLEAN:
try:
value = bool(value)
except (TypeError, ValueError):
value = False
elif type_ == OPTION_TYPE.INTEGER:
try:
value = int(value)
except (TypeError, ValueError):
value = 0
elif type_ == OPTION_TYPE.FLOAT:
try:
value = float(value)
except (TypeError, ValueError):
value = 0.0
inputOptions[key] = value
def _mergeOptions(inputOptions, overrideOptions):
"""
Merge command line options with configuration file and default options.
@param inputOptions: optparse object with command line options.
@type inputOptions: C{instance}
"""
if inputOptions.pickledOptions:
try:
inputOptions = base64unpickle(inputOptions.pickledOptions)
_normalizeOptions(inputOptions)
except Exception, ex:
errMsg = "provided invalid value '%s' for option '--pickled-options'" % inputOptions.pickledOptions
errMsg += " ('%s')" % ex if ex.message else ""
raise SqlmapSyntaxException(errMsg)
if inputOptions.configFile:
configFileParser(inputOptions.configFile)
if hasattr(inputOptions, "items"):
inputOptionsItems = inputOptions.items()
else:
inputOptionsItems = inputOptions.__dict__.items()
for key, value in inputOptionsItems:
if key not in conf or value not in (None, False) or overrideOptions:
conf[key] = value
for key, value in conf.items():
if value is not None:
kb.explicitSettings.add(key)
for key, value in defaults.items():
if hasattr(conf, key) and conf[key] is None:
conf[key] = value
lut = {}
for group in optDict.keys():
lut.update((_.upper(), _) for _ in optDict[group])
envOptions = {}
for key, value in os.environ.items():
if key.upper().startswith(SQLMAP_ENVIRONMENT_PREFIX):
_ = key[len(SQLMAP_ENVIRONMENT_PREFIX):].upper()
if _ in lut:
envOptions[lut[_]] = value
if envOptions:
_normalizeOptions(envOptions)
for key, value in envOptions.items():
conf[key] = value
mergedOptions.update(conf)
def _setTrafficOutputFP():
if conf.trafficFile:
infoMsg = "setting file for logging HTTP traffic"
logger.info(infoMsg)
conf.trafficFP = openFile(conf.trafficFile, "w+")
def _setDNSServer():
if not conf.dnsName:
return
infoMsg = "setting up DNS server instance"
logger.info(infoMsg)
isAdmin = runningAsAdmin()
if isAdmin:
try:
conf.dnsServer = DNSServer()
conf.dnsServer.run()
except socket.error, msg:
errMsg = "there was an error while setting up "
errMsg += "DNS server instance ('%s')" % msg
raise SqlmapGenericException(errMsg)
else:
errMsg = "you need to run sqlmap as an administrator "
errMsg += "if you want to perform a DNS data exfiltration attack "
errMsg += "as it will need to listen on privileged UDP port 53 "
errMsg += "for incoming address resolution attempts"
raise SqlmapMissingPrivileges(errMsg)
def _setProxyList():
if not conf.proxyFile:
return
conf.proxyList = []
for match in re.finditer(r"(?i)((http[^:]*|socks[^:]*)://)?([\w.]+):(\d+)", readCachedFileContent(conf.proxyFile)):
_, type_, address, port = match.groups()
conf.proxyList.append("%s://%s:%s" % (type_ or "http", address, port))
def _setTorProxySettings():
if not conf.tor:
return
if conf.torType == PROXY_TYPE.HTTP:
_setTorHttpProxySettings()
else:
_setTorSocksProxySettings()
def _setTorHttpProxySettings():
infoMsg = "setting Tor HTTP proxy settings"
logger.info(infoMsg)
s = None
found = None
for port in (DEFAULT_TOR_HTTP_PORTS if not conf.torPort else (conf.torPort,)):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((LOCALHOST, port))
found = port
break
except socket.error:
pass
if s:
s.close()
if found:
conf.proxy = "http://%s:%d" % (LOCALHOST, found)
else:
errMsg = "can't establish connection with the Tor HTTP proxy. "
errMsg += "Please make sure that you have Vidalia, Privoxy or "
errMsg += "Polipo bundle installed for you to be able to "
errMsg += "successfully use switch '--tor' "
raise SqlmapConnectionException(errMsg)
if not conf.checkTor:
warnMsg = "use switch '--check-tor' at "
warnMsg += "your own convenience when accessing "
warnMsg += "Tor anonymizing network because of "
warnMsg += "known issues with default settings of various 'bundles' "
warnMsg += "(e.g. Vidalia)"
logger.warn(warnMsg)
def _setTorSocksProxySettings():
infoMsg = "setting Tor SOCKS proxy settings"
logger.info(infoMsg)
# Has to be SOCKS5 to prevent DNS leaks (http://en.wikipedia.org/wiki/Tor_%28anonymity_network%29)
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5 if conf.torType == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4, LOCALHOST, conf.torPort or DEFAULT_TOR_SOCKS_PORT)
socks.wrapmodule(urllib2)
def _checkWebSocket():
if conf.url and (conf.url.startswith("ws:/") or conf.url.startswith("wss:/")):
try:
from websocket import ABNF
except ImportError:
errMsg = "sqlmap requires third-party module 'websocket-client' "
errMsg += "in order to use WebSocket funcionality"
raise SqlmapMissingDependence(errMsg)
def _checkTor():
if not conf.checkTor:
return
infoMsg = "checking Tor connection"
logger.info(infoMsg)
try:
page, _, _ = Request.getPage(url="https://check.torproject.org/", raise404=False)
except SqlmapConnectionException:
page = None
if not page or 'Congratulations' not in page:
errMsg = "it appears that Tor is not properly set. Please try using options '--tor-type' and/or '--tor-port'"
raise SqlmapConnectionException(errMsg)
else:
infoMsg = "Tor is properly being used"
logger.info(infoMsg)
def _basicOptionValidation():
if conf.limitStart is not None and not (isinstance(conf.limitStart, int) and conf.limitStart > 0):
errMsg = "value for option '--start' (limitStart) must be an integer value greater than zero (>0)"
raise SqlmapSyntaxException(errMsg)
if conf.limitStop is not None and not (isinstance(conf.limitStop, int) and conf.limitStop > 0):
errMsg = "value for option '--stop' (limitStop) must be an integer value greater than zero (>0)"
raise SqlmapSyntaxException(errMsg)
if conf.level is not None and not (isinstance(conf.level, int) and conf.level >= 1 and conf.level <= 5):
errMsg = "value for option '--level' must be an integer value from range [1, 5]"
raise SqlmapSyntaxException(errMsg)
if conf.risk is not None and not (isinstance(conf.risk, int) and conf.risk >= 1 and conf.risk <= 3):
errMsg = "value for option '--risk' must be an integer value from range [1, 3]"
raise SqlmapSyntaxException(errMsg)
if isinstance(conf.limitStart, int) and conf.limitStart > 0 and \
isinstance(conf.limitStop, int) and conf.limitStop < conf.limitStart:
errMsg = "value for option '--start' (limitStart) must be smaller or equal than value for --stop (limitStop) option"
raise SqlmapSyntaxException(errMsg)
if isinstance(conf.firstChar, int) and conf.firstChar > 0 and \
isinstance(conf.lastChar, int) and conf.lastChar < conf.firstChar:
errMsg = "value for option '--first' (firstChar) must be smaller than or equal to value for --last (lastChar) option"
raise SqlmapSyntaxException(errMsg)
if conf.textOnly and conf.nullConnection:
errMsg = "switch '--text-only' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.direct and conf.url:
errMsg = "option '-d' is incompatible with option '-u' ('--url')"
raise SqlmapSyntaxException(errMsg)
if conf.identifyWaf and conf.skipWaf:
errMsg = "switch '--identify-waf' is incompatible with switch '--skip-waf'"
raise SqlmapSyntaxException(errMsg)
if conf.titles and conf.nullConnection:
errMsg = "switch '--titles' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.dumpTable and conf.search:
errMsg = "switch '--dump' is incompatible with switch '--search'"
raise SqlmapSyntaxException(errMsg)
if conf.data and conf.nullConnection:
errMsg = "option '--data' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.string and conf.nullConnection:
errMsg = "option '--string' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.notString and conf.nullConnection:
errMsg = "option '--not-string' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.noCast and conf.hexConvert:
errMsg = "switch '--no-cast' is incompatible with switch '--hex'"
raise SqlmapSyntaxException(errMsg)
if conf.dumpAll and conf.search:
errMsg = "switch '--dump-all' is incompatible with switch '--search'"
raise SqlmapSyntaxException(errMsg)
if conf.string and conf.notString:
errMsg = "option '--string' is incompatible with switch '--not-string'"
raise SqlmapSyntaxException(errMsg)
if conf.regexp and conf.nullConnection:
errMsg = "option '--regexp' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.regexp:
try:
re.compile(conf.regexp)
except re.error, ex:
errMsg = "invalid regular expression '%s' ('%s')" % (conf.regexp, getSafeExString(ex))
raise SqlmapSyntaxException(errMsg)
if conf.crawlExclude:
try:
re.compile(conf.crawlExclude)
except re.error, ex:
errMsg = "invalid regular expression '%s' ('%s')" % (conf.crawlExclude, getSafeExString(ex))
raise SqlmapSyntaxException(errMsg)
if conf.dumpTable and conf.dumpAll:
errMsg = "switch '--dump' is incompatible with switch '--dump-all'"
raise SqlmapSyntaxException(errMsg)
if conf.predictOutput and (conf.threads > 1 or conf.optimize):
errMsg = "switch '--predict-output' is incompatible with option '--threads' and switch '-o'"
raise SqlmapSyntaxException(errMsg)
if conf.threads > MAX_NUMBER_OF_THREADS and not conf.get("skipThreadCheck"):
errMsg = "maximum number of used threads is %d avoiding potential connection issues" % MAX_NUMBER_OF_THREADS
raise SqlmapSyntaxException(errMsg)
if conf.forms and not any((conf.url, conf.googleDork, conf.bulkFile, conf.sitemapUrl)):
errMsg = "switch '--forms' requires usage of option '-u' ('--url'), '-g', '-m' or '-x'"
raise SqlmapSyntaxException(errMsg)
if conf.crawlExclude and not conf.crawlDepth:
errMsg = "option '--crawl-exclude' requires usage of switch '--crawl'"
raise SqlmapSyntaxException(errMsg)
if conf.safePost and not conf.safeUrl:
errMsg = "option '--safe-post' requires usage of option '--safe-url'"
raise SqlmapSyntaxException(errMsg)
if conf.safeFreq and not any((conf.safeUrl, conf.safeReqFile)):
errMsg = "option '--safe-freq' requires usage of option '--safe-url' or '--safe-req'"
raise SqlmapSyntaxException(errMsg)
if conf.safeReqFile and any((conf.safeUrl, conf.safePost)):
errMsg = "option '--safe-req' is incompatible with option '--safe-url' and option '--safe-post'"
raise SqlmapSyntaxException(errMsg)
if conf.csrfUrl and not conf.csrfToken:
errMsg = "option '--csrf-url' requires usage of option '--csrf-token'"
raise SqlmapSyntaxException(errMsg)
if conf.csrfToken and conf.threads > 1:
errMsg = "option '--csrf-url' is incompatible with option '--threads'"
raise SqlmapSyntaxException(errMsg)
if conf.requestFile and conf.url and conf.url != DUMMY_URL:
errMsg = "option '-r' is incompatible with option '-u' ('--url')"
raise SqlmapSyntaxException(errMsg)
if conf.direct and conf.proxy:
errMsg = "option '-d' is incompatible with option '--proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.direct and conf.tor:
errMsg = "option '-d' is incompatible with switch '--tor'"
raise SqlmapSyntaxException(errMsg)
if not conf.tech:
errMsg = "option '--technique' can't be empty"
raise SqlmapSyntaxException(errMsg)
if conf.tor and conf.ignoreProxy:
errMsg = "switch '--tor' is incompatible with switch '--ignore-proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.tor and conf.proxy:
errMsg = "switch '--tor' is incompatible with option '--proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.proxy and conf.proxyFile:
errMsg = "switch '--proxy' is incompatible with option '--proxy-file'"
raise SqlmapSyntaxException(errMsg)
if conf.checkTor and not any((conf.tor, conf.proxy)):
errMsg = "switch '--check-tor' requires usage of switch '--tor' (or option '--proxy' with HTTP proxy address using Tor)"
raise SqlmapSyntaxException(errMsg)
if conf.torPort is not None and not (isinstance(conf.torPort, int) and conf.torPort >= 0 and conf.torPort <= 65535):
errMsg = "value for option '--tor-port' must be in range 0-65535"
raise SqlmapSyntaxException(errMsg)
if conf.torType not in getPublicTypeMembers(PROXY_TYPE, True):
errMsg = "option '--tor-type' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(PROXY_TYPE, True))
raise SqlmapSyntaxException(errMsg)
if conf.dumpFormat not in getPublicTypeMembers(DUMP_FORMAT, True):
errMsg = "option '--dump-format' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(DUMP_FORMAT, True))
raise SqlmapSyntaxException(errMsg)
if conf.skip and conf.testParameter:
errMsg = "option '--skip' is incompatible with option '-p'"
raise SqlmapSyntaxException(errMsg)
if conf.mobile and conf.agent:
errMsg = "switch '--mobile' is incompatible with option '--user-agent'"
raise SqlmapSyntaxException(errMsg)
if conf.proxy and conf.ignoreProxy:
errMsg = "option '--proxy' is incompatible with switch '--ignore-proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.timeSec < 1:
errMsg = "value for option '--time-sec' must be a positive integer"
raise SqlmapSyntaxException(errMsg)
if conf.uChar and not re.match(UNION_CHAR_REGEX, conf.uChar):
errMsg = "value for option '--union-char' must be an alpha-numeric value (e.g. 1)"
raise SqlmapSyntaxException(errMsg)
if isinstance(conf.uCols, basestring):
if not conf.uCols.isdigit() and ("-" not in conf.uCols or len(conf.uCols.split("-")) != 2):
errMsg = "value for option '--union-cols' must be a range with hyphon "
errMsg += "(e.g. 1-10) or integer value (e.g. 5)"
raise SqlmapSyntaxException(errMsg)
if conf.dbmsCred and ':' not in conf.dbmsCred:
errMsg = "value for option '--dbms-cred' must be in "
errMsg += "format <username>:<password> (e.g. \"root:pass\")"
raise SqlmapSyntaxException(errMsg)
if conf.charset:
_ = checkCharEncoding(conf.charset, False)
if _ is None:
errMsg = "unknown charset '%s'. Please visit " % conf.charset
errMsg += "'%s' to get the full list of " % CODECS_LIST_PAGE
errMsg += "supported charsets"
raise SqlmapSyntaxException(errMsg)
else:
conf.charset = _
if conf.loadCookies:
if not os.path.exists(conf.loadCookies):
errMsg = "cookies file '%s' does not exist" % conf.loadCookies
raise SqlmapFilePathException(errMsg)
def _resolveCrossReferences():
lib.core.threads.readInput = readInput
lib.core.common.getPageTemplate = getPageTemplate
lib.core.convert.singleTimeWarnMessage = singleTimeWarnMessage
lib.request.connect.setHTTPHandlers = _setHTTPHandlers
lib.utils.search.setHTTPHandlers = _setHTTPHandlers
lib.controller.checks.setVerbosity = setVerbosity
lib.controller.checks.setWafFunctions = _setWafFunctions
def initOptions(inputOptions=AttribDict(), overrideOptions=False):
_setConfAttributes()
_setKnowledgeBaseAttributes()
_mergeOptions(inputOptions, overrideOptions)
def init():
"""
Set attributes into both configuration and knowledge base singletons
based upon command line and configuration file options.
"""
_useWizardInterface()
setVerbosity()
_saveConfig()
_setRequestFromFile()
_cleanupOptions()
_dirtyPatches()
_purgeOutput()
_checkDependencies()
_createTemporaryDirectory()
_basicOptionValidation()
_setProxyList()
_setTorProxySettings()
_setDNSServer()
_adjustLoggingFormatter()
_setMultipleTargets()
_setTamperingFunctions()
_setWafFunctions()
_setTrafficOutputFP()
_resolveCrossReferences()
_checkWebSocket()
parseTargetUrl()
parseTargetDirect()
if any((conf.url, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.requestFile, conf.googleDork, conf.liveTest)):
_setHTTPTimeout()
_setHTTPExtraHeaders()
_setHTTPCookies()
_setHTTPReferer()
_setHTTPHost()
_setHTTPUserAgent()
_setHTTPAuthentication()
_setHTTPHandlers()
_setDNSCache()
_setSocketPreConnect()
_setSafeVisit()
_doSearch()
_setBulkMultipleTargets()
_setSitemapTargets()
_checkTor()
_setCrawler()
_findPageForms()
_setDBMS()
_setTechnique()
_setThreads()
_setOS()
_setWriteFile()
_setMetasploit()
_setDBMSAuthentication()
loadBoundaries()
loadPayloads()
_setPrefixSuffix()
update()
_loadQueries()
|
undefinedv/Jingubang
|
sqlmap/lib/core/option.py
|
Python
|
gpl-3.0
| 93,494
|
[
"VisIt"
] |
87a92e9d579a176a04e317de9ee5dae0bcefde5a10657dc7bb3e649c47a3018f
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import warnings
import math
import numpy
from . import _ni_support
from . import _nd_image
from . import _ni_docstrings
from scipy.misc import doccer
from scipy._lib._version import NumpyVersion
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
@_ni_docstrings.docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import correlate1d
>>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([ 8, 26, 8, 12, 7, 28, 36, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
origin > len(weights)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return output
@_ni_docstrings.docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
Examples
--------
>>> from scipy.ndimage import convolve1d
>>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([14, 24, 4, 13, 12, 36, 27, 0])
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
p = numpy.polynomial.Polynomial([0, 0, -0.5 / (sigma * sigma)])
x = numpy.arange(-radius, radius + 1)
phi_x = numpy.exp(p(x), dtype=numpy.double)
phi_x /= phi_x.sum()
if order > 0:
q = numpy.polynomial.Polynomial([1])
p_deriv = p.deriv()
for _ in range(order):
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
q = q.deriv() + q * p_deriv
phi_x *= q(x)
return phi_x
@_ni_docstrings.docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : int, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. A positive order corresponds to convolution with
that derivative of a Gaussian.
%(output)s
%(mode)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
Examples
--------
>>> from scipy.ndimage import gaussian_filter1d
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
>>> import matplotlib.pyplot as plt
>>> np.random.seed(280490)
>>> x = np.random.randn(101).cumsum()
>>> y3 = gaussian_filter1d(x, 3)
>>> y6 = gaussian_filter1d(x, 6)
>>> plt.plot(x, 'k', label='original data')
>>> plt.plot(y3, '--', label='filtered, sigma=3')
>>> plt.plot(y6, ':', label='filtered, sigma=6')
>>> plt.legend()
>>> plt.grid()
>>> plt.show()
"""
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
# Since we are calling correlate, not convolve, reverse the kernel
weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
return correlate1d(input, weights, axis, output, mode, cval, 0)
@_ni_docstrings.docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : int or sequence of ints, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. A positive order
corresponds to convolution with that derivative of a Gaussian.
%(output)s
%(mode_multiple)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> from scipy.ndimage import gaussian_filter
>>> a = np.arange(50, step=2).reshape((5,5))
>>> a
array([[ 0, 2, 4, 6, 8],
[10, 12, 14, 16, 18],
[20, 22, 24, 26, 28],
[30, 32, 34, 36, 38],
[40, 42, 44, 46, 48]])
>>> gaussian_filter(a, sigma=1)
array([[ 4, 6, 8, 9, 11],
[10, 12, 14, 15, 17],
[20, 22, 24, 25, 27],
[29, 31, 33, 34, 36],
[35, 37, 39, 40, 42]])
>>> from scipy import misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = gaussian_filter(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order, mode in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.prewitt(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
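# Separable implementation: derivative kernel [-1, 0, 1] along the selected
# axis, uniform smoothing [1, 1, 1] along every other axis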
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
return output
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
return output
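# Editor's note: a hedged usage sketch (not part of scipy). Because `sobel`
# has the (input, axis, output, mode, cval) signature expected of a
# `derivative` callback, a full Sobel gradient magnitude can be built by hand
# or via `generic_gradient_magnitude` (defined later in this module); the two
# should agree for floating-point input.
def _demo_sobel_magnitude():
    import numpy as np
    from scipy import ndimage
    a = np.random.rand(16, 16)
    by_hand = np.hypot(ndimage.sobel(a, axis=0), ndimage.sobel(a, axis=1))
    generic = ndimage.generic_gradient_magnitude(a, ndimage.sobel)
    assert np.allclose(by_hand, generic)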
@_ni_docstrings.docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords=None):
"""
N-dimensional Laplace filter using a provided second derivative function.
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative2(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-dimensional Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.laplace(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> result = ndimage.gaussian_laplace(ascent, sigma=1)
>>> ax1.imshow(result)
>>> result = ndimage.gaussian_laplace(ascent, sigma=3)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
extra_arguments=(), extra_keywords=None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
numpy.sqrt(output, output, casting='unsafe')
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Returns
-------
gaussian_gradient_magnitude : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
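# Editor's note: an illustrative sketch (not part of scipy) of the
# relationship implemented above -- the Gaussian gradient magnitude is the
# root-sum-of-squares of first-order Gaussian derivatives along each axis.
def _demo_gaussian_gradient_magnitude():
    import numpy as np
    from scipy import ndimage
    a = np.random.rand(16, 16)
    gx = ndimage.gaussian_filter(a, sigma=2, order=[0, 1])  # d/d(axis 1)
    gy = ndimage.gaussian_filter(a, sigma=2, order=[1, 0])  # d/d(axis 0)
    assert np.allclose(ndimage.gaussian_gradient_magnitude(a, sigma=2),
                       np.hypot(gx, gy))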
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return output
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
%(input)s
weights : ndarray
array of weights, same number of dimensions as input
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
See Also
--------
convolve : Convolve an image with a kernel.
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
%(input)s
weights : array_like
Array of weights, same number of dimensions as input
%(output)s
%(mode_multiple)s
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
%(origin_multiple)s
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e., where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
    With ``mode='nearest'``, the single value nearest to an edge of
    `input` is repeated as many times as needed to match the overlapping
    `weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
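# Editor's note: a minimal sketch (not part of scipy) of the relationship
# between the two wrappers above: for an odd-sized kernel, convolution is
# correlation with the kernel reversed along every axis, which is exactly
# what `_correlate_or_convolve` does internally.
def _demo_convolve_vs_correlate():
    import numpy as np
    from scipy import ndimage
    a = np.random.rand(8, 8)
    k = np.random.rand(3, 3)
    assert np.allclose(ndimage.convolve(a, k),
                       ndimage.correlate(a, k[::-1, ::-1]))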
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import uniform_filter1d
>>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([4, 3, 4, 1, 4, 6, 6, 3])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return output
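# Editor's note: a short worked check (not part of scipy) of the doctest
# above. With the default 'reflect' mode the input [2, 8, 0, ...] is extended
# to [2, 2, 8, 0, ...], so the first output value is (2 + 2 + 8) // 3 = 4;
# the integer dtype of the input truncates each window mean.
def _demo_uniform_filter1d():
    import numpy as np
    from scipy import ndimage
    x = np.array([2, 8, 0, 4, 1, 9, 9, 0])
    out = ndimage.uniform_filter1d(x, size=3)
    assert out[0] == (2 + 2 + 8) // 3 == 4
    # With floating-point input the exact running mean is returned instead.
    exact = ndimage.uniform_filter1d(x.astype(float), size=3)
    assert np.isclose(exact[0], (2 + 2 + 8) / 3)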
@_ni_docstrings.docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
uniform_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.uniform_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin, mode in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import minimum_filter1d
>>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([2, 0, 0, 0, 1, 1, 0, 0])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return output
@_ni_docstrings.docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
    maximum1d : ndarray
        Maximum-filtered array with same shape as `input`.
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import maximum_filter1d
>>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([8, 8, 8, 4, 9, 9, 9, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return output
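# Editor's note: a pure-Python sketch (not scipy's C implementation) of the
# O(n) sliding-window idea behind the MINLIST/MAXLIST algorithms referenced
# above: a deque keeps candidate indices in increasing value order, so each
# element is pushed and popped at most once regardless of window size. Note
# this trailing-window variant ignores the centering and boundary modes that
# `minimum_filter1d` applies.
def _sliding_minimum(seq, size):
    from collections import deque
    out, window = [], deque()  # window holds indices of candidate minima
    for i, v in enumerate(seq):
        while window and seq[window[-1]] >= v:
            window.pop()               # dominated candidates can never win
        window.append(i)
        if window[0] <= i - size:
            window.popleft()           # candidate slid out of the window
        if i >= size - 1:
            out.append(seq[window[0]])
    return out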
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint, dtype=bool)
if not footprint.any():
raise ValueError("All-zero footprint is not supported.")
if footprint.all():
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin, mode in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return output
@_ni_docstrings.docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
minimum_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.minimum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@_ni_docstrings.docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
maximum_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.maximum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@_ni_docstrings.docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return output
@_ni_docstrings.docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : int
        The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
rank_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.rank_filter(ascent, rank=42, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
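# Editor's note: an illustrative check (not part of scipy) of the dispatch in
# `_rank_filter` above: rank 0 is the window minimum, and rank -1 (i.e.
# filter_size - 1 after wrapping) is the window maximum.
def _demo_rank_filter_edges():
    import numpy as np
    from scipy import ndimage
    a = np.random.rand(10, 10)
    assert np.array_equal(ndimage.rank_filter(a, 0, size=3),
                          ndimage.minimum_filter(a, size=3))
    assert np.array_equal(ndimage.rank_filter(a, -1, size=3),
                          ndimage.maximum_filter(a, size=3))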
@_ni_docstrings.docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculate a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
median_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.median_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@_ni_docstrings.docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
        percentile = -20 equals percentile = 80.
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
percentile_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
@_ni_docstrings.docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a one-dimensional filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
given function at each line. The arguments of the line are the
input line, and the output line. The input and output lines are 1D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int function(double *input_line, npy_intp input_length,
double *output_line, npy_intp output_length,
void *user_data)
int function(double *input_line, intptr_t input_length,
double *output_line, intptr_t output_length,
void *user_data)
The calling function iterates over the lines of the input and output
arrays, calling the callback function at each line. The current line
is extended according to the border conditions set by the calling
function, and the result is copied into the array that is passed
through ``input_line``. The length of the input line (after extension)
is passed through ``input_length``. The callback function should apply
the filter and store the result in the array passed through
``output_line``. The length of the output line is passed through
``output_length``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments,
extra_keywords)
return output
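# Editor's note: a hedged example (not part of scipy) of a Python callback
# for `generic_filter1d`. The input line arrives already extended by
# filter_size - 1 elements, and the result must be written into the output
# line in place, as the docstring above describes.
def _demo_generic_filter1d():
    import numpy as np
    from scipy import ndimage
    def mean3(input_line, output_line):
        # input_line has len(output_line) + 2 elements for filter_size=3.
        output_line[...] = (input_line[:-2] + input_line[1:-1]
                            + input_line[2:]) / 3.0
    x = np.array([2.0, 8.0, 0.0, 4.0, 1.0, 9.0, 9.0, 0.0])
    assert np.allclose(ndimage.generic_filter1d(x, mean3, 3),
                       ndimage.uniform_filter1d(x, 3))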
@_ni_docstrings.docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int callback(double *buffer, npy_intp filter_size,
double *return_value, void *user_data)
int callback(double *buffer, intptr_t filter_size,
double *return_value, void *user_data)
The calling function iterates over the elements of the input and
output arrays, calling the callback function at each element. The
elements within the footprint of the filter at the current element are
passed through the ``buffer`` parameter, and the number of elements
within the footprint through ``filter_size``. The calculated value is
returned in ``return_value``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
"""
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return output
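# Editor's note: a minimal usage sketch (not part of scipy). The callback
# receives the footprint values as a flat 1-D double array and returns a
# scalar; here it computes the local peak-to-peak range in a 3x3 window,
# which should equal a maximum filter minus a minimum filter.
def _demo_generic_filter():
    import numpy as np
    from scipy import ndimage
    a = np.random.rand(8, 8)
    local_range = ndimage.generic_filter(a, lambda v: v.max() - v.min(),
                                         size=3)
    expected = (ndimage.maximum_filter(a, size=3)
                - ndimage.minimum_filter(a, size=3))
    assert np.allclose(local_range, expected)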
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/scipy/ndimage/filters.py
|
Python
|
gpl-3.0
| 49,136
|
[
"Gaussian"
] |
8dd660ea6dabc7a927bc3fb28aabb5444b9af45437ec805539a0f2a891cd7b28
|
########################################################################
# $HeadURL$
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Utilities for managing DIRAC configuration:
getCEsFromCS
getUnusedGridCEs
getUnusedGridSEs
getSiteUpdates
getSEUpdates
"""
__RCSID__ = "$Id$"
import re
import types
from urlparse import urlparse
from DIRAC import gConfig, gLogger, S_OK
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities.Grid import getBdiiCEInfo, getBdiiSEInfo, ldapService
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getDIRACSiteName, getDIRACSesForSRM
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOs, getVOOption
def getGridVOs():
""" Get all the VOMS VO names served by this DIRAC service
"""
voNames = []
result = getVOs()
if not result['OK']:
return result
else:
vos = result['Value']
for vo in vos:
vomsVO = getVOOption( vo, "VOMSName" )
if vomsVO:
voNames.append( vomsVO )
return S_OK( voNames )
def getCEsFromCS():
""" Get all the CEs defined in the CS
"""
knownCEs = []
result = gConfig.getSections( '/Resources/Sites' )
if not result['OK']:
return result
grids = result['Value']
for grid in grids:
result = gConfig.getSections( '/Resources/Sites/%s' % grid )
if not result['OK']:
return result
sites = result['Value']
for site in sites:
opt = gConfig.getOptionsDict( '/Resources/Sites/%s/%s' % ( grid, site ) )['Value']
ces = List.fromChar( opt.get( 'CE', '' ) )
knownCEs += ces
return S_OK( knownCEs )
def getSEsFromCS( protocol = 'srm' ):
""" Get all the SEs defined in the CS
"""
knownSEs = {}
result = gConfig.getSections( '/Resources/StorageElements' )
if not result['OK']:
return result
ses = result['Value']
for se in ses:
seSection = '/Resources/StorageElements/%s' % se
result = gConfig.getSections( seSection )
if not result['OK']:
continue
accesses = result['Value']
for access in accesses:
seProtocol = gConfig.getValue( cfgPath( seSection, access, 'Protocol' ), '' )
if seProtocol.lower() == protocol.lower() or protocol == 'any':
host = gConfig.getValue( cfgPath( seSection, access, 'Host' ), '' )
knownSEs.setdefault( host, [] )
knownSEs[host].append( se )
else:
continue
return S_OK( knownSEs )
def getGridCEs( vo, bdiiInfo = None, ceBlackList = None ):
""" Get all the CEs available for a given VO and having queues in Production state
"""
knownCEs = set()
if ceBlackList is not None:
knownCEs = knownCEs.union( set( ceBlackList ) )
ceBdiiDict = bdiiInfo
if bdiiInfo is None:
result = getBdiiCEInfo( vo )
if not result['OK']:
return result
ceBdiiDict = result['Value']
siteDict = {}
for site in ceBdiiDict:
siteCEs = set( ceBdiiDict[site]['CEs'].keys() )
newCEs = siteCEs - knownCEs
if not newCEs:
continue
ceFullDict = {}
for ce in newCEs:
ceDict = {}
ceInfo = ceBdiiDict[site]['CEs'][ce]
ceType = 'Unknown'
ceDict['Queues'] = []
for queue in ceInfo['Queues']:
queueStatus = ceInfo['Queues'][queue].get( 'GlueCEStateStatus', 'UnknownStatus' )
if 'production' in queueStatus.lower():
ceType = ceInfo['Queues'][queue].get( 'GlueCEImplementationName', '' )
ceDict['Queues'].append( queue )
if not ceDict['Queues']:
continue
ceDict['CEType'] = ceType
ceDict['GOCSite'] = site
ceDict['CEID'] = ce
systemName = ceInfo.get( 'GlueHostOperatingSystemName', 'Unknown' )
systemVersion = ceInfo.get( 'GlueHostOperatingSystemVersion', 'Unknown' )
systemRelease = ceInfo.get( 'GlueHostOperatingSystemRelease', 'Unknown' )
ceDict['System'] = ( systemName, systemVersion, systemRelease )
ceFullDict[ce] = ceDict
siteDict[site] = ceFullDict
result = S_OK( siteDict )
result['BdiiInfo'] = ceBdiiDict
return result
def getSiteUpdates( vo, bdiiInfo = None, log = None ):
""" Get all the necessary updates for the already defined sites and CEs
"""
def addToChangeSet( entry, changeSet ):
_section, _option, value, new_value = entry
if new_value and new_value != value:
changeSet.add( entry )
if log is None:
log = gLogger
ceBdiiDict = bdiiInfo
if bdiiInfo is None:
result = getBdiiCEInfo( vo )
if not result['OK']:
return result
ceBdiiDict = result['Value']
changeSet = set()
gConfig.forceRefresh()
for site in ceBdiiDict:
result = getDIRACSiteName( site )
if not result['OK']:
continue
siteNames = result['Value']
for siteName in siteNames:
siteSection = cfgPath( '/Resources', 'Sites', siteName.split('.')[0], siteName )
result = gConfig.getOptionsDict( siteSection )
if not result['OK']:
continue
siteDict = result['Value']
# Current CS values
coor = siteDict.get( 'Coordinates', 'Unknown' )
mail = siteDict.get( 'Mail', 'Unknown' ).replace( ' ','' )
description = siteDict.get( 'Description', 'Unknown' )
longitude = ceBdiiDict[site].get( 'GlueSiteLongitude', '' ).strip()
latitude = ceBdiiDict[site].get( 'GlueSiteLatitude', '' ).strip()
# Current BDII value
newcoor = ''
if longitude and latitude:
newcoor = "%s:%s" % ( longitude, latitude )
newmail = ceBdiiDict[site].get( 'GlueSiteSysAdminContact', '' ).replace( 'mailto:', '' ).strip()
newdescription = ceBdiiDict[site].get( 'GlueSiteDescription', '' ).strip()
# Adding site data to the changes list
addToChangeSet( ( siteSection, 'Coordinates', coor, newcoor ), changeSet )
addToChangeSet( ( siteSection, 'Mail', mail, newmail ), changeSet )
addToChangeSet( ( siteSection, 'Description', description, newdescription ), changeSet )
ces = gConfig.getValue( cfgPath( siteSection, 'CE' ), [] )
for ce in ces:
ceSection = cfgPath( siteSection, 'CEs', ce )
ceDict = {}
result = gConfig.getOptionsDict( ceSection )
if result['OK']:
ceDict = result['Value']
else:
if ceBdiiDict[site]['CEs'].get( ce, None ):
log.notice( "Adding new CE %s to site %s/%s" % (ce, siteName, site) )
ceInfo = ceBdiiDict[site]['CEs'].get( ce, None )
if ceInfo is None:
ceType = ceDict.get( 'CEType', '')
continue
# Current CS CE info
arch = ceDict.get( 'architecture', 'Unknown' )
OS = ceDict.get( 'OS', 'Unknown' )
si00 = ceDict.get( 'SI00', 'Unknown' )
ceType = ceDict.get( 'CEType', 'Unknown' )
ram = ceDict.get( 'HostRAM', 'Unknown' )
submissionMode = ceDict.get( 'SubmissionMode', 'Unknown' )
# Current BDII CE info
newarch = ceBdiiDict[site]['CEs'][ce].get( 'GlueHostArchitecturePlatformType', '' ).strip()
systemName = ceInfo.get( 'GlueHostOperatingSystemName', '' ).strip()
systemVersion = ceInfo.get( 'GlueHostOperatingSystemVersion', '' ).strip()
systemRelease = ceInfo.get( 'GlueHostOperatingSystemRelease', '' ).strip()
newOS = ''
if systemName and systemVersion and systemRelease:
newOS = '_'.join( ( systemName, systemVersion, systemRelease ) )
newsi00 = ceInfo.get( 'GlueHostBenchmarkSI00', '' ).strip()
newCEType = 'Unknown'
for queue in ceInfo['Queues']:
queueDict = ceInfo['Queues'][queue]
newCEType = queueDict.get( 'GlueCEImplementationName', '' ).strip()
if newCEType:
break
        newSubmissionMode = ''
        if newCEType == 'ARC-CE':
          newCEType = 'ARC'
        if newCEType in ['ARC', 'CREAM']:
          newSubmissionMode = "Direct"
newRAM = ceInfo.get( 'GlueHostMainMemoryRAMSize', '' ).strip()
# Adding CE data to the change list
addToChangeSet( ( ceSection, 'architecture', arch, newarch ), changeSet )
addToChangeSet( ( ceSection, 'OS', OS, newOS ), changeSet )
addToChangeSet( ( ceSection, 'SI00', si00, newsi00 ), changeSet )
addToChangeSet( ( ceSection, 'CEType', ceType, newCEType ), changeSet )
addToChangeSet( ( ceSection, 'MaxRAM', ram, newRAM ), changeSet )
if submissionMode == "Unknown":
addToChangeSet( ( ceSection, 'SubmissionMode', submissionMode, newSubmissionMode ), changeSet )
queues = ceInfo['Queues'].keys()
for queue in queues:
queueSection = cfgPath( ceSection, 'Queues', queue )
queueDict = {}
result = gConfig.getOptionsDict( queueSection )
if result['OK']:
queueDict = result['Value']
else:
log.notice( "Adding new queue %s to CE %s" % (queue, ce) )
queueInfo = ceInfo['Queues'][queue]
queueStatus = queueInfo['GlueCEStateStatus']
if queueStatus.lower() != "production":
continue
# Current CS queue info
maxCPUTime = queueDict.get( 'maxCPUTime', 'Unknown' )
si00 = queueDict.get( 'SI00', 'Unknown' )
maxTotalJobs = queueDict.get( 'MaxTotalJobs', 'Unknown' )
# Current BDII queue info
newMaxCPUTime = queueInfo.get( 'GlueCEPolicyMaxCPUTime', '' )
newSI00 = ''
caps = queueInfo['GlueCECapability']
if type( caps ) == type( '' ):
caps = [caps]
for cap in caps:
if 'CPUScalingReferenceSI00' in cap:
newSI00 = cap.split( '=' )[-1]
# Adding queue info to the CS
addToChangeSet( ( queueSection, 'maxCPUTime', maxCPUTime, newMaxCPUTime ), changeSet )
addToChangeSet( ( queueSection, 'SI00', si00, newSI00 ), changeSet )
if maxTotalJobs == "Unknown":
newTotalJobs = min( 1000, int( int( queueInfo.get( 'GlueCEInfoTotalCPUs', 0 ) )/2 ) )
newWaitingJobs = max( 2, int( newTotalJobs * 0.1 ) )
newTotalJobs = str( newTotalJobs )
newWaitingJobs = str( newWaitingJobs )
addToChangeSet( ( queueSection, 'MaxTotalJobs', '', newTotalJobs ), changeSet )
addToChangeSet( ( queueSection, 'MaxWaitingJobs', '', newWaitingJobs ), changeSet )
# Updating eligible VO list
VOs = set()
if queueDict.get( 'VO', '' ):
VOs = set( [ q.strip() for q in queueDict.get( 'VO', '' ).split( ',' ) if q ] )
if not vo in VOs:
VOs.add( vo )
VOs = list( VOs )
newVOs = ','.join( VOs )
addToChangeSet( ( queueSection, 'VO', '', newVOs ), changeSet )
return S_OK( changeSet )
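# Editor's note: an illustrative sketch (not part of DIRAC) of the change-set
# convention used by getSiteUpdates() above and getSRMUpdates() below. Each
# entry is a ( section, option, currentValue, newValue ) tuple, and the
# addToChangeSet() helper only records entries whose BDII value is non-empty
# and actually differs from what is already in the CS.
def _demoChangeSet():
  changeSet = set()
  def addToChangeSet( entry, changeSet ):
    _section, _option, value, new_value = entry
    if new_value and new_value != value:
      changeSet.add( entry )
  # '/Resources/Sites/X' is a made-up section path for illustration only
  addToChangeSet( ( '/Resources/Sites/X', 'Mail', 'old@x', 'new@x' ), changeSet )
  addToChangeSet( ( '/Resources/Sites/X', 'Mail', 'same@x', 'same@x' ), changeSet )  # unchanged, skipped
  addToChangeSet( ( '/Resources/Sites/X', 'Mail', 'old@x', '' ), changeSet )         # empty, skipped
  assert len( changeSet ) == 1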
def getGridSEs( vo, bdiiInfo = None, seBlackList = None ):
""" Get all the SEs available for a given VO
"""
seBdiiDict = bdiiInfo
if bdiiInfo is None:
result = getBdiiSEInfo( vo )
if not result['OK']:
return result
seBdiiDict = result['Value']
knownSEs = set()
if seBlackList is not None:
knownSEs = knownSEs.union( set( seBlackList ) )
siteDict = {}
for site in seBdiiDict:
for gridSE in seBdiiDict[site]['SEs']:
seDict = seBdiiDict[site]['SEs'][gridSE]
#if "lhcb" in seDict['GlueSAName']:
# print '+'*80
# print gridSE
# for k,v in seDict.items():
# print k,'\t',v
if not gridSE in knownSEs:
siteDict.setdefault( site, {} )
if type( seDict['GlueSAAccessControlBaseRule'] ) == types.ListType:
voList = [ re.sub( '^VO:', '', s ) for s in seDict['GlueSAAccessControlBaseRule'] ]
else:
voList = [ re.sub( '^VO:', '', seDict['GlueSAAccessControlBaseRule'] ) ]
siteDict[site][gridSE] = { 'GridSite': seDict['GlueSiteUniqueID'],
'BackendType': seDict['GlueSEImplementationName'],
'Description': seDict.get( 'GlueSEName', '-' ),
'VOs': voList
}
result = S_OK( siteDict )
result['BdiiInfo'] = seBdiiDict
return result
def getGridSRMs( vo, bdiiInfo = None, srmBlackList = None, unUsed = False ):
result = ldapService( serviceType = 'SRM', vo = vo )
if not result['OK']:
return result
srmBdiiDict = result['Value']
knownSRMs = set()
if srmBlackList is not None:
knownSRMs = knownSRMs.union( set( srmBlackList ) )
siteSRMDict = {}
for srm in srmBdiiDict:
endPoint = srm.get( 'GlueServiceEndpoint', '')
srmHost = ''
if endPoint:
srmHost = urlparse( endPoint ).hostname
if not srmHost:
continue
if srmHost in knownSRMs:
continue
if unUsed:
result = getDIRACSesForSRM( srmHost )
if not result['OK']:
return result
diracSEs = result['Value']
if diracSEs:
# If it is a known SRM and only new SRMs are requested, continue
continue
site = srm.get( 'GlueForeignKey', '' ).replace( 'GlueSiteUniqueID=', '' )
siteSRMDict.setdefault( site, {} )
siteSRMDict[site][srmHost] = srm
if bdiiInfo is None:
result = getBdiiSEInfo( vo )
if not result['OK']:
return result
seBdiiDict = result['Value']
else:
seBdiiDict = bdiiInfo
srmSeDict = {}
for site in siteSRMDict:
srms = siteSRMDict[site].keys()
for srm in srms:
if seBdiiDict.get( site, {} ).get( 'SEs', {} ).get( srm, {} ):
srmSeDict.setdefault( site, {} )
srmSeDict[site].setdefault( srm, {} )
srmSeDict[site][srm]['SRM'] = siteSRMDict[site][srm]
srmSeDict[site][srm]['SE'] = seBdiiDict[site]['SEs'][srm]
return S_OK( srmSeDict )
def getSRMUpdates( vo, bdiiInfo = None ):
changeSet = set()
def addToChangeSet( entry, changeSet ):
_section, _option, value, new_value = entry
if new_value and new_value != value:
changeSet.add( entry )
result = getGridSRMs( vo, bdiiInfo = bdiiInfo )
if not result['OK']:
return result
srmBdiiDict = result['Value']
result = getSEsFromCS()
if not result['OK']:
return result
seDict = result['Value']
result = getVOs()
if result['OK']:
csVOs = set( result['Value'] )
else:
csVOs = set( [vo] )
for seHost, diracSE in seDict.items():
seSection = '/Resources/StorageElements/%s' % diracSE[0]
# Look up existing values first
description = gConfig.getValue( cfgPath( seSection, 'Description'), 'Unknown' )
backend = gConfig.getValue( cfgPath( seSection, 'BackendType'), 'Unknown' )
vos = gConfig.getValue( cfgPath( seSection, 'VO'), 'Unknown' ).replace( ' ','' )
size = gConfig.getValue( cfgPath( seSection, 'TotalSize'), 'Unknown' )
# Look up current BDII values
srmDict = {}
seBdiiDict = {}
for site in srmBdiiDict:
if seHost in srmBdiiDict[site]:
srmDict = srmBdiiDict[site][seHost]['SRM']
seBdiiDict = srmBdiiDict[site][seHost]['SE']
break
if not srmDict or not seBdiiDict:
continue
newDescription = seBdiiDict.get( 'GlueSEName', 'Unknown' )
newBackend = seBdiiDict.get( 'GlueSEImplementationName', 'Unknown' )
newSize = seBdiiDict.get( 'GlueSESizeTotal', 'Unknown' )
addToChangeSet( ( seSection, 'Description', description, newDescription ), changeSet )
addToChangeSet( ( seSection, 'BackendType', backend, newBackend ), changeSet )
addToChangeSet( ( seSection, 'TotalSize', size, newSize ), changeSet )
# Evaluate VOs if no space token defined, otherwise this is VO specific
spaceToken = ''
for i in range( 1, 10 ):
protocol = gConfig.getValue( cfgPath( seSection, 'AccessProtocol.%d' % i, 'Protocol' ), '' )
if protocol.lower() == 'srm':
spaceToken = gConfig.getValue( cfgPath( seSection, 'AccessProtocol.%d' % i, 'SpaceToken' ), '' )
break
if not spaceToken:
bdiiVOs = srmDict.get( 'GlueServiceAccessControlBaseRule', [] )
bdiiVOs = set( [ re.sub( '^VO:', '', rule ) for rule in bdiiVOs ] )
seVOs = csVOs.intersection( bdiiVOs )
newVOs = ','.join( seVOs )
addToChangeSet( ( seSection, 'VO', vos, newVOs ), changeSet )
return S_OK( changeSet )
|
miloszz/DIRAC
|
ConfigurationSystem/Client/Utilities.py
|
Python
|
gpl-3.0
| 16,563
|
[
"DIRAC"
] |
6af2f11c57dd89cf849848411c198f9686c51d7c62e90b191b3dcff61f3ff12d
|
from . import chess
import copy
from . import common
sw, w, nw, n, ne, e, se, s = (-1, -1), (-1, 0), (-1, 1), (0, 1), \
(1, 1), (1, 0), (1, -1), (0, -1)
traversals = [
[sw, w, nw, n, ne, e, se, s], [sw, s, se, e, ne, n, nw, w],
[nw, n, ne, e, se, s, sw, w], [nw, w, sw, s, se, e, ne, n],
[ne, e, se, s, sw, w, nw, n], [ne, n, nw, w, sw, s, se, e],
[se, s, sw, w, nw, n, ne, e], [se, e, ne, n, nw, w, sw, s]
]
def add(square, dir):
x, y = chess.LookupTables.to_xy(square)
x, y = x + dir[0], y + dir[1]
if chess.LookupTables.oob(x, y):
return -1
return chess.LookupTables.from_xy(x, y)
def inc_id(id_map, key, next_id):
if key in id_map:
return id_map, id_map[key], next_id
id_map[key] = next_id
return id_map, next_id, next_id + 1
def analyze(board, mated):
board.interferers[board.kings[mated]] = 0
retval = _analyze(board, mated)
board.interferers[board.kings[mated]] = 1
return retval
def _analyze(board, mated):
retval = {
'ideal': False,
'model': False,
'pure': False,
'hash': 0,
'traversal': -1,
'mated': board.kings[mated],
'octet': False,
'pins': 0}
mating = [chess.WHITE, chess.BLACK][mated == chess.WHITE]
mated_king = 'kK'[mated == chess.WHITE]
participants, attackers = chess.BitBoard(), {}
king_area = chess.BitBoard(
chess.LUT.att[mated_king][
board.kings[mated]].v[0], chess.LUT.att[mated_king][
board.kings[mated]].v[1])
king_area[board.kings[mated]] = 1
for square in chess.SetBits(king_area):
attackers[square] = get_attackers(board, square, mating)
if len(attackers[square]) > 1:
return retval
if len(attackers[square]) == 1:
participants[attackers[square][0][1]] = 1
if square == board.kings[mated]:
continue
blocker = board.board[square][chess.NAME]
if (blocker == '') or chess.Board.is_of(blocker, mating):
continue
participants[square] = 1
# both blocked & once attacked => need to check pinning
if (len(attackers[square]) == 1):
if not chess.LUT.btw[
attackers[square][0][1]][
board.kings[mated]][square]:
return retval
# checking that mate exploits pinning - black must have legal moves
# if pinner is removed
# nice bug: bk is transparent!
board.interferers[board.kings[mated]] = 1
id = board.board[attackers[square][0][1]][chess.ID]
board.drop(attackers[square][0][1])
if not board.has_legal_moves(mated):
board.add(attackers[square][0][0], attackers[square][0][1], id)
board.interferers[board.kings[mated]] = 0
return retval
board.interferers[board.kings[mated]] = 0
board.add(attackers[square][0][0], attackers[square][0][1], id)
retval['pins'] = retval['pins'] + 1
retval['model'], retval['pure'] = True, True
for name, pos in chess.Pieces(board, mating):
        if (name not in ['pk', 'PK'][mating == chess.WHITE]) and (
                not participants[pos]):
retval['model'] = False
break
retval['ideal'] = retval['model'] and (participants == board.interferers)
# checking unit has id = 0
init_id_map, retval['hash'] = {}, 999999999999
if len(attackers[board.kings[mated]]) > 0:
init_id_map[attackers[board.kings[mated]][0][1]] = 0 # checking unit
# id of other participants depend on traversal
for i in range(len(traversals)):
# if i <> 2: continue
id_map, hash, pinmask, next_id = copy.deepcopy(init_id_map), 0, 0, 1
for j in range(len(traversals[i])):
participant_id = -1
square = add(board.kings[mated], traversals[i][j])
if -1 == square: # board edge
id_map, participant_id, next_id = inc_id(id_map, -1, next_id)
elif board.board[square][chess.NAME] != '' and \
chess.Board.is_of(board.board[square][chess.NAME], mated): # blocked
# blockers have same id as board edge
id_map, participant_id, next_id = inc_id(id_map, -1, next_id)
if len(attackers[square]) > 0: # additionally pinned
pinmask = pinmask | (1 << j)
else: # the square is once attacked
id_map, participant_id, next_id = inc_id(
id_map, attackers[square][0][1], next_id)
hash = (hash << 3) + participant_id
hash = hash | (pinmask << 24)
if int(hash) < int(retval['hash']):
retval['hash'] = hash
retval['traversal'] = i
if retval['hash'] == 2739136:
retval['hash'] = 0
return retval
def get_attackers(board, square, color):
retval = []
for name, pos in chess.Pieces(board, color):
if chess.LUT.att[name][pos][square] and \
(chess.LUT.btw[pos][square] & board.interferers).is_zero():
retval.append((name, pos))
return retval
def print_bin(i):
s = ''
while i != 0:
s = str(1 & i) + s
i = i >> 1
print(s)
class FinalesVisitor:
def __init__(self):
self.by_hash = {}
self.different = {}
def visit(self, node, board, side_on_move):
if not isinstance(node, chess.MoveNode):
return
if isinstance(node.move, chess.NullMove):
return
if not (node.move.is_mate or node.move.is_stalemate):
return
info = analyze(board, side_on_move)
if not info['pure']:
return
info['stale'] = node.move.is_stalemate
if info['hash'] in self.by_hash:
self.by_hash[info['hash']].append(info)
else:
self.by_hash[info['hash']] = [info]
extended_key = str(info['hash']) + \
str(info['mated']) + str(info['traversal'])
if extended_key in self.different:
self.different[extended_key].append(info)
else:
self.different[extended_key] = [info]
def provides():
return [
'Model mates',
'Model stalemates',
'Ideal mates',
        'Ideal stalemates',
'Echo',
'Chameleon echo',
'Octet',
'Models with pin',
'Models with two pins',
'Models with three pins']
def check(problem, board, solution):
visitor = FinalesVisitor()
solution.traverse(board, visitor)
if len(visitor.by_hash) > 0:
problem['pure-finales'] = [key for key in visitor.by_hash]
retval = {}
for keyword in provides():
retval[keyword] = False
mm, msm, im, ism = 0, 0, 0, 0
for dif in visitor.different:
mm_, msm_, im_, ism_ = 0, 0, 0, 0
for info in visitor.different[dif]:
if info['model'] and info['stale']:
msm_ = 1
if info['model'] and not info['stale']:
mm_ = 1
if info['ideal'] and info['stale']:
ism_ = 1
if info['ideal'] and not info['stale']:
im_ = 1
if info['octet']:
retval['Octet'] = True
if info['pins'] > 0:
retval['Models with pin'] = True
if info['pins'] > 1:
retval['Models with two pins'] = True
if info['pins'] > 2:
retval['Models with three pins'] = True
mm, msm, im, ism = mm + mm_, msm + msm_, im + im_, ism + ism_
    retval['Model mates'], retval['Model stalemates'], retval[
        'Ideal mates'], retval['Ideal stalemates'] = mm > 1, msm > 1, im > 1, ism > 1
if retval['Model mates']:
octet = ''
for piece, pos in chess.Pieces(board, chess.WHITE):
if piece in 'QRSB':
octet = octet + piece
retval['Octet'] = ''.join(sorted(octet)) == 'BBQRRSS'
# now echoes
for hash in visitor.by_hash:
if len(visitor.by_hash[hash]) < 2:
continue
for info1, info2 in common.tuples(visitor.by_hash[hash], 2, False):
if info1['mated'] != info2['mated'] or info1[
'traversal'] != info2['traversal']:
retval['Echo'] = True
if square_color(info1['mated']) != square_color(info2['mated']):
retval['Chameleon echo'] = True
return retval
def square_color(square):
return (square % 2) ^ (square >> 3) % 2
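# Editor's note: a small illustration (not part of the original module). With
# the 0..63 square numbering implied by `square >> 3` (8 squares per rank),
# square_color() reduces to (rank + file) % 2, flipping between adjacent
# files and adjacent ranks to give the usual checkerboard; which color 0 and
# 1 denote depends on the orientation assumed by chess.LookupTables.
def _demo_square_color():
    for rank in range(8):
        row = [square_color(rank * 8 + f) for f in range(8)]
        assert row == [(rank + f) % 2 for f in range(8)]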
|
dturevski/olive-gui
|
legacy/finales.py
|
Python
|
gpl-3.0
| 8,645
|
[
"VisIt"
] |
4051b5d93f6a2acad275378cc99bf3309dc565b61757d80eea7ead82a42770f1
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements input and output processing from Gaussian.
"""
import re
import warnings
import numpy as np
import scipy.constants as cst
from monty.io import zopen
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Molecule
from pymatgen.core.units import Ha_to_eV
from pymatgen.electronic_structure.core import Spin
from pymatgen.util.coord import get_angle
__author__ = "Shyue Ping Ong, Germain Salvato-Vallverdu, Xin Chen"
__copyright__ = "Copyright 2013, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "8/1/15"
float_patt = re.compile(r"\s*([+-]?\d+\.\d+)")
def read_route_line(route):
"""
read route line in gaussian input/output and return functional basis_set
and a dictionary of other route parameters
Args:
route (str) : the route line
    Returns:
functional (str) : the method (HF, PBE ...)
basis_set (str) : the basis set
route (dict) : dictionary of parameters
"""
scrf_patt = re.compile(r"^([sS][cC][rR][fF])\s*=\s*(.+)")
multi_params_patt = re.compile(r"^([A-z]+[0-9]*)[\s=]+\((.*)\)$")
functional = None
basis_set = None
route_params = {}
dieze_tag = None
if route:
if "/" in route:
tok = route.split("/")
functional = tok[0].split()[-1]
basis_set = tok[1].split()[0]
for tok in [functional, basis_set, "/"]:
route = route.replace(tok, "")
for tok in route.split():
if scrf_patt.match(tok):
m = scrf_patt.match(tok)
route_params[m.group(1)] = m.group(2)
elif tok.upper() in ["#", "#N", "#P", "#T"]:
# does not store # in route to avoid error in input
if tok == "#":
dieze_tag = "#N"
else:
dieze_tag = tok
continue
else:
m = re.match(multi_params_patt, tok.strip("#"))
if m:
pars = {}
for par in m.group(2).split(","):
p = par.split("=")
pars[p[0]] = None if len(p) == 1 else p[1]
route_params[m.group(1)] = pars
else:
d = tok.strip("#").split("=")
route_params[d[0]] = None if len(d) == 1 else d[1]
return functional, basis_set, route_params, dieze_tag
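# Editor's note: an illustrative usage sketch (not part of pymatgen). The
# parsing rules above split the functional/basis pair on "/", fold the dieze
# token into `dieze_tag`, and turn the remaining tokens into a parameter
# dict whose values are None for bare keywords.
def _demo_read_route_line():
    functional, basis_set, route_params, dieze_tag = read_route_line(
        "#P HF/6-31G(d) SCF=Tight SP"
    )
    assert functional == "HF"
    assert basis_set == "6-31G(d)"
    assert route_params == {"SCF": "Tight", "SP": None}
    assert dieze_tag == "#P"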
class GaussianInput:
"""
An object representing a Gaussian input file.
"""
# Commonly used regex patterns
_zmat_patt = re.compile(r"^(\w+)*([\s,]+(\w+)[\s,]+(\w+))*[\-\.\s,\w]*$")
_xyz_patt = re.compile(r"^(\w+)[\s,]+([\d\.eE\-]+)[\s,]+([\d\.eE\-]+)[\s,]+" r"([\d\.eE\-]+)[\-\.\s,\w.]*$")
def __init__(
self,
mol,
charge=None,
spin_multiplicity=None,
title=None,
functional="HF",
basis_set="6-31G(d)",
route_parameters=None,
input_parameters=None,
link0_parameters=None,
dieze_tag="#P",
gen_basis=None,
):
"""
Args:
mol: Input molecule. It can either be a Molecule object,
a string giving the geometry in a format supported by Gaussian,
            or ``None``. If the molecule is ``None``, you will need to
            read it in from a checkpoint. Consider adding ``CHK`` to the
``link0_parameters``.
charge: Charge of the molecule. If None, charge on molecule is used.
Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
If ``mol`` is not a Molecule object, then you must specify a charge.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons. If ``mol`` is not a Molecule object, then you
must specify the multiplicity
title: Title for run. Defaults to formula of molecule if None.
functional: Functional for run.
basis_set: Basis set for run.
route_parameters: Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
input_parameters: Additional input parameters for run as a dict. Used
for example, in PCM calculations. E.g., {"EPS":12}
link0_parameters: Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
dieze_tag: # preceding the route line. E.g. "#p"
gen_basis: allows a user-specified basis set to be used in a Gaussian
calculation. If this is not None, the attribute ``basis_set`` will
be set to "Gen".
"""
self._mol = mol
# Determine multiplicity and charge settings
if isinstance(mol, Molecule):
self.charge = charge if charge is not None else mol.charge
nelectrons = mol.charge + mol.nelectrons - self.charge
if spin_multiplicity is not None:
self.spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
                    raise ValueError(
                        "Charge of {} and spin multiplicity of {} are"
                        " not possible for this molecule".format(self.charge, spin_multiplicity)
                    )
else:
self.spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
# Get a title from the molecule name
self.title = title if title else self._mol.composition.formula
else:
self.charge = charge
self.spin_multiplicity = spin_multiplicity
# Set a title
self.title = title if title else "Restart"
# Store the remaining settings
self.functional = functional
self.basis_set = basis_set
self.link0_parameters = link0_parameters if link0_parameters else {}
self.route_parameters = route_parameters if route_parameters else {}
self.input_parameters = input_parameters if input_parameters else {}
self.dieze_tag = dieze_tag if dieze_tag[0] == "#" else "#" + dieze_tag
self.gen_basis = gen_basis
if gen_basis is not None:
self.basis_set = "Gen"
@property
def molecule(self):
"""
Returns molecule associated with this GaussianInput.
"""
return self._mol
@staticmethod
def _parse_coords(coord_lines):
"""
Helper method to parse coordinates.
"""
paras = {}
var_pattern = re.compile(r"^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
for l in coord_lines:
m = var_pattern.match(l.strip())
if m:
paras[m.group(1).strip("=")] = float(m.group(2))
species = []
coords = []
        # Stores whether a Z-matrix format is detected. Once a Z-matrix format
        # is detected, it is assumed for the remainder of the parsing.
zmode = False
for l in coord_lines:
l = l.strip()
if not l:
break
if (not zmode) and GaussianInput._xyz_patt.match(l):
m = GaussianInput._xyz_patt.match(l)
species.append(m.group(1))
toks = re.split(r"[,\s]+", l.strip())
if len(toks) > 4:
coords.append([float(i) for i in toks[2:5]])
else:
coords.append([float(i) for i in toks[1:4]])
elif GaussianInput._zmat_patt.match(l):
zmode = True
toks = re.split(r"[,\s]+", l.strip())
species.append(toks[0])
toks.pop(0)
if len(toks) == 0:
coords.append(np.array([0, 0, 0]))
else:
nn = []
parameters = []
while len(toks) > 1:
ind = toks.pop(0)
data = toks.pop(0)
try:
nn.append(int(ind))
except ValueError:
nn.append(species.index(ind) + 1)
try:
val = float(data)
parameters.append(val)
except ValueError:
if data.startswith("-"):
parameters.append(-paras[data[1:]])
else:
parameters.append(paras[data])
if len(nn) == 1:
coords.append(np.array([0, 0, parameters[0]]))
elif len(nn) == 2:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
bl = parameters[0]
angle = parameters[1]
axis = [0, 1, 0]
op = SymmOp.from_origin_axis_angle(coords1, axis, angle, False)
coord = op.operate(coords2)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
elif len(nn) == 3:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
coords3 = coords[nn[2] - 1]
bl = parameters[0]
angle = parameters[1]
dih = parameters[2]
v1 = coords3 - coords2
v2 = coords1 - coords2
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(coords1, axis, angle, False)
coord = op.operate(coords2)
v1 = coord - coords1
v2 = coords1 - coords2
v3 = np.cross(v1, v2)
adj = get_angle(v3, axis)
axis = coords1 - coords2
op = SymmOp.from_origin_axis_angle(coords1, axis, dih - adj, False)
coord = op.operate(coord)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
def _parse_species(sp_str):
"""
The species specification can take many forms. E.g.,
simple integers representing atomic numbers ("8"),
actual species string ("C") or a labelled species ("C1").
            Sometimes, the species string is also not properly capitalized,
            e.g. ("c1"). This method should take care of these known formats.
"""
try:
return int(sp_str)
except ValueError:
sp = re.sub(r"\d", "", sp_str)
return sp.capitalize()
species = [_parse_species(sp) for sp in species]
return Molecule(species, coords)
@staticmethod
def from_string(contents):
"""
Creates GaussianInput from a string.
Args:
            contents: String representing a Gaussian input file.
Returns:
GaussianInput object
"""
lines = [l.strip() for l in contents.split("\n")]
link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)")
link0_dict = {}
for i, l in enumerate(lines):
if link0_patt.match(l):
m = link0_patt.match(l)
link0_dict[m.group(1).strip("=")] = m.group(2)
route_patt = re.compile(r"^#[sSpPnN]*.*")
route = ""
route_index = None
for i, l in enumerate(lines):
if route_patt.match(l):
route += " " + l
route_index = i
# This condition allows for route cards spanning multiple lines
elif (l == "" or l.isspace()) and route_index:
break
functional, basis_set, route_paras, dieze_tag = read_route_line(route)
ind = 2
title = []
while lines[route_index + ind].strip():
title.append(lines[route_index + ind].strip())
ind += 1
title = " ".join(title)
ind += 1
toks = re.split(r"[,\s]+", lines[route_index + ind])
charge = int(float(toks[0]))
spin_mult = int(toks[1])
coord_lines = []
spaces = 0
input_paras = {}
ind += 1
for i in range(route_index + ind, len(lines)):
if lines[i].strip() == "":
spaces += 1
if spaces >= 2:
d = lines[i].split("=")
if len(d) == 2:
input_paras[d[0]] = d[1]
else:
coord_lines.append(lines[i].strip())
mol = GaussianInput._parse_coords(coord_lines)
mol.set_charge_and_spin(charge, spin_mult)
return GaussianInput(
mol,
charge=charge,
spin_multiplicity=spin_mult,
title=title,
functional=functional,
basis_set=basis_set,
route_parameters=route_paras,
input_parameters=input_paras,
link0_parameters=link0_dict,
dieze_tag=dieze_tag,
)
@staticmethod
def from_file(filename):
"""
Creates GaussianInput from a file.
Args:
filename: Gaussian input filename
Returns:
GaussianInput object
"""
with zopen(filename, "r") as f:
return GaussianInput.from_string(f.read())
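    # Illustrative usage (filename and values are hypothetical):
    # >>> gin = GaussianInput.from_file("mol.com")
    # >>> gin.functional, gin.basis_set   # e.g. ('B3LYP', '6-31G(d)')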
def _find_nn_pos_before_site(self, siteindex):
"""
Returns index of nearest neighbor atoms.
"""
alldist = [(self._mol.get_distance(siteindex, i), i) for i in range(siteindex)]
alldist = sorted(alldist, key=lambda x: x[0])
return [d[1] for d in alldist]
def get_zmatrix(self):
"""
Returns a z-matrix representation of the molecule.
"""
output = []
outputvar = []
for i, site in enumerate(self._mol):
if i == 0:
output.append(f"{site.specie}")
elif i == 1:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
output.append(f"{self._mol[i].specie} {nn[0] + 1} B{i}")
outputvar.append(f"B{i}={bondlength:.6f}")
elif i == 2:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
output.append(f"{self._mol[i].specie} {nn[0] + 1} B{i} {nn[1] + 1} A{i}")
outputvar.append(f"B{i}={bondlength:.6f}")
outputvar.append(f"A{i}={angle:.6f}")
else:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
dih = self._mol.get_dihedral(i, nn[0], nn[1], nn[2])
output.append(f"{self._mol[i].specie} {nn[0] + 1} B{i} {nn[1] + 1} A{i} {nn[2] + 1} D{i}")
outputvar.append(f"B{i}={bondlength:.6f}")
outputvar.append(f"A{i}={angle:.6f}")
outputvar.append(f"D{i}={dih:.6f}")
return "\n".join(output) + "\n\n" + "\n".join(outputvar)
def get_cart_coords(self):
"""
        Return the Cartesian coordinates of the molecule.
"""
def to_s(x):
return f"{x:0.6f}"
outs = []
for i, site in enumerate(self._mol):
outs.append(" ".join([site.species_string, " ".join([to_s(j) for j in site.coords])]))
return "\n".join(outs)
def __str__(self):
return self.to_string()
def to_string(self, cart_coords=False):
"""
Return GaussianInput string
Option: when cart_coords is set to True return the cartesian coordinates
instead of the z-matrix
"""
def para_dict_to_string(para, joiner=" "):
para_str = []
# sorted is only done to make unittests work reliably
for par, val in sorted(para.items()):
if val is None or val == "":
para_str.append(par)
elif isinstance(val, dict):
val_str = para_dict_to_string(val, joiner=",")
para_str.append(f"{par}=({val_str})")
else:
para_str.append(f"{par}={val}")
return joiner.join(para_str)
output = []
if self.link0_parameters:
output.append(para_dict_to_string(self.link0_parameters, "\n"))
# Handle functional or basis set set to None, empty string or whitespace
func_str = "" if self.functional is None else self.functional.strip()
bset_str = "" if self.basis_set is None else self.basis_set.strip()
if func_str != "" and bset_str != "":
func_bset_str = f" {func_str}/{bset_str}"
else:
# don't use the slash if either or both are set as empty
func_bset_str = f" {func_str}{bset_str}".rstrip()
output.append(f"{self.dieze_tag}{func_bset_str} {para_dict_to_string(self.route_parameters)}")
output.append("")
output.append(self.title)
output.append("")
charge_str = "" if self.charge is None else f"{self.charge:.0f}"
multip_str = "" if self.spin_multiplicity is None else f" {self.spin_multiplicity:.0f}"
output.append(f"{charge_str}{multip_str}")
if isinstance(self._mol, Molecule):
if cart_coords is True:
output.append(self.get_cart_coords())
else:
output.append(self.get_zmatrix())
elif self._mol is not None:
output.append(str(self._mol))
output.append("")
if self.gen_basis is not None:
output.append(f"{self.gen_basis}\n")
output.append(para_dict_to_string(self.input_parameters, "\n"))
output.append("\n")
return "\n".join(output)
def write_file(self, filename, cart_coords=False):
"""
Write the input string into a file
Option: see __str__ method
"""
with zopen(filename, "w") as f:
f.write(self.to_string(cart_coords))
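    # Minimal usage sketch (molecule and filename are hypothetical):
    # >>> mol = Molecule(["C", "O"], [[0, 0, 0], [0, 0, 1.13]])
    # >>> gin = GaussianInput(mol, functional="B3LYP", basis_set="6-31G(d)",
    # ...                     route_parameters={"Opt": None})
    # >>> gin.write_file("co.com", cart_coords=True)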
def as_dict(self):
"""
:return: MSONable dict
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"functional": self.functional,
"basis_set": self.basis_set,
"route_parameters": self.route_parameters,
"title": self.title,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"input_parameters": self.input_parameters,
"link0_parameters": self.link0_parameters,
"dieze_tag": self.dieze_tag,
}
@classmethod
def from_dict(cls, d):
"""
:param d: dict
:return: GaussianInput
"""
return GaussianInput(
mol=Molecule.from_dict(d["molecule"]),
functional=d["functional"],
basis_set=d["basis_set"],
route_parameters=d["route_parameters"],
title=d["title"],
charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
input_parameters=d["input_parameters"],
            link0_parameters=d["link0_parameters"],
            dieze_tag=d["dieze_tag"],
        )
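    # The two methods above give a JSON-style round trip, e.g. (``gin`` is a
    # hypothetical GaussianInput instance):
    # >>> gin2 = GaussianInput.from_dict(gin.as_dict())
    # >>> gin2.to_string() == gin.to_string()   # expected to hold
    # True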
class GaussianOutput:
"""
Parser for Gaussian output files.
.. note::
Still in early beta.
Attributes:
.. attribute:: structures
All structures from the calculation in the standard orientation. If the
symmetry is not considered, the standard orientation is not printed out
and the input orientation is used instead. Check the `standard_orientation`
attribute.
.. attribute:: structures_input_orientation
All structures from the calculation in the input orientation or the
Z-matrix orientation (if an opt=z-matrix was requested).
.. attribute:: opt_structures
All optimized structures from the calculation in the standard orientation,
if the attribute 'standard_orientation' is True, otherwise in the input
or the Z-matrix orientation.
.. attribute:: energies
All energies from the calculation.
.. attribute:: eigenvalues
List of eigenvalues for the last geometry
.. attribute:: MO_coefficients
Matrix of MO coefficients for the last geometry
.. attribute:: cart_forces
All cartesian forces from the calculation.
.. attribute:: frequencies
        A list for each freq calculation and for each mode of a dict with
        {
            "frequency": freq in cm-1,
            "symmetry": symmetry tag,
            "r_mass": reduced mass,
            "f_constant": force constant,
            "IR_intensity": IR intensity,
            "mode": normal mode
        }
        The normal mode is a 1D vector of dx, dy, dz for each atom.
.. attribute:: hessian
Matrix of second derivatives of the energy with respect to cartesian
coordinates in the **input orientation** frame. Need #P in the
route section in order to be in the output.
.. attribute:: properly_terminated
True if run has properly terminated
.. attribute:: is_pcm
True if run is a PCM run.
.. attribute:: is_spin
True if it is an unrestricted run
.. attribute:: stationary_type
        If it is a relaxation run, indicates whether it is a minimum
        ("Minimum") or a saddle point ("Saddle").
.. attribute:: corrections
Thermochemical corrections if this run is a Freq run as a dict. Keys
are "Zero-point", "Thermal", "Enthalpy" and "Gibbs Free Energy"
.. attribute:: functional
Functional used in the run.
.. attribute:: basis_set
Basis set used in the run
.. attribute:: route
Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
.. attribute:: dieze_tag
# preceding the route line, e.g. "#P"
.. attribute:: link0
Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
.. attribute:: charge
Charge for structure
.. attribute:: spin_multiplicity
Spin multiplicity for structure
.. attribute:: num_basis_func
Number of basis functions in the run.
.. attribute:: electrons
number of alpha and beta electrons as (N alpha, N beta)
.. attribute:: pcm
PCM parameters and output if available.
.. attribute:: errors
error if not properly terminated (list to be completed in error_defs)
.. attribute:: Mulliken_charges
Mulliken atomic charges
.. attribute:: eigenvectors
        Matrix of shape (num_basis_func, num_basis_func). Each column is an
        eigenvector and contains the AO coefficients of an MO.
eigenvectors[Spin] = mat(num_basis_func, num_basis_func)
.. attribute:: molecular_orbital
        MO expansion coefficients on the AO basis in a more convenient array dict
for each atom and basis set label.
mo[Spin][OM j][atom i] = {AO_k: coeff, AO_k: coeff ... }
.. attribute:: atom_basis_labels
        Labels of the AOs for each atom. These labels are those used in the output
of molecular orbital coefficients (POP=Full) and in the
molecular_orbital array dict.
atom_basis_labels[iatom] = [AO_k, AO_k, ...]
.. attribute:: resumes
        List of Gaussian archive (resume) sections given at the end of the
        output file before the quotation. The resumes are given as strings.
.. attribute:: title
Title of the gaussian run.
.. attribute:: standard_orientation
If True, the geometries stored in the structures are in the standard
orientation. Else, the geometries are in the input orientation.
.. attribute:: bond_orders
Dict of bond order values read in the output file such as:
{(0, 1): 0.8709, (1, 6): 1.234, ...}
The keys are the atom indexes and the values are the Wiberg bond indexes
that are printed using `pop=NBOREAD` and `$nbo bndidx $end`.
Methods:
.. method:: to_input()
Return a GaussianInput object using the last geometry and the same
calculation parameters.
.. method:: read_scan()
Read a potential energy surface from a gaussian scan calculation.
.. method:: get_scan_plot()
Get a matplotlib plot of the potential energy surface
.. method:: save_scan_plot()
Save a matplotlib plot of the potential energy surface to a file
"""
def __init__(self, filename):
"""
Args:
filename: Filename of Gaussian output file.
"""
self.filename = filename
self._parse(filename)
@property
def final_energy(self):
"""
:return: Final energy in Gaussian output.
"""
return self.energies[-1]
@property
def final_structure(self):
"""
:return: Final structure in Gaussian output.
"""
return self.structures[-1]
def _parse(self, filename):
start_patt = re.compile(r" \(Enter \S+l101\.exe\)")
route_patt = re.compile(r" #[pPnNtT]*.*")
link0_patt = re.compile(r"^\s(%.+)\s*=\s*(.+)")
charge_mul_patt = re.compile(r"Charge\s+=\s*([-\d]+)\s+" r"Multiplicity\s+=\s*(\d+)")
num_basis_func_patt = re.compile(r"([0-9]+)\s+basis functions")
num_elec_patt = re.compile(r"(\d+)\s+alpha electrons\s+(\d+)\s+beta electrons")
pcm_patt = re.compile(r"Polarizable Continuum Model")
stat_type_patt = re.compile(r"imaginary frequencies")
scf_patt = re.compile(r"E\(.*\)\s*=\s*([-\.\d]+)\s+")
mp2_patt = re.compile(r"EUMP2\s*=\s*(.*)")
oniom_patt = re.compile(r"ONIOM:\s+extrapolated energy\s*=\s*(.*)")
termination_patt = re.compile(r"(Normal|Error) termination")
error_patt = re.compile(r"(! Non-Optimized Parameters !|Convergence failure)")
mulliken_patt = re.compile(r"^\s*(Mulliken charges|Mulliken atomic charges)")
mulliken_charge_patt = re.compile(r"^\s+(\d+)\s+([A-Z][a-z]?)\s*(\S*)")
end_mulliken_patt = re.compile(r"(Sum of Mulliken )(.*)(charges)\s*=\s*(\D)")
std_orientation_patt = re.compile(r"Standard orientation")
input_orientation_patt = re.compile(r"Input orientation|Z-Matrix orientation")
orbital_patt = re.compile(r"(Alpha|Beta)\s*\S+\s*eigenvalues --(.*)")
thermo_patt = re.compile(r"(Zero-point|Thermal) correction(.*)=" r"\s+([\d\.-]+)")
forces_on_patt = re.compile(r"Center\s+Atomic\s+Forces\s+\(Hartrees/Bohr\)")
forces_off_patt = re.compile(r"Cartesian\s+Forces:\s+Max.*RMS.*")
forces_patt = re.compile(r"\s+(\d+)\s+(\d+)\s+([0-9\.-]+)\s+([0-9\.-]+)\s+([0-9\.-]+)")
freq_on_patt = re.compile(r"Harmonic\sfrequencies\s+\(cm\*\*-1\),\sIR\sintensities.*Raman.*")
normal_mode_patt = re.compile(r"\s+(\d+)\s+(\d+)\s+([0-9\.-]{4,5})\s+([0-9\.-]{4,5}).*")
mo_coeff_patt = re.compile(r"Molecular Orbital Coefficients:")
mo_coeff_name_patt = re.compile(r"\d+\s((\d+|\s+)\s+([a-zA-Z]{1,2}|\s+))\s+(\d+\S+)")
hessian_patt = re.compile(r"Force constants in Cartesian coordinates:")
resume_patt = re.compile(r"^\s1\\1\\GINC-\S*")
resume_end_patt = re.compile(r"^\s.*\\\\@")
bond_order_patt = re.compile(r"Wiberg bond index matrix in the NAO basis:")
self.properly_terminated = False
self.is_pcm = False
self.stationary_type = "Minimum"
self.corrections = {}
self.energies = []
self.pcm = None
self.errors = []
self.Mulliken_charges = {}
self.link0 = {}
self.cart_forces = []
self.frequencies = []
self.eigenvalues = []
self.is_spin = False
self.hessian = None
self.resumes = []
self.title = None
self.bond_orders = {}
read_coord = 0
read_mulliken = False
read_eigen = False
eigen_txt = []
parse_stage = 0
num_basis_found = False
terminated = False
parse_forces = False
forces = []
parse_freq = False
frequencies = []
read_mo = False
parse_hessian = False
routeline = ""
standard_orientation = False
parse_bond_order = False
input_structures = []
std_structures = []
geom_orientation = None
opt_structures = []
with zopen(filename) as f:
for line in f:
if parse_stage == 0:
if start_patt.search(line):
parse_stage = 1
elif link0_patt.match(line):
m = link0_patt.match(line)
self.link0[m.group(1)] = m.group(2)
elif route_patt.search(line) or routeline != "":
if set(line.strip()) == {"-"}:
params = read_route_line(routeline)
self.functional = params[0]
self.basis_set = params[1]
self.route_parameters = params[2]
route_lower = {k.lower(): v for k, v in self.route_parameters.items()}
self.dieze_tag = params[3]
parse_stage = 1
else:
routeline += line.strip()
elif parse_stage == 1:
if set(line.strip()) == {"-"} and self.title is None:
self.title = ""
elif self.title == "":
self.title = line.strip()
elif charge_mul_patt.search(line):
m = charge_mul_patt.search(line)
self.charge = int(m.group(1))
self.spin_multiplicity = int(m.group(2))
parse_stage = 2
elif parse_stage == 2:
if self.is_pcm:
self._check_pcm(line)
if "freq" in route_lower and thermo_patt.search(line):
m = thermo_patt.search(line)
if m.group(1) == "Zero-point":
self.corrections["Zero-point"] = float(m.group(3))
else:
key = m.group(2).strip(" to ")
self.corrections[key] = float(m.group(3))
if read_coord:
[f.readline() for i in range(3)]
line = f.readline()
sp = []
coords = []
while set(line.strip()) != {"-"}:
toks = line.split()
sp.append(Element.from_Z(int(toks[1])))
coords.append([float(x) for x in toks[3:6]])
line = f.readline()
read_coord = False
if geom_orientation == "input":
input_structures.append(Molecule(sp, coords))
elif geom_orientation == "standard":
std_structures.append(Molecule(sp, coords))
if parse_forces:
m = forces_patt.search(line)
if m:
forces.extend([float(_v) for _v in m.groups()[2:5]])
elif forces_off_patt.search(line):
self.cart_forces.append(forces)
forces = []
parse_forces = False
# read molecular orbital eigenvalues
if read_eigen:
m = orbital_patt.search(line)
if m:
eigen_txt.append(line)
else:
read_eigen = False
self.eigenvalues = {Spin.up: []}
for eigenline in eigen_txt:
if "Alpha" in eigenline:
self.eigenvalues[Spin.up] += [float(e) for e in float_patt.findall(eigenline)]
elif "Beta" in eigenline:
if Spin.down not in self.eigenvalues:
self.eigenvalues[Spin.down] = []
self.eigenvalues[Spin.down] += [float(e) for e in float_patt.findall(eigenline)]
eigen_txt = []
# read molecular orbital coefficients
if (not num_basis_found) and num_basis_func_patt.search(line):
m = num_basis_func_patt.search(line)
self.num_basis_func = int(m.group(1))
num_basis_found = True
elif read_mo:
# build a matrix with all coefficients
all_spin = [Spin.up]
if self.is_spin:
all_spin.append(Spin.down)
mat_mo = {}
for spin in all_spin:
mat_mo[spin] = np.zeros((self.num_basis_func, self.num_basis_func))
nMO = 0
end_mo = False
while nMO < self.num_basis_func and not end_mo:
f.readline()
f.readline()
self.atom_basis_labels = []
for i in range(self.num_basis_func):
line = f.readline()
                                # identify atom and AO labels
m = mo_coeff_name_patt.search(line)
if m.group(1).strip() != "":
iat = int(m.group(2)) - 1
# atname = m.group(3)
self.atom_basis_labels.append([m.group(4)])
else:
self.atom_basis_labels[iat].append(m.group(4))
# MO coefficients
coeffs = [float(c) for c in float_patt.findall(line)]
for j, c in enumerate(coeffs):
mat_mo[spin][i, nMO + j] = c
nMO += len(coeffs)
line = f.readline()
# manage pop=regular case (not all MO)
if nMO < self.num_basis_func and (
"Density Matrix:" in line or mo_coeff_patt.search(line)
):
end_mo = True
warnings.warn("POP=regular case, matrix coefficients not complete")
f.readline()
self.eigenvectors = mat_mo
read_mo = False
                        # build a more convenient array dict with the MO
                        # coefficients of each atom in each MO.
# mo[Spin][OM j][atom i] =
# {AO_k: coeff, AO_k: coeff ... }
mo = {}
for spin in all_spin:
mo[spin] = [
[{} for iat in range(len(self.atom_basis_labels))] for j in range(self.num_basis_func)
]
for j in range(self.num_basis_func):
i = 0
for iat, labels in enumerate(self.atom_basis_labels):
for label in labels:
mo[spin][j][iat][label] = self.eigenvectors[spin][i, j]
i += 1
self.molecular_orbital = mo
elif parse_freq:
while line.strip() != "": # blank line
ifreqs = [int(val) - 1 for val in line.split()]
for ifreq in ifreqs:
frequencies.append(
{
"frequency": None,
"r_mass": None,
"f_constant": None,
"IR_intensity": None,
"symmetry": None,
"mode": [],
}
)
# read freq, intensity, masses, symmetry ...
while "Atom AN" not in line:
if "Frequencies --" in line:
freqs = map(float, float_patt.findall(line))
for ifreq, freq in zip(ifreqs, freqs):
frequencies[ifreq]["frequency"] = freq
elif "Red. masses --" in line:
r_masses = map(float, float_patt.findall(line))
for ifreq, r_mass in zip(ifreqs, r_masses):
frequencies[ifreq]["r_mass"] = r_mass
elif "Frc consts --" in line:
f_consts = map(float, float_patt.findall(line))
for ifreq, f_const in zip(ifreqs, f_consts):
frequencies[ifreq]["f_constant"] = f_const
elif "IR Inten --" in line:
IR_intens = map(float, float_patt.findall(line))
for ifreq, intens in zip(ifreqs, IR_intens):
frequencies[ifreq]["IR_intensity"] = intens
else:
syms = line.split()[:3]
for ifreq, sym in zip(ifreqs, syms):
frequencies[ifreq]["symmetry"] = sym
line = f.readline()
# read normal modes
line = f.readline()
while normal_mode_patt.search(line):
values = list(map(float, float_patt.findall(line)))
for i, ifreq in zip(range(0, len(values), 3), ifreqs):
frequencies[ifreq]["mode"].extend(values[i : i + 3])
line = f.readline()
parse_freq = False
self.frequencies.append(frequencies)
frequencies = []
elif parse_hessian:
# read Hessian matrix under "Force constants in Cartesian coordinates"
# Hessian matrix is in the input orientation framework
# WARNING : need #P in the route line
parse_hessian = False
ndf = 3 * len(input_structures[0])
self.hessian = np.zeros((ndf, ndf))
j_indices = range(5)
jndf = 0
while jndf < ndf:
for i in range(jndf, ndf):
line = f.readline()
vals = re.findall(r"\s*([+-]?\d+\.\d+[eEdD]?[+-]\d+)", line)
vals = [float(val.replace("D", "E")) for val in vals]
for jval, val in enumerate(vals):
j = j_indices[jval]
self.hessian[i, j] = val
self.hessian[j, i] = val
jndf += len(vals)
line = f.readline()
j_indices = [j + 5 for j in j_indices]
elif parse_bond_order:
# parse Wiberg bond order
line = f.readline()
line = f.readline()
nat = len(input_structures[0])
matrix = []
for iat in range(nat):
line = f.readline()
matrix.append([float(v) for v in line.split()[2:]])
self.bond_orders = {}
for iat in range(nat):
for jat in range(iat + 1, nat):
self.bond_orders[(iat, jat)] = matrix[iat][jat]
parse_bond_order = False
elif termination_patt.search(line):
m = termination_patt.search(line)
if m.group(1) == "Normal":
self.properly_terminated = True
terminated = True
elif error_patt.search(line):
error_defs = {
"! Non-Optimized Parameters !": "Optimization error",
"Convergence failure": "SCF convergence error",
}
m = error_patt.search(line)
self.errors.append(error_defs[m.group(1)])
elif num_elec_patt.search(line):
m = num_elec_patt.search(line)
self.electrons = (int(m.group(1)), int(m.group(2)))
elif (not self.is_pcm) and pcm_patt.search(line):
self.is_pcm = True
self.pcm = {}
elif "freq" in route_lower and "opt" in route_lower and stat_type_patt.search(line):
self.stationary_type = "Saddle"
elif mp2_patt.search(line):
m = mp2_patt.search(line)
self.energies.append(float(m.group(1).replace("D", "E")))
elif oniom_patt.search(line):
                        m = oniom_patt.search(line)
self.energies.append(float(m.group(1)))
elif scf_patt.search(line):
m = scf_patt.search(line)
self.energies.append(float(m.group(1)))
elif std_orientation_patt.search(line):
standard_orientation = True
geom_orientation = "standard"
read_coord = True
elif input_orientation_patt.search(line):
geom_orientation = "input"
read_coord = True
elif "Optimization completed." in line:
line = f.readline()
if " -- Stationary point found." not in line:
warnings.warn(
"\n" + self.filename + ": Optimization complete but this is not a stationary point"
)
if standard_orientation:
opt_structures.append(std_structures[-1])
else:
opt_structures.append(input_structures[-1])
elif not read_eigen and orbital_patt.search(line):
eigen_txt.append(line)
read_eigen = True
elif mulliken_patt.search(line):
mulliken_txt = []
read_mulliken = True
elif not parse_forces and forces_on_patt.search(line):
parse_forces = True
elif freq_on_patt.search(line):
parse_freq = True
[f.readline() for i in range(3)]
elif mo_coeff_patt.search(line):
if "Alpha" in line:
self.is_spin = True
read_mo = True
elif hessian_patt.search(line):
parse_hessian = True
elif resume_patt.search(line):
resume = []
while not resume_end_patt.search(line):
resume.append(line)
line = f.readline()
                            # safety check in case \\@ is not on a single line
if line == "\n":
break
resume.append(line)
resume = "".join([r.strip() for r in resume])
self.resumes.append(resume)
elif bond_order_patt.search(line):
parse_bond_order = True
if read_mulliken:
if not end_mulliken_patt.search(line):
mulliken_txt.append(line)
else:
m = end_mulliken_patt.search(line)
mulliken_charges = {}
for line in mulliken_txt:
if mulliken_charge_patt.search(line):
m = mulliken_charge_patt.search(line)
dic = {int(m.group(1)): [m.group(2), float(m.group(3))]}
mulliken_charges.update(dic)
read_mulliken = False
self.Mulliken_charges = mulliken_charges
# store the structures. If symmetry is considered, the standard orientation
# is used. Else the input orientation is used.
if standard_orientation:
self.structures = std_structures
self.structures_input_orientation = input_structures
else:
self.structures = input_structures
self.structures_input_orientation = input_structures
# store optimized structure in input orientation
self.opt_structures = opt_structures
if not terminated:
warnings.warn("\n" + self.filename + ": Termination error or bad Gaussian output file !")
def _check_pcm(self, line):
energy_patt = re.compile(r"(Dispersion|Cavitation|Repulsion) energy" r"\s+\S+\s+=\s+(\S*)")
total_patt = re.compile(r"with all non electrostatic terms\s+\S+\s+" r"=\s+(\S*)")
parameter_patt = re.compile(r"(Eps|Numeral density|RSolv|Eps" r"\(inf[inity]*\))\s+=\s*(\S*)")
if energy_patt.search(line):
m = energy_patt.search(line)
self.pcm[f"{m.group(1)} energy"] = float(m.group(2))
elif total_patt.search(line):
m = total_patt.search(line)
self.pcm["Total energy"] = float(m.group(1))
elif parameter_patt.search(line):
m = parameter_patt.search(line)
self.pcm[m.group(1)] = float(m.group(2))
def as_dict(self):
"""
Json-serializable dict representation.
"""
structure = self.final_structure
d = {
"has_gaussian_completed": self.properly_terminated,
"nsites": len(structure),
}
comp = structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
d["is_pcm"] = self.is_pcm
d["errors"] = self.errors
d["Mulliken_charges"] = self.Mulliken_charges
unique_symbols = sorted(list(d["unit_cell_formula"].keys()))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["charge"] = self.charge
d["spin_multiplicity"] = self.spin_multiplicity
vin = {
"route": self.route_parameters,
"functional": self.functional,
"basis_set": self.basis_set,
"nbasisfunctions": self.num_basis_func,
"pcm_parameters": self.pcm,
}
d["input"] = vin
nsites = len(self.final_structure)
vout = {
"energies": self.energies,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"molecule": structure.as_dict(),
"stationary_type": self.stationary_type,
"corrections": self.corrections,
}
d["output"] = vout
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
def read_scan(self):
"""
Read a potential energy surface from a gaussian scan calculation.
Returns:
A dict: {"energies": [ values ],
"coords": {"d1": [ values ], "A2", [ values ], ... }}
"energies" are the energies of all points of the potential energy
surface. "coords" are the internal coordinates used to compute the
potential energy surface and the internal coordinates optimized,
labelled by their name as defined in the calculation.
"""
def floatList(l):
"""return a list of float from a list of string"""
return [float(v) for v in l]
scan_patt = re.compile(r"^\sSummary of the potential surface scan:")
optscan_patt = re.compile(r"^\sSummary of Optimized Potential Surface Scan")
coord_patt = re.compile(r"^\s*(\w+)((\s*[+-]?\d+\.\d+)+)")
# data dict return
data = {"energies": [], "coords": {}}
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
while line != "":
if optscan_patt.match(line):
f.readline()
line = f.readline()
endScan = False
while not endScan:
data["energies"] += floatList(float_patt.findall(line))
line = f.readline()
while coord_patt.match(line):
icname = line.split()[0].strip()
if icname in data["coords"]:
data["coords"][icname] += floatList(float_patt.findall(line))
else:
data["coords"][icname] = floatList(float_patt.findall(line))
line = f.readline()
if not re.search(r"^\s+((\s*\d+)+)", line):
endScan = True
else:
line = f.readline()
elif scan_patt.match(line):
line = f.readline()
data["coords"] = {icname: [] for icname in line.split()[1:-1]}
f.readline()
line = f.readline()
while not re.search(r"^\s-+", line):
values = floatList(line.split())
data["energies"].append(values[-1])
for i, icname in enumerate(data["coords"]):
data["coords"][icname].append(values[i + 1])
line = f.readline()
else:
line = f.readline()
return data
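    # Shape of the dict returned above (coordinate names and values are
    # hypothetical):
    # {"energies": [-230.123, -230.119, ...],
    #  "coords": {"D1": [180.0, 170.0, ...], "A2": [104.5, 104.6, ...]}}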
def get_scan_plot(self, coords=None):
"""
Get a matplotlib plot of the potential energy surface.
Args:
coords: internal coordinate name to use as abscissa.
"""
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
d = self.read_scan()
if coords and coords in d["coords"]:
x = d["coords"][coords]
plt.xlabel(coords)
else:
x = range(len(d["energies"]))
plt.xlabel("points")
plt.ylabel("Energy (eV)")
e_min = min(d["energies"])
y = [(e - e_min) * Ha_to_eV for e in d["energies"]]
plt.plot(x, y, "ro--")
return plt
def save_scan_plot(self, filename="scan.pdf", img_format="pdf", coords=None):
"""
Save matplotlib plot of the potential energy surface to a file.
Args:
            filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
            coords: internal coordinate name to use as abscissa.
"""
plt = self.get_scan_plot(coords)
plt.savefig(filename, format=img_format)
def read_excitation_energies(self):
"""
Read a excitation energies after a TD-DFT calculation.
Returns:
A list: A list of tuple for each transition such as
[(energie (eV), lambda (nm), oscillatory strength), ... ]
"""
transitions = []
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
td = False
while line != "":
if re.search(r"^\sExcitation energies and oscillator strengths:", line):
td = True
if td:
if re.search(r"^\sExcited State\s*\d", line):
val = [float(v) for v in float_patt.findall(line)]
transitions.append(tuple(val[0:3]))
line = f.readline()
return transitions
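    # Illustrative return value (numbers are hypothetical); each tuple is
    # (energy in eV, wavelength in nm, oscillator strength):
    # [(3.35, 370.1, 0.0021), (4.12, 300.9, 0.1534), ...]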
def get_spectre_plot(self, sigma=0.05, step=0.01):
"""
Get a matplotlib plot of the UV-visible xas. Transitions are plotted
as vertical lines and as a sum of normal functions with sigma with. The
broadening is applied in energy and the xas is plotted as a function
of the wavelength.
Args:
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
Returns:
A dict: {"energies": values, "lambda": values, "xas": values}
where values are lists of abscissa (energies, lamba) and
the sum of gaussian functions (xas).
A matplotlib plot.
"""
from scipy.stats import norm
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
transitions = self.read_excitation_energies()
minval = min(val[0] for val in transitions) - 5.0 * sigma
maxval = max(val[0] for val in transitions) + 5.0 * sigma
npts = int((maxval - minval) / step) + 1
eneval = np.linspace(minval, maxval, npts) # in eV
lambdaval = [cst.h * cst.c / (val * cst.e) * 1.0e9 for val in eneval] # in nm
# sum of gaussian functions
spectre = np.zeros(npts)
for trans in transitions:
            spectre += trans[2] * norm.pdf(eneval, trans[0], sigma)
spectre /= spectre.max()
plt.plot(lambdaval, spectre, "r-", label="spectre")
data = {"energies": eneval, "lambda": lambdaval, "xas": spectre}
# plot transitions as vlines
plt.vlines(
[val[1] for val in transitions],
0.0,
[val[2] for val in transitions],
color="blue",
label="transitions",
linewidth=2,
)
plt.xlabel("$\\lambda$ (nm)")
plt.ylabel("Arbitrary unit")
plt.legend()
return data, plt
def save_spectre_plot(self, filename="spectre.pdf", img_format="pdf", sigma=0.05, step=0.01):
"""
Save matplotlib plot of the spectre to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
"""
d, plt = self.get_spectre_plot(sigma, step)
plt.savefig(filename, format=img_format)
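    # Typical broadening workflow (filenames are hypothetical):
    # >>> gout = GaussianOutput("td.log")
    # >>> data, plt = gout.get_spectre_plot(sigma=0.10, step=0.01)
    # >>> gout.save_spectre_plot("uv_vis.png", img_format="png", sigma=0.10)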
def to_input(
self,
mol=None,
charge=None,
spin_multiplicity=None,
title=None,
functional=None,
basis_set=None,
route_parameters=None,
input_parameters=None,
link0_parameters=None,
dieze_tag=None,
cart_coords=False,
):
"""
Create a new input object using by default the last geometry read in
the output file and with the same calculation parameters. Arguments
are the same as GaussianInput class.
Returns
gaunip (GaussianInput) : the gaussian input object
"""
if not mol:
mol = self.final_structure
if charge is None:
charge = self.charge
if spin_multiplicity is None:
spin_multiplicity = self.spin_multiplicity
if not title:
title = self.title
if not functional:
functional = self.functional
if not basis_set:
basis_set = self.basis_set
if not route_parameters:
route_parameters = self.route_parameters
if not link0_parameters:
link0_parameters = self.link0
if not dieze_tag:
dieze_tag = self.dieze_tag
return GaussianInput(
mol=mol,
charge=charge,
spin_multiplicity=spin_multiplicity,
title=title,
functional=functional,
basis_set=basis_set,
route_parameters=route_parameters,
input_parameters=input_parameters,
link0_parameters=link0_parameters,
dieze_tag=dieze_tag,
)
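# End-to-end sketch: restart an optimization from the last parsed geometry
# (filenames and route parameters are hypothetical).
# >>> gout = GaussianOutput("opt_step1.log")
# >>> gin = gout.to_input(route_parameters={"Opt": None, "SCF": "Tight"})
# >>> gin.write_file("opt_step2.com", cart_coords=True)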
|
materialsproject/pymatgen
|
pymatgen/io/gaussian.py
|
Python
|
mit
| 59,197
|
[
"Gaussian",
"pymatgen"
] |
5caed03a0a6df9e5d5e8d669e2453654284b5085a8f8e2029aa701b8be8f0156
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergl"
_path_str = "scattergl.marker"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"line",
"opacity",
"opacitysrc",
"reversescale",
"showscale",
"size",
"sizemin",
"sizemode",
"sizeref",
"sizesrc",
"symbol",
"symbolsrc",
}
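    # Minimal sketch of how these properties are usually supplied (data are
    # hypothetical); every key above maps to one of the accessors below.
    # >>> import plotly.graph_objects as go
    # >>> trace = go.Scattergl(
    # ...     y=[1, 3, 2], mode="markers",
    # ...     marker=dict(color=[0.1, 0.5, 0.9], colorscale="Viridis",
    # ...                 showscale=True, size=12),
    # ... )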
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color`is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
        bounds set in `marker.cmin` and `marker.cmax`. Has an effect
        only if `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
        Sets the upper bound of the color domain. Has an effect only
        if `marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmin`
must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
        effect only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
        Sets the lower bound of the color domain. Has an effect only
        if `marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmax`
must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
        Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to scattergl.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
                Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
                last tick is displayed with a prefix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatter
gl.marker.colorbar.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.scattergl.marker.colorbar.tickformatstopdefau
lts), sets the default property values to use
for elements of
scattergl.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scattergl.marker.c
olorbar.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
scattergl.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scattergl.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
                Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.scattergl.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
        Sets the colorscale. Has an effect only if `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space,
        use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Greys,YlGnB
u,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland
,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
                effect only if `marker.line.color` is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
                `marker.line.cmin` and `marker.line.cmax`. Has
                an effect only if `marker.line.color` is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
                an effect only if `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
                point. Has an effect only if
                `marker.line.color` is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
                an effect only if `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
                Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
                Sets the colorscale. Has an effect only if
                `marker.line.color` is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
                color space, use `marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string of the following
list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
body,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
reversescale
Reverses the color mapping if true. Has an
                effect only if `marker.line.color` is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
widthsrc
Sets the source reference on Chart Studio Cloud
for width .
Returns
-------
plotly.graph_objs.scattergl.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for opacity .
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if
`marker.color` is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if `marker.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizemin
# -------
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizemin"]
@sizemin.setter
def sizemin(self, val):
self["sizemin"] = val
# sizemode
# --------
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
# sizeref
# -------
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# symbol
# ------
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, 'circle', 100, 'circle-open', 200, 'circle-dot', 300,
'circle-open-dot', 1, 'square', 101, 'square-open', 201,
'square-dot', 301, 'square-open-dot', 2, 'diamond', 102,
'diamond-open', 202, 'diamond-dot', 302,
'diamond-open-dot', 3, 'cross', 103, 'cross-open', 203,
'cross-dot', 303, 'cross-open-dot', 4, 'x', 104, 'x-open',
204, 'x-dot', 304, 'x-open-dot', 5, 'triangle-up', 105,
'triangle-up-open', 205, 'triangle-up-dot', 305,
'triangle-up-open-dot', 6, 'triangle-down', 106,
'triangle-down-open', 206, 'triangle-down-dot', 306,
'triangle-down-open-dot', 7, 'triangle-left', 107,
'triangle-left-open', 207, 'triangle-left-dot', 307,
'triangle-left-open-dot', 8, 'triangle-right', 108,
'triangle-right-open', 208, 'triangle-right-dot', 308,
'triangle-right-open-dot', 9, 'triangle-ne', 109,
'triangle-ne-open', 209, 'triangle-ne-dot', 309,
'triangle-ne-open-dot', 10, 'triangle-se', 110,
'triangle-se-open', 210, 'triangle-se-dot', 310,
'triangle-se-open-dot', 11, 'triangle-sw', 111,
'triangle-sw-open', 211, 'triangle-sw-dot', 311,
'triangle-sw-open-dot', 12, 'triangle-nw', 112,
'triangle-nw-open', 212, 'triangle-nw-dot', 312,
'triangle-nw-open-dot', 13, 'pentagon', 113,
'pentagon-open', 213, 'pentagon-dot', 313,
'pentagon-open-dot', 14, 'hexagon', 114, 'hexagon-open',
214, 'hexagon-dot', 314, 'hexagon-open-dot', 15,
'hexagon2', 115, 'hexagon2-open', 215, 'hexagon2-dot',
315, 'hexagon2-open-dot', 16, 'octagon', 116,
'octagon-open', 216, 'octagon-dot', 316,
'octagon-open-dot', 17, 'star', 117, 'star-open', 217,
'star-dot', 317, 'star-open-dot', 18, 'hexagram', 118,
'hexagram-open', 218, 'hexagram-dot', 318,
'hexagram-open-dot', 19, 'star-triangle-up', 119,
'star-triangle-up-open', 219, 'star-triangle-up-dot', 319,
'star-triangle-up-open-dot', 20, 'star-triangle-down',
120, 'star-triangle-down-open', 220,
'star-triangle-down-dot', 320,
'star-triangle-down-open-dot', 21, 'star-square', 121,
'star-square-open', 221, 'star-square-dot', 321,
'star-square-open-dot', 22, 'star-diamond', 122,
'star-diamond-open', 222, 'star-diamond-dot', 322,
'star-diamond-open-dot', 23, 'diamond-tall', 123,
'diamond-tall-open', 223, 'diamond-tall-dot', 323,
'diamond-tall-open-dot', 24, 'diamond-wide', 124,
'diamond-wide-open', 224, 'diamond-wide-dot', 324,
'diamond-wide-open-dot', 25, 'hourglass', 125,
'hourglass-open', 26, 'bowtie', 126, 'bowtie-open', 27,
'circle-cross', 127, 'circle-cross-open', 28, 'circle-x',
128, 'circle-x-open', 29, 'square-cross', 129,
'square-cross-open', 30, 'square-x', 130, 'square-x-open',
31, 'diamond-cross', 131, 'diamond-cross-open', 32,
'diamond-x', 132, 'diamond-x-open', 33, 'cross-thin', 133,
'cross-thin-open', 34, 'x-thin', 134, 'x-thin-open', 35,
'asterisk', 135, 'asterisk-open', 36, 'hash', 136,
'hash-open', 236, 'hash-dot', 336, 'hash-open-dot', 37,
'y-up', 137, 'y-up-open', 38, 'y-down', 138,
'y-down-open', 39, 'y-left', 139, 'y-left-open', 40,
'y-right', 140, 'y-right-open', 41, 'line-ew', 141,
'line-ew-open', 42, 'line-ns', 142, 'line-ns-open', 43,
'line-ne', 143, 'line-ne-open', 44, 'line-nw', 144,
'line-nw-open']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
# symbolsrc
# ---------
@property
def symbolsrc(self):
"""
Sets the source reference on Chart Studio Cloud for symbol.
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["symbolsrc"]
@symbolsrc.setter
def symbolsrc(self, val):
self["symbolsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`.
Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scattergl.marker.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, mappings for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys, YlGnBu, Greens, YlOrRd, Bluered,
RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot,
Blackbody, Earth, Electric, Viridis, Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
color.
line
:class:`plotly.graph_objects.scattergl.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
opacity.
reversescale
Reverses the color mapping if true. Has an effect only
if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
size.
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud for
symbol.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
line=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergl.Marker`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`.
Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scattergl.marker.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, mappings for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys, YlGnBu, Greens, YlOrRd, Bluered,
RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot,
Blackbody, Earth, Electric, Viridis, Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
color.
line
:class:`plotly.graph_objects.scattergl.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
opacity.
reversescale
Reverses the color mapping if true. Has an effect only
if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
size.
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud for
symbol.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergl.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("cauto", None)
_v = cauto if cauto is not None else _v
if _v is not None:
self["cauto"] = _v
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmid", None)
_v = cmid if cmid is not None else _v
if _v is not None:
self["cmid"] = _v
_v = arg.pop("cmin", None)
_v = cmin if cmin is not None else _v
if _v is not None:
self["cmin"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorbar", None)
_v = colorbar if colorbar is not None else _v
if _v is not None:
self["colorbar"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("opacitysrc", None)
_v = opacitysrc if opacitysrc is not None else _v
if _v is not None:
self["opacitysrc"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("showscale", None)
_v = showscale if showscale is not None else _v
if _v is not None:
self["showscale"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizemin", None)
_v = sizemin if sizemin is not None else _v
if _v is not None:
self["sizemin"] = _v
_v = arg.pop("sizemode", None)
_v = sizemode if sizemode is not None else _v
if _v is not None:
self["sizemode"] = _v
_v = arg.pop("sizeref", None)
_v = sizeref if sizeref is not None else _v
if _v is not None:
self["sizeref"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
_v = arg.pop("symbol", None)
_v = symbol if symbol is not None else _v
if _v is not None:
self["symbol"] = _v
_v = arg.pop("symbolsrc", None)
_v = symbolsrc if symbolsrc is not None else _v
if _v is not None:
self["symbolsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
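A minimal usage sketch for the Marker class above (an illustration, not part of the module; it assumes the standard plotly.graph_objects entry point, where this class backs the `marker` attribute of go.Scattergl):

import plotly.graph_objects as go

# Numeric colors are mapped through `colorscale`; `cmin`/`cmax` pin the
# color domain and `showscale=True` draws the colorbar, per the docstrings.
fig = go.Figure(
    go.Scattergl(
        x=[0, 1, 2, 3],
        y=[2, 1, 3, 0],
        mode="markers",
        marker=dict(
            color=[0.0, 0.5, 1.0, 1.5],
            colorscale="Viridis",
            cmin=0.0,
            cmax=1.5,
            showscale=True,
            size=12,
            symbol="circle-open-dot",  # same glyph as numeric code 300
        ),
    )
)
fig.show()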
|
plotly/python-api
|
packages/python/plotly/plotly/graph_objs/scattergl/_marker.py
|
Python
|
mit
| 55,155
|
[
"Bowtie"
] |
8ce5e70574befe6f6adfb165d246220a940d6977d1c5cc00c34a87e2fec4d3fd
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Truhlar) of several classes of noncovalent interactions.
| Geometries from Truhlar and coworkers at site http://comp.chem.umn.edu/database_noncov/noncovalent.htm
| Reference energies from Truhlar and coworkers at site http://comp.chem.umn.edu/database_noncov/noncovalent.htm
| First comprehensive citation JPCA 109 5656 (2005).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'`` || ``'on'``
- **benchmark**
- ``'<benchmark_name>'`` <Reference>.
- |dl| ``'<default_benchmark_name>'`` |dr| <Reference>.
- **subset**
- ``'small'`` 3: HF-HF, He-Ne, HCCH-HCCH
- ``'large'`` 1: BzBz_PD
- ``'HB6'`` hydrogen-bonded
- ``'CT7'`` charge-transfer
- ``'DI6'`` dipole-interacting
- ``'WI7'`` weakly interacting
- ``'PPS5'`` pi-pi stacking
"""
import qcdb
# <<< NCB31 Database Module >>>
dbse = 'NCB31'
# <<< Database Members >>>
HRXN_SM = ['HB6-2', 'WI7-1', 'PPS5-1']
HRXN_LG = ['PPS5-5']
HB6 = ['HB6-1', 'HB6-2', 'HB6-3', 'HB6-4', 'HB6-5', 'HB6-6']
CT7 = ['CT7-1', 'CT7-2', 'CT7-3', 'CT7-4', 'CT7-5', 'CT7-6', 'CT7-7']
DI6 = ['DI6-1', 'DI6-2', 'DI6-3', 'DI6-4', 'DI6-5', 'DI6-6']
WI7 = ['WI7-1', 'WI7-2', 'WI7-3', 'WI7-4', 'WI7-5', 'WI7-6', 'WI7-7']
PPS5 = ['PPS5-1', 'PPS5-2', 'PPS5-3', 'PPS5-4', 'PPS5-5']
HRXN = sum([HB6, CT7, DI6, WI7, PPS5], [])
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
RXNM_CPRLX = {} # reaction matrix of reagent contributions per reaction for counterpoise- and deformation-corrected reactions
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supramolecular calculations
ACTV_RLX = {} # order of active reagents for deformation-corrected reaction
ACTV_CPRLX = {} # order of active reagents for counterpoise- and deformation-corrected reaction
hold = {}
hold['CT7-1'] = ['C2H4', 'F2']
hold['CT7-2'] = ['NH3', 'F2']
hold['CT7-3'] = ['HCCH', 'ClF']
hold['CT7-4'] = ['HCN', 'ClF']
hold['CT7-5'] = ['NH3', 'Cl2']
hold['CT7-6'] = ['H2O', 'ClF']
hold['CT7-7'] = ['NH3', 'ClF']
hold['DI6-1'] = ['H2S', 'H2S']
hold['DI6-2'] = ['HCl', 'HCl']
hold['DI6-3'] = ['HCl', 'H2S']
hold['DI6-4'] = ['CH3Cl', 'HCl']
hold['DI6-5'] = ['HCN', 'CH3SH']
hold['DI6-6'] = ['CH3SH', 'HCl']
hold['HB6-1'] = ['NH3', 'NH3']
hold['HB6-2'] = ['HF', 'HF']
hold['HB6-3'] = ['H2O', 'H2O']
hold['HB6-4'] = ['NH3', 'H2O']
hold['HB6-5'] = ['HCONH2', 'HCONH2']
hold['HB6-6'] = ['HCOOH', 'HCOOH']
hold['PPS5-1'] = ['HCCH', 'HCCH']
hold['PPS5-2'] = ['C2H4', 'C2H4']
hold['PPS5-3'] = ['Bz', 'Bz']
hold['PPS5-4'] = ['Bz', 'Bz']
hold['PPS5-5'] = ['Bz', 'Bz']
hold['WI7-1'] = ['He', 'Ne']
hold['WI7-2'] = ['He', 'Ar']
hold['WI7-3'] = ['Ne', 'Ne']
hold['WI7-4'] = ['Ne', 'Ar']
hold['WI7-5'] = ['CH4', 'Ne']
hold['WI7-6'] = ['Bz', 'Ne']
hold['WI7-7'] = ['CH4', 'CH4']
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1,
'%s-%s-mono-RLX' % (dbse, hold[rxn][0]) : -1,
'%s-%s-mono-RLX' % (dbse, hold[rxn][1]) : -1 }
RXNM_CPRLX['%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : +1,
'%s-%s-monoB-unCP' % (dbse, rxn) : +1,
'%s-%s-mono-RLX' % (dbse, hold[rxn][0]) : -1,
'%s-%s-mono-RLX' % (dbse, hold[rxn][1]) : -1 }
ACTV_SA[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
ACTV_CP[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV_RLX[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-mono-RLX' % (dbse, hold[rxn][0]),
'%s-%s-mono-RLX' % (dbse, hold[rxn][1]) ]
ACTV_CPRLX['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn),
'%s-%s-mono-RLX' % (dbse, hold[rxn][0]),
'%s-%s-mono-RLX' % (dbse, hold[rxn][1]) ]
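# How these tables are consumed (a sketch of the convention implied by the
# coefficients above): a driver forms each reaction energy as
#     E_rxn = sum(RXNM[rxn][rgt] * E[rgt] for rgt in ACTV_*[rxn])
# so, e.g., the counterpoise-corrected interaction energy is
#     IE_CP = E(dimer) - E(monoA-CP) - E(monoB-CP),
# while ACTV_RLX swaps in relaxed (optimized) monomers. Note that for
# homodimers hold[rxn][0] == hold[rxn][1], so the two mono-RLX keys in the
# dict literals collapse to a single entry with coefficient -1.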
# <<< Reference Values [kcal/mol] >>>
BIND = {}
nan = float('NaN')
BIND['%s-%s' % (dbse, 'CT7-1' )] = -1.06
BIND['%s-%s' % (dbse, 'CT7-2' )] = -1.81
BIND['%s-%s' % (dbse, 'CT7-3' )] = -3.81
BIND['%s-%s' % (dbse, 'CT7-4' )] = -4.86
BIND['%s-%s' % (dbse, 'CT7-5' )] = -4.88
BIND['%s-%s' % (dbse, 'CT7-6' )] = -5.36
BIND['%s-%s' % (dbse, 'CT7-7' )] = -10.62
BIND['%s-%s' % (dbse, 'DI6-1' )] = -1.66
BIND['%s-%s' % (dbse, 'DI6-2' )] = -2.01
BIND['%s-%s' % (dbse, 'DI6-3' )] = -3.35
BIND['%s-%s' % (dbse, 'DI6-4' )] = -3.55
BIND['%s-%s' % (dbse, 'DI6-5' )] = -3.59
BIND['%s-%s' % (dbse, 'DI6-6' )] = -4.16
BIND['%s-%s' % (dbse, 'HB6-1' )] = -3.15
BIND['%s-%s' % (dbse, 'HB6-2' )] = -4.57
BIND['%s-%s' % (dbse, 'HB6-3' )] = -4.97
BIND['%s-%s' % (dbse, 'HB6-4' )] = -6.41
BIND['%s-%s' % (dbse, 'HB6-5' )] = -14.94
BIND['%s-%s' % (dbse, 'HB6-6' )] = -16.15
BIND['%s-%s' % (dbse, 'PPS5-1' )] = -1.34
BIND['%s-%s' % (dbse, 'PPS5-2' )] = -1.42
BIND['%s-%s' % (dbse, 'PPS5-3' )] = -1.81
BIND['%s-%s' % (dbse, 'PPS5-4' )] = -2.74
BIND['%s-%s' % (dbse, 'PPS5-5' )] = -2.78
BIND['%s-%s' % (dbse, 'WI7-1' )] = -0.04
BIND['%s-%s' % (dbse, 'WI7-2' )] = -0.06
BIND['%s-%s' % (dbse, 'WI7-3' )] = -0.08
BIND['%s-%s' % (dbse, 'WI7-4' )] = -0.13
BIND['%s-%s' % (dbse, 'WI7-5' )] = -0.22
BIND['%s-%s' % (dbse, 'WI7-6' )] = -0.47
BIND['%s-%s' % (dbse, 'WI7-7' )] = -0.51
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 'CT7-1' )] = """Ethene-Fluorine Molecule Complex (C2H4-F2) """
TAGL['%s-%s-dimer' % (dbse, 'CT7-1' )] = """Dimer from Ethene-Fluorine Molecule Complex (C2H4-F2) """
TAGL['%s-%s-monoA-CP' % (dbse, 'CT7-1' )] = """Monomer A from Ethene-Fluorine Molecule Complex (C2H4-F2) """
TAGL['%s-%s-monoB-CP' % (dbse, 'CT7-1' )] = """Monomer B from Ethene-Fluorine Molecule Complex (C2H4-F2) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'CT7-1' )] = """Monomer A from Ethene-Fluorine Molecule Complex (C2H4-F2) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'CT7-1' )] = """Monomer B from Ethene-Fluorine Molecule Complex (C2H4-F2) """
TAGL['%s-%s' % (dbse, 'CT7-2' )] = """Ammonia-Fluorine Molecule Complex (NH3-F2) """
TAGL['%s-%s-dimer' % (dbse, 'CT7-2' )] = """Dimer from Ammonia-Fluorine Molecule Complex (NH3-F2) """
TAGL['%s-%s-monoA-CP' % (dbse, 'CT7-2' )] = """Monomer A from Ammonia-Fluorine Molecule Complex (NH3-F2) """
TAGL['%s-%s-monoB-CP' % (dbse, 'CT7-2' )] = """Monomer B from Ammonia-Fluorine Molecule Complex (NH3-F2) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'CT7-2' )] = """Monomer A from Ammonia-Fluorine Molecule Complex (NH3-F2) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'CT7-2' )] = """Monomer B from Ammonia-Fluorine Molecule Complex (NH3-F2) """
TAGL['%s-%s' % (dbse, 'CT7-3' )] = """Ethine-Chlorine Monofluoride Complex (HCCH-ClF) """
TAGL['%s-%s-dimer' % (dbse, 'CT7-3' )] = """Dimer from Ethine-Chlorine Monofluoride Complex (HCCH-ClF) """
TAGL['%s-%s-monoA-CP' % (dbse, 'CT7-3' )] = """Monomer A from Ethine-Chlorine Monofluoride Complex (HCCH-ClF) """
TAGL['%s-%s-monoB-CP' % (dbse, 'CT7-3' )] = """Monomer B from Ethine-Chlorine Monofluoride Complex (HCCH-ClF) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'CT7-3' )] = """Monomer A from Ethine-Chlorine Monofluoride Complex (HCCH-ClF) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'CT7-3' )] = """Monomer B from Ethine-Chlorine Monofluoride Complex (HCCH-ClF) """
TAGL['%s-%s' % (dbse, 'CT7-4' )] = """Hydrogen Cyanide-Chlorine Monofluoride Complex (HCN-ClF) """
TAGL['%s-%s-dimer' % (dbse, 'CT7-4' )] = """Dimer from Hydrogen Cyanide-Chlorine Monofluoride Complex (HCN-ClF) """
TAGL['%s-%s-monoA-CP' % (dbse, 'CT7-4' )] = """Monomer A from Hydrogen Cyanide-Chlorine Monofluoride Complex (HCN-ClF) """
TAGL['%s-%s-monoB-CP' % (dbse, 'CT7-4' )] = """Monomer B from Hydrogen Cyanide-Chlorine Monofluoride Complex (HCN-ClF) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'CT7-4' )] = """Monomer A from Hydrogen Cyanide-Chlorine Monofluoride Complex (HCN-ClF) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'CT7-4' )] = """Monomer B from Hydrogen Cyanide-Chlorine Monofluoride Complex (HCN-ClF) """
TAGL['%s-%s' % (dbse, 'CT7-5' )] = """Ammonia-Chlorine Molecule (NH3-Cl2) """
TAGL['%s-%s-dimer' % (dbse, 'CT7-5' )] = """Dimer from Ammonia-Chlorine Molecule (NH3-Cl2) """
TAGL['%s-%s-monoA-CP' % (dbse, 'CT7-5' )] = """Monomer A from Ammonia-Chlorine Molecule (NH3-Cl2) """
TAGL['%s-%s-monoB-CP' % (dbse, 'CT7-5' )] = """Monomer B from Ammonia-Chlorine Molecule (NH3-Cl2) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'CT7-5' )] = """Monomer A from Ammonia-Chlorine Molecule (NH3-Cl2) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'CT7-5' )] = """Monomer B from Ammonia-Chlorine Molecule (NH3-Cl2) """
TAGL['%s-%s' % (dbse, 'CT7-6' )] = """Water-Chlorine Monofluoride Complex (H2O-ClF) """
TAGL['%s-%s-dimer' % (dbse, 'CT7-6' )] = """Dimer from Water-Chlorine Monofluoride Complex (H2O-ClF) """
TAGL['%s-%s-monoA-CP' % (dbse, 'CT7-6' )] = """Monomer A from Water-Chlorine Monofluoride Complex (H2O-ClF) """
TAGL['%s-%s-monoB-CP' % (dbse, 'CT7-6' )] = """Monomer B from Water-Chlorine Monofluoride Complex (H2O-ClF) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'CT7-6' )] = """Monomer A from Water-Chlorine Monofluoride Complex (H2O-ClF) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'CT7-6' )] = """Monomer B from Water-Chlorine Monofluoride Complex (H2O-ClF) """
TAGL['%s-%s' % (dbse, 'CT7-7' )] = """Ammonia-Chlorine Monofluoride Complex (NH3-ClF) """
TAGL['%s-%s-dimer' % (dbse, 'CT7-7' )] = """Dimer from Ammonia-Chlorine Monofluoride Complex (NH3-ClF) """
TAGL['%s-%s-monoA-CP' % (dbse, 'CT7-7' )] = """Monomer A from Ammonia-Chlorine Monofluoride Complex (NH3-ClF) """
TAGL['%s-%s-monoB-CP' % (dbse, 'CT7-7' )] = """Monomer B from Ammonia-Chlorine Monofluoride Complex (NH3-ClF) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'CT7-7' )] = """Monomer A from Ammonia-Chlorine Monofluoride Complex (NH3-ClF) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'CT7-7' )] = """Monomer B from Ammonia-Chlorine Monofluoride Complex (NH3-ClF) """
TAGL['%s-%s' % (dbse, 'DI6-1' )] = """Hydrogen Sulfide Dimer (H2S-H2S) """
TAGL['%s-%s-dimer' % (dbse, 'DI6-1' )] = """Dimer from Hydrogen Sulfide Dimer (H2S-H2S) """
TAGL['%s-%s-monoA-CP' % (dbse, 'DI6-1' )] = """Monomer A from Hydrogen Sulfide Dimer (H2S-H2S) """
TAGL['%s-%s-monoB-CP' % (dbse, 'DI6-1' )] = """Monomer B from Hydrogen Sulfide Dimer (H2S-H2S) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'DI6-1' )] = """Monomer A from Hydrogen Sulfide Dimer (H2S-H2S) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'DI6-1' )] = """Monomer B from Hydrogen Sulfide Dimer (H2S-H2S) """
TAGL['%s-%s' % (dbse, 'DI6-2' )] = """Hydrogen Chloride Dimer (HCl-HCl) """
TAGL['%s-%s-dimer' % (dbse, 'DI6-2' )] = """Dimer from Hydrogen Chloride Dimer (HCl-HCl) """
TAGL['%s-%s-monoA-CP' % (dbse, 'DI6-2' )] = """Monomer A from Hydrogen Chloride Dimer (HCl-HCl) """
TAGL['%s-%s-monoB-CP' % (dbse, 'DI6-2' )] = """Monomer B from Hydrogen Chloride Dimer (HCl-HCl) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'DI6-2' )] = """Monomer A from Hydrogen Chloride Dimer (HCl-HCl) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'DI6-2' )] = """Monomer B from Hydrogen Chloride Dimer (HCl-HCl) """
TAGL['%s-%s' % (dbse, 'DI6-3' )] = """Hydrogen Chloride-Hydrogen Sulfide Complex (HCl-H2S) """
TAGL['%s-%s-dimer' % (dbse, 'DI6-3' )] = """Dimer from Hydrogen Chloride-Hydrogen Sulfide Complex (HCl-H2S) """
TAGL['%s-%s-monoA-CP' % (dbse, 'DI6-3' )] = """Monomer A from Hydrogen Chloride-Hydrogen Sulfide Complex (HCl-H2S) """
TAGL['%s-%s-monoB-CP' % (dbse, 'DI6-3' )] = """Monomer B from Hydrogen Chloride-Hydrogen Sulfide Complex (HCl-H2S) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'DI6-3' )] = """Monomer A from Hydrogen Chloride-Hydrogen Sulfide Complex (HCl-H2S) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'DI6-3' )] = """Monomer B from Hydrogen Chloride-Hydrogen Sulfide Complex (HCl-H2S) """
TAGL['%s-%s' % (dbse, 'DI6-4' )] = """Methyl Chloride-Hydrogen Chloride (CH3Cl-HCl) """
TAGL['%s-%s-dimer' % (dbse, 'DI6-4' )] = """Dimer from Methyl Chloride-Hydrogen Chloride (CH3Cl-HCl) """
TAGL['%s-%s-monoA-CP' % (dbse, 'DI6-4' )] = """Monomer A from Methyl Chloride-Hydrogen Chloride (CH3Cl-HCl) """
TAGL['%s-%s-monoB-CP' % (dbse, 'DI6-4' )] = """Monomer B from Methyl Chloride-Hydrogen Chloride (CH3Cl-HCl) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'DI6-4' )] = """Monomer A from Methyl Chloride-Hydrogen Chloride (CH3Cl-HCl) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'DI6-4' )] = """Monomer B from Methyl Chloride-Hydrogen Chloride (CH3Cl-HCl) """
TAGL['%s-%s' % (dbse, 'DI6-5' )] = """Hydrogen Cyanide-Methanethiol (HCN-CH3SH) """
TAGL['%s-%s-dimer' % (dbse, 'DI6-5' )] = """Dimer from Hydrogen Cyanide-Methanethiol (HCN-CH3SH) """
TAGL['%s-%s-monoA-CP' % (dbse, 'DI6-5' )] = """Monomer A from Hydrogen Cyanide-Methanethiol (HCN-CH3SH) """
TAGL['%s-%s-monoB-CP' % (dbse, 'DI6-5' )] = """Monomer B from Hydrogen Cyanide-Methanethiol (HCN-CH3SH) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'DI6-5' )] = """Monomer A from Hydrogen Cyanide-Methanethiol (HCN-CH3SH) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'DI6-5' )] = """Monomer B from Hydrogen Cyanide-Methanethiol (HCN-CH3SH) """
TAGL['%s-%s' % (dbse, 'DI6-6' )] = """Methanethiol-Hydrogen Chloride Complex (CH3SH-HCl) """
TAGL['%s-%s-dimer' % (dbse, 'DI6-6' )] = """Dimer from Methanethiol-Hydrogen Chloride Complex (CH3SH-HCl) """
TAGL['%s-%s-monoA-CP' % (dbse, 'DI6-6' )] = """Monomer A from Methanethiol-Hydrogen Chloride Complex (CH3SH-HCl) """
TAGL['%s-%s-monoB-CP' % (dbse, 'DI6-6' )] = """Monomer B from Methanethiol-Hydrogen Chloride Complex (CH3SH-HCl) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'DI6-6' )] = """Monomer A from Methanethiol-Hydrogen Chloride Complex (CH3SH-HCl) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'DI6-6' )] = """Monomer B from Methanethiol-Hydrogen Chloride Complex (CH3SH-HCl) """
TAGL['%s-%s' % (dbse, 'HB6-1' )] = """Ammonia Dimer (NH3-NH3) """
TAGL['%s-%s-dimer' % (dbse, 'HB6-1' )] = """Dimer from Ammonia Dimer (NH3-NH3) """
TAGL['%s-%s-monoA-CP' % (dbse, 'HB6-1' )] = """Monomer A from Ammonia Dimer (NH3-NH3) """
TAGL['%s-%s-monoB-CP' % (dbse, 'HB6-1' )] = """Monomer B from Ammonia Dimer (NH3-NH3) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'HB6-1' )] = """Monomer A from Ammonia Dimer (NH3-NH3) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'HB6-1' )] = """Monomer B from Ammonia Dimer (NH3-NH3) """
TAGL['%s-%s' % (dbse, 'HB6-2' )] = """Hydrogen Fluoride Dimer (HF-HF) """
TAGL['%s-%s-dimer' % (dbse, 'HB6-2' )] = """Dimer from Hydrogen Fluoride Dimer (HF-HF) """
TAGL['%s-%s-monoA-CP' % (dbse, 'HB6-2' )] = """Monomer A from Hydrogen Fluoride Dimer (HF-HF) """
TAGL['%s-%s-monoB-CP' % (dbse, 'HB6-2' )] = """Monomer B from Hydrogen Fluoride Dimer (HF-HF) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'HB6-2' )] = """Monomer A from Hydrogen Fluoride Dimer (HF-HF) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'HB6-2' )] = """Monomer B from Hydrogen Fluoride Dimer (HF-HF) """
TAGL['%s-%s' % (dbse, 'HB6-3' )] = """Water Dimer (H2O-H2O) """
TAGL['%s-%s-dimer' % (dbse, 'HB6-3' )] = """Dimer from Water Dimer (H2O-H2O) """
TAGL['%s-%s-monoA-CP' % (dbse, 'HB6-3' )] = """Monomer A from Water Dimer (H2O-H2O) """
TAGL['%s-%s-monoB-CP' % (dbse, 'HB6-3' )] = """Monomer B from Water Dimer (H2O-H2O) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'HB6-3' )] = """Monomer A from Water Dimer (H2O-H2O) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'HB6-3' )] = """Monomer B from Water Dimer (H2O-H2O) """
TAGL['%s-%s' % (dbse, 'HB6-4' )] = """Ammonia-Water Complex (NH3-H2O) """
TAGL['%s-%s-dimer' % (dbse, 'HB6-4' )] = """Dimer from Ammonia-Water Complex (NH3-H2O) """
TAGL['%s-%s-monoA-CP' % (dbse, 'HB6-4' )] = """Monomer A from Ammonia-Water Complex (NH3-H2O) """
TAGL['%s-%s-monoB-CP' % (dbse, 'HB6-4' )] = """Monomer B from Ammonia-Water Complex (NH3-H2O) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'HB6-4' )] = """Monomer A from Ammonia-Water Complex (NH3-H2O) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'HB6-4' )] = """Monomer B from Ammonia-Water Complex (NH3-H2O) """
TAGL['%s-%s' % (dbse, 'HB6-5' )] = """Formamide Dimer (HCONH2-HCONH2) """
TAGL['%s-%s-dimer' % (dbse, 'HB6-5' )] = """Dimer from Formamide Dimer (HCONH2-HCONH2) """
TAGL['%s-%s-monoA-CP' % (dbse, 'HB6-5' )] = """Monomer A from Formamide Dimer (HCONH2-HCONH2) """
TAGL['%s-%s-monoB-CP' % (dbse, 'HB6-5' )] = """Monomer B from Formamide Dimer (HCONH2-HCONH2) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'HB6-5' )] = """Monomer A from Formamide Dimer (HCONH2-HCONH2) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'HB6-5' )] = """Monomer B from Formamide Dimer (HCONH2-HCONH2) """
TAGL['%s-%s' % (dbse, 'HB6-6' )] = """Formic Acid Dimer (HCOOH-HCOOH) """
TAGL['%s-%s-dimer' % (dbse, 'HB6-6' )] = """Dimer from Formic Acid Dimer (HCOOH-HCOOH) """
TAGL['%s-%s-monoA-CP' % (dbse, 'HB6-6' )] = """Monomer A from Formic Acid Dimer (HCOOH-HCOOH) """
TAGL['%s-%s-monoB-CP' % (dbse, 'HB6-6' )] = """Monomer B from Formic Acid Dimer (HCOOH-HCOOH) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'HB6-6' )] = """Monomer A from Formic Acid Dimer (HCOOH-HCOOH) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'HB6-6' )] = """Monomer B from Formic Acid Dimer (HCOOH-HCOOH) """
TAGL['%s-%s' % (dbse, 'PPS5-1' )] = """Ethine Dimer (HCCH-HCCH) """
TAGL['%s-%s-dimer' % (dbse, 'PPS5-1' )] = """Dimer from Ethine Dimer (HCCH-HCCH) """
TAGL['%s-%s-monoA-CP' % (dbse, 'PPS5-1' )] = """Monomer A from Ethine Dimer (HCCH-HCCH) """
TAGL['%s-%s-monoB-CP' % (dbse, 'PPS5-1' )] = """Monomer B from Ethine Dimer (HCCH-HCCH) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'PPS5-1' )] = """Monomer A from Ethine Dimer (HCCH-HCCH) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'PPS5-1' )] = """Monomer B from Ethine Dimer (HCCH-HCCH) """
TAGL['%s-%s' % (dbse, 'PPS5-2' )] = """Ethene Dimer (C2H4-C2H4) """
TAGL['%s-%s-dimer' % (dbse, 'PPS5-2' )] = """Dimer from Ethene Dimer (C2H4-C2H4) """
TAGL['%s-%s-monoA-CP' % (dbse, 'PPS5-2' )] = """Monomer A from Ethene Dimer (C2H4-C2H4) """
TAGL['%s-%s-monoB-CP' % (dbse, 'PPS5-2' )] = """Monomer B from Ethene Dimer (C2H4-C2H4) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'PPS5-2' )] = """Monomer A from Ethene Dimer (C2H4-C2H4) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'PPS5-2' )] = """Monomer B from Ethene Dimer (C2H4-C2H4) """
TAGL['%s-%s' % (dbse, 'PPS5-3' )] = """Sandwich Benzene Dimer (BzBz_S) """
TAGL['%s-%s-dimer' % (dbse, 'PPS5-3' )] = """Dimer from Sandwich Benzene Dimer (BzBz_S) """
TAGL['%s-%s-monoA-CP' % (dbse, 'PPS5-3' )] = """Monomer A from Sandwich Benzene Dimer (BzBz_S) """
TAGL['%s-%s-monoB-CP' % (dbse, 'PPS5-3' )] = """Monomer B from Sandwich Benzene Dimer (BzBz_S) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'PPS5-3' )] = """Monomer A from Sandwich Benzene Dimer (BzBz_S) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'PPS5-3' )] = """Monomer B from Sandwich Benzene Dimer (BzBz_S) """
TAGL['%s-%s' % (dbse, 'PPS5-4' )] = """T-Shaped Benzene Dimer (BzBz_T) """
TAGL['%s-%s-dimer' % (dbse, 'PPS5-4' )] = """Dimer from T-Shaped Benzene Dimer (BzBz_T) """
TAGL['%s-%s-monoA-CP' % (dbse, 'PPS5-4' )] = """Monomer A from T-Shaped Benzene Dimer (BzBz_T) """
TAGL['%s-%s-monoB-CP' % (dbse, 'PPS5-4' )] = """Monomer B from T-Shaped Benzene Dimer (BzBz_T) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'PPS5-4' )] = """Monomer A from T-Shaped Benzene Dimer (BzBz_T) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'PPS5-4' )] = """Monomer B from T-Shaped Benzene Dimer (BzBz_T) """
TAGL['%s-%s' % (dbse, 'PPS5-5' )] = """Parallel-Displaced Benzene Dimer (BzBz_PD) """
TAGL['%s-%s-dimer' % (dbse, 'PPS5-5' )] = """Dimer from Parallel-Displaced Benzene Dimer (BzBz_PD) """
TAGL['%s-%s-monoA-CP' % (dbse, 'PPS5-5' )] = """Monomer A from Parallel-Displaced Benzene Dimer (BzBz_PD) """
TAGL['%s-%s-monoB-CP' % (dbse, 'PPS5-5' )] = """Monomer B from Parallel-Displaced Benzene Dimer (BzBz_PD) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'PPS5-5' )] = """Monomer A from Parallel-Displaced Benzene Dimer (BzBz_PD) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'PPS5-5' )] = """Monomer B from Parallel-Displaced Benzene Dimer (BzBz_PD) """
TAGL['%s-%s' % (dbse, 'WI7-1' )] = """Helium-Neon Complex (He-Ne) """
TAGL['%s-%s-dimer' % (dbse, 'WI7-1' )] = """Dimer from Helium-Neon Complex (He-Ne) """
TAGL['%s-%s-monoA-CP' % (dbse, 'WI7-1' )] = """Monomer A from Helium-Neon Complex (He-Ne) """
TAGL['%s-%s-monoB-CP' % (dbse, 'WI7-1' )] = """Monomer B from Helium-Neon Complex (He-Ne) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'WI7-1' )] = """Monomer A from Helium-Neon Complex (He-Ne) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'WI7-1' )] = """Monomer B from Helium-Neon Complex (He-Ne) """
TAGL['%s-%s' % (dbse, 'WI7-2' )] = """Helium-Argon Complex (He-Ar) """
TAGL['%s-%s-dimer' % (dbse, 'WI7-2' )] = """Dimer from Helium-Argon Complex (He-Ar) """
TAGL['%s-%s-monoA-CP' % (dbse, 'WI7-2' )] = """Monomer A from Helium-Argon Complex (He-Ar) """
TAGL['%s-%s-monoB-CP' % (dbse, 'WI7-2' )] = """Monomer B from Helium-Argon Complex (He-Ar) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'WI7-2' )] = """Monomer A from Helium-Argon Complex (He-Ar) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'WI7-2' )] = """Monomer B from Helium-Argon Complex (He-Ar) """
TAGL['%s-%s' % (dbse, 'WI7-3' )] = """Neon Dimer (Ne-Ne) """
TAGL['%s-%s-dimer' % (dbse, 'WI7-3' )] = """Dimer from Neon Dimer (Ne-Ne) """
TAGL['%s-%s-monoA-CP' % (dbse, 'WI7-3' )] = """Monomer A from Neon Dimer (Ne-Ne) """
TAGL['%s-%s-monoB-CP' % (dbse, 'WI7-3' )] = """Monomer B from Neon Dimer (Ne-Ne) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'WI7-3' )] = """Monomer A from Neon Dimer (Ne-Ne) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'WI7-3' )] = """Monomer B from Neon Dimer (Ne-Ne) """
TAGL['%s-%s' % (dbse, 'WI7-4' )] = """Neon-Argon Complex (Ne-Ar) """
TAGL['%s-%s-dimer' % (dbse, 'WI7-4' )] = """Dimer from Neon-Argon Complex (Ne-Ar) """
TAGL['%s-%s-monoA-CP' % (dbse, 'WI7-4' )] = """Monomer A from Neon-Argon Complex (Ne-Ar) """
TAGL['%s-%s-monoB-CP' % (dbse, 'WI7-4' )] = """Monomer B from Neon-Argon Complex (Ne-Ar) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'WI7-4' )] = """Monomer A from Neon-Argon Complex (Ne-Ar) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'WI7-4' )] = """Monomer B from Neon-Argon Complex (Ne-Ar) """
TAGL['%s-%s' % (dbse, 'WI7-5' )] = """Methane-Neon Complex (CH4-Ne) """
TAGL['%s-%s-dimer' % (dbse, 'WI7-5' )] = """Dimer from Methane-Neon Complex (CH4-Ne) """
TAGL['%s-%s-monoA-CP' % (dbse, 'WI7-5' )] = """Monomer A from Methane-Neon Complex (CH4-Ne) """
TAGL['%s-%s-monoB-CP' % (dbse, 'WI7-5' )] = """Monomer B from Methane-Neon Complex (CH4-Ne) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'WI7-5' )] = """Monomer A from Methane-Neon Complex (CH4-Ne) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'WI7-5' )] = """Monomer B from Methane-Neon Complex (CH4-Ne) """
TAGL['%s-%s' % (dbse, 'WI7-6' )] = """Benzene-Neon Complex (Bz-Ne) """
TAGL['%s-%s-dimer' % (dbse, 'WI7-6' )] = """Dimer from Benzene-Neon Complex (Bz-Ne) """
TAGL['%s-%s-monoA-CP' % (dbse, 'WI7-6' )] = """Monomer A from Benzene-Neon Complex (Bz-Ne) """
TAGL['%s-%s-monoB-CP' % (dbse, 'WI7-6' )] = """Monomer B from Benzene-Neon Complex (Bz-Ne) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'WI7-6' )] = """Monomer A from Benzene-Neon Complex (Bz-Ne) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'WI7-6' )] = """Monomer B from Benzene-Neon Complex (Bz-Ne) """
TAGL['%s-%s' % (dbse, 'WI7-7' )] = """Methane Dimer (CH4-CH4) """
TAGL['%s-%s-dimer' % (dbse, 'WI7-7' )] = """Dimer from Methane Dimer (CH4-CH4) """
TAGL['%s-%s-monoA-CP' % (dbse, 'WI7-7' )] = """Monomer A from Methane Dimer (CH4-CH4) """
TAGL['%s-%s-monoB-CP' % (dbse, 'WI7-7' )] = """Monomer B from Methane Dimer (CH4-CH4) """
TAGL['%s-%s-monoA-unCP' % (dbse, 'WI7-7' )] = """Monomer A from Methane Dimer (CH4-CH4) """
TAGL['%s-%s-monoB-unCP' % (dbse, 'WI7-7' )] = """Monomer B from Methane Dimer (CH4-CH4) """
TAGL['%s-%s-mono-RLX' % (dbse, 'HCCH' )] = """Ethine Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'C2H4' )] = """Ethene Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'Bz' )] = """Benzene Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'CH3Cl' )] = """Methyl Chloride Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'CH3SH' )] = """Methanethiol Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'CH4' )] = """Methane Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'F2' )] = """Fluorine Molecule Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'H2O' )] = """Water Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'H2S' )] = """Hydrogen Sulfide Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'HCl' )] = """Hydrogen Chloride Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'HCN' )] = """Hydrogen Cyanide Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'HCONH2' )] = """Formamide Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'HCOOH' )] = """Formic Acid Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'He' )] = """Helium Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'Ne' )] = """Neon Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'Ar' )] = """Argon Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'HF' )] = """Hydrogen Fluoride Relaxed Monomer """
TAGL['%s-%s-mono-RLX' % (dbse, 'NH3' )] = """Ammonia Relaxed Monomer """
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-%s' % (dbse, 'CT7-1', 'dimer')] = qcdb.Molecule("""
0 1
C 0.00000000 -2.19285000 -0.66839500
C -0.00000000 -2.19286000 0.66839500
H -0.92518700 -2.19231600 -1.23398200
H 0.92518700 -2.19232500 -1.23398300
H -0.92518700 -2.19232000 1.23398200
H 0.92518700 -2.19231100 1.23398200
--
0 1
F 0.00000000 0.78568800 0.00000000
F 0.00000000 2.20564800 0.00000100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-2', 'dimer')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 -2.14998500
H 0.00000000 0.93965200 -2.53440100
H 0.81376200 -0.46982600 -2.53440100
H -0.81376200 -0.46982600 -2.53440100
--
0 1
F 0.00000000 0.00000000 0.54577100
F 0.00000000 0.00000000 1.97124000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-3', 'dimer')] = qcdb.Molecule("""
0 1
H 0.00000000 1.67189100 -2.21255500
C 0.00000000 0.60529300 -2.19955900
C 0.00000000 -0.60529300 -2.19955900
H 0.00000000 -1.67189100 -2.21255500
--
0 1
Cl 0.00000000 -0.00000000 0.61188000
F 0.00000000 -0.00000000 2.26865100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-4', 'dimer')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 -1.83951900
C 0.00000000 0.00000000 -2.99573100
H 0.00000000 0.00000000 -4.06502600
--
0 1
F -0.00000000 0.00000000 2.42592000
Cl -0.00000000 0.00000000 0.76957400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-5', 'dimer')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 -2.83845100
H 0.00000000 0.94268700 -3.21538300
H 0.81639100 -0.47134300 -3.21538300
H -0.81639100 -0.47134300 -3.21538300
--
0 1
Cl 0.00000000 0.00000000 -0.15004400
Cl 0.00000000 0.00000000 1.88623900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-6', 'dimer')] = qcdb.Molecule("""
0 1
O 2.23981900 0.00002700 -0.08823100
H 2.60088700 0.76196300 0.37705500
H 2.60108700 -0.76172700 0.37719400
--
0 1
Cl -0.31586800 -0.00006600 -0.01691400
F -1.97230800 0.00007400 0.02657000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-7', 'dimer')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 -2.05789900
H 0.00000000 0.94960500 -2.41448800
H 0.82238200 -0.47480300 -2.41448800
H -0.82238200 -0.47480300 -2.41448800
--
0 1
Cl 0.00000000 0.00000000 0.24385500
F 0.00000000 0.00000000 1.94480300
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-1', 'dimer')] = qcdb.Molecule("""
0 1
S -2.03099600 0.10323300 -0.00078200
H -1.93402000 -0.81846200 0.96967600
H -1.94045000 -0.83661600 -0.95429900
--
0 1
S 2.07983800 -0.08511200 0.00018100
H 2.33915400 1.23101900 -0.00221400
H 0.75384800 0.13412100 -0.00353700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-2', 'dimer')] = qcdb.Molecule("""
0 1
Cl 1.86082400 -0.06541100 -0.00006800
H 1.75394100 1.21098100 0.00034100
--
0 1
Cl -1.92526600 0.00557100 -0.00009700
H -0.65842700 -0.19370300 0.00247600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-3', 'dimer')] = qcdb.Molecule("""
0 1
Cl -1.91163600 -0.00001100 0.00349800
H -0.62731700 -0.00005800 -0.10405100
--
0 1
S 1.84252900 0.00001300 -0.10154300
H 1.82277900 -0.96181000 0.83465000
H 1.82187700 0.96186000 0.83462200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-4', 'dimer')] = qcdb.Molecule("""
0 1
C -1.49512800 1.12579900 -0.00000200
Cl -1.40247600 -0.66254400 0.00013900
H -0.48106900 1.51836100 -0.00121600
H -2.02718100 1.43516300 0.89531200
H -2.02924000 1.43492300 -0.89417200
--
0 1
Cl 2.13960800 0.03729800 -0.00013800
H 0.97700200 -0.51405400 0.00007200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-5', 'dimer')] = qcdb.Molecule("""
0 1
C 1.99644300 0.05718500 -0.00648300
N 2.98021800 0.65834500 0.10945000
H 1.07234100 -0.48518900 -0.10641600
--
0 1
S -1.51439900 -0.79999400 -0.11697900
C -1.57014400 1.01297400 0.01160700
H -1.55457900 -1.05260000 1.20049200
H -1.54556000 1.39238100 -1.01019600
H -0.70866100 1.40255300 0.55309700
H -2.49314500 1.33992300 0.48665400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-6', 'dimer')] = qcdb.Molecule("""
0 1
C -1.44764800 1.15564900 0.01851300
S -1.41459500 -0.65984600 -0.08354400
H -1.46628400 1.51681600 -1.00988000
H -0.55297100 1.53526500 0.51001200
H -2.34423900 1.49773300 0.53186300
H -1.37736100 -0.89092100 1.23821400
--
0 1
Cl 2.12576600 0.02408100 0.00315600
H 0.92223800 -0.44463500 -0.09824700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-1', 'dimer')] = qcdb.Molecule("""
0 1
N 1.57522500 0.00008500 -0.04260700
H 2.13110800 0.81394900 -0.28661400
H 1.49645000 -0.00293600 0.97025700
H 2.13172100 -0.81189200 -0.29145300
--
0 1
N -1.68824500 0.00008300 0.10484800
H -2.12640300 -0.81268000 -0.31731000
H -2.12744200 0.81184200 -0.31815800
H -0.71429700 0.00054300 -0.19240700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-2', 'dimer')] = qcdb.Molecule("""
0 1
F 1.32373600 -0.09022600 -0.00000700
H 1.74043700 0.73339000 0.00001300
--
0 1
F -1.45719500 0.01925700 -0.00001100
H -0.53931000 -0.09466400 0.00014500
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-3', 'dimer')] = qcdb.Molecule("""
0 1
O 1.53175000 0.00592200 -0.12088000
H 0.57596800 -0.00524900 0.02496600
H 1.90624900 -0.03756100 0.76321800
--
0 1
O -1.39622600 -0.00499000 0.10676600
H -1.78937200 -0.74228300 -0.37100900
H -1.77703700 0.77763800 -0.30426400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-4', 'dimer')] = qcdb.Molecule("""
0 1
N -1.39559100 -0.02156400 0.00003700
H -1.62981100 0.96109600 -0.10622400
H -1.86276700 -0.51254400 -0.75597400
H -1.83354700 -0.33077000 0.86230700
--
0 1
O 1.56850100 0.10589200 0.00000500
H 0.60673600 -0.03396200 -0.00062800
H 1.94051900 -0.78000500 0.00022200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-5', 'dimer')] = qcdb.Molecule("""
0 1
O -1.14108700 1.44521200 0.00000000
C -0.06175400 2.03094700 0.00000000
H -0.01368700 3.13016900 0.00000000
N 1.14108700 1.43587700 0.00000000
H 1.21768600 0.41652700 0.00000000
H 1.97144600 2.00209500 0.00000000
--
0 1
O 1.14108700 -1.44521200 0.00000000
C 0.06175400 -2.03094700 0.00000000
H 0.01368700 -3.13016900 0.00000000
N -1.14108700 -1.43587700 0.00000000
H -1.21768600 -0.41652700 0.00000000
H -1.97144600 -2.00209500 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-6', 'dimer')] = qcdb.Molecule("""
0 1
C -0.12023400 1.91407000 0.00000000
H -0.16729500 3.00701800 0.00000000
O -1.12185700 1.22098200 0.00000000
O 1.12185700 1.48048900 0.00000000
H 1.12758200 0.48902400 0.00000000
--
0 1
O 1.12185700 -1.22098200 0.00000000
C 0.12023400 -1.91407000 0.00000000
O -1.12185700 -1.48048900 0.00000000
H -1.12758200 -0.48902400 0.00000000
H 0.16729500 -3.00701800 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'PPS5-1', 'dimer')] = qcdb.Molecule("""
0 1
C -0.41254600 1.67817500 0.00000000
C 0.41254600 2.56162700 0.00000000
H -1.13202600 0.89080900 0.00000000
H 1.13465100 3.34577000 0.00000000
--
0 1
C 0.41254600 -1.67817500 0.00000000
C -0.41254600 -2.56162700 0.00000000
H 1.13202600 -0.89080900 0.00000000
H -1.13465100 -3.34577000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'PPS5-2', 'dimer')] = qcdb.Molecule("""
0 1
C 1.85776800 0.47280300 0.47242500
C 1.85776800 -0.47280300 -0.47242500
H 0.93377200 0.87468800 0.87406300
H 2.78381800 0.87170900 0.87155600
H 2.78381800 -0.87170900 -0.87155600
H 0.93377200 -0.87468800 -0.87406300
--
0 1
C -1.85776800 0.47280300 -0.47242500
C -1.85776800 -0.47280300 0.47242500
H -2.78381800 0.87170900 -0.87155600
H -0.93377200 0.87468800 -0.87406300
H -0.93377200 -0.87468800 0.87406300
H -2.78381800 -0.87170900 0.87155600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'PPS5-3', 'dimer')] = qcdb.Molecule("""
0 1
C 0.00000000 1.95000000 1.39150000
H 0.00000000 1.95000000 2.47150000
C 1.20507435 1.95000000 0.69575000
H 2.14038179 1.95000000 1.23575000
C 1.20507435 1.95000000 -0.69575000
H 2.14038179 1.95000000 -1.23575000
C -0.00000000 1.95000000 -1.39150000
H -0.00000000 1.95000000 -2.47150000
C -1.20507435 1.95000000 -0.69575000
H -2.14038179 1.95000000 -1.23575000
C -1.20507435 1.95000000 0.69575000
H -2.14038179 1.95000000 1.23575000
--
0 1
C -1.20507435 -1.95000000 -0.69575000
H -2.14038179 -1.95000000 -1.23575000
C -0.00000000 -1.95000000 -1.39150000
H -0.00000000 -1.95000000 -2.47150000
C 1.20507435 -1.95000000 -0.69575000
H 2.14038179 -1.95000000 -1.23575000
C 1.20507435 -1.95000000 0.69575000
H 2.14038179 -1.95000000 1.23575000
C -0.00000000 -1.95000000 1.39150000
H -0.00000000 -1.95000000 2.47150000
C -1.20507435 -1.95000000 0.69575000
H -2.14038179 -1.95000000 1.23575000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'PPS5-4', 'dimer')] = qcdb.Molecule("""
0 1
C 1.39150000 -0.00000000 2.49575000
H 2.47150000 -0.00000000 2.49575000
C 0.69575000 1.20507435 2.49575000
H 1.23575000 2.14038179 2.49575000
C 0.69575000 -1.20507435 2.49575000
H 1.23575000 -2.14038179 2.49575000
C -0.69575000 1.20507435 2.49575000
H -1.23575000 2.14038179 2.49575000
C -0.69575000 -1.20507435 2.49575000
H -1.23575000 -2.14038179 2.49575000
C -1.39150000 -0.00000000 2.49575000
H -2.47150000 -0.00000000 2.49575000
--
0 1
C 0.00000000 0.00000000 -1.10425000
C -0.00000000 -1.20507435 -1.80000000
H -0.00000000 -2.14038179 -1.26000000
H 0.00000000 0.00000000 -0.02425000
C -0.00000000 -1.20507435 -3.19150000
H -0.00000000 -2.14038179 -3.73150000
C -0.00000000 0.00000000 -3.88725000
H -0.00000000 0.00000000 -4.96725000
C -0.00000000 1.20507435 -3.19150000
H 0.00000000 2.14038179 -3.73150000
C 0.00000000 1.20507435 -1.80000000
H 0.00000000 2.14038179 -1.26000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'PPS5-5', 'dimer')] = qcdb.Molecule("""
0 1
C -0.80000000 1.80000000 1.39150000
H -0.80000000 1.80000000 2.47150000
C 0.40507435 1.80000000 0.69575000
H 1.34038179 1.80000000 1.23575000
C -2.00507435 1.80000000 0.69575000
H -2.94038179 1.80000000 1.23575000
C 0.40507435 1.80000000 -0.69575000
H 1.34038179 1.80000000 -1.23575000
C -2.00507435 1.80000000 -0.69575000
H -2.94038179 1.80000000 -1.23575000
C -0.80000000 1.80000000 -1.39150000
H -0.80000000 1.80000000 -2.47150000
--
0 1
C 0.80000000 -1.80000000 -1.39150000
C 2.00507435 -1.80000000 -0.69575000
H 2.94038179 -1.80000000 -1.23575000
H 0.80000000 -1.80000000 -2.47150000
C 2.00507435 -1.80000000 0.69575000
H 2.94038179 -1.80000000 1.23575000
C 0.80000000 -1.80000000 1.39150000
H 0.80000000 -1.80000000 2.47150000
C -0.40507435 -1.80000000 0.69575000
H -1.34038179 -1.80000000 1.23575000
C -0.40507435 -1.80000000 -0.69575000
H -1.34038179 -1.80000000 -1.23575000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-1', 'dimer')] = qcdb.Molecule("""
0 1
He 0.00000000 0.00000000 0.00000000
--
0 1
Ne 3.03100000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-2', 'dimer')] = qcdb.Molecule("""
0 1
He 0.00000000 0.00000000 0.00000000
--
0 1
Ar 3.48000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-3', 'dimer')] = qcdb.Molecule("""
0 1
Ne 0.00000000 0.00000000 0.00000000
--
0 1
Ne 3.09100000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-4', 'dimer')] = qcdb.Molecule("""
0 1
Ne 0.00000000 0.00000000 0.00000000
--
0 1
Ar 3.48900000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-5', 'dimer')] = qcdb.Molecule("""
0 1
Ne 0.00070500 -0.03504900 -1.74260200
--
0 1
C -0.00070500 0.03504800 1.74257700
H -0.00115700 0.05752400 2.83186300
H -0.02121400 1.05430800 1.35836800
H -0.87960700 -0.50371400 1.39016200
H 0.89915700 -0.46792400 1.39016200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-6', 'dimer')] = qcdb.Molecule("""
0 1
C 0.00000000 1.39566300 -0.61935100
C -1.20868000 0.69783100 -0.61935100
C -1.20868000 -0.69783100 -0.61935100
C -0.00000000 -1.39566300 -0.61935100
C 1.20868000 -0.69783100 -0.61935100
C 1.20868000 0.69783100 -0.61935100
H 0.00000000 2.48003700 -0.61754900
H -2.14777500 1.24001800 -0.61754900
H -2.14777500 -1.24001800 -0.61754900
H -0.00000000 -2.48003700 -0.61754900
H 2.14777500 -1.24001800 -0.61754900
H 2.14777500 1.24001800 -0.61754900
--
0 1
Ne 0.00000000 0.00000000 2.60019400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-7', 'dimer')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 1.80727900
H -0.00000000 1.02664300 1.44240000
H -0.88909900 -0.51332200 1.44240000
H -0.00000000 0.00000000 2.89684300
H 0.88909900 -0.51332200 1.44240000
--
0 1
C -0.00000000 -0.00000000 -1.80727900
H 0.88909900 0.51332200 -1.44240000
H -0.00000000 -0.00000000 -2.89684300
H -0.88909900 0.51332200 -1.44240000
H -0.00000000 -1.02664300 -1.44240000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HCCH', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000400 -0.60420400 0.00000000
C 0.00000400 0.60419800 0.00000000
H 0.00679500 -1.67012800 0.00000000
H -0.00683900 1.67016300 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'C2H4', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.66807800
C 0.00000000 0.00000000 -0.66807800
H 0.00000000 0.92453300 1.23491900
H 0.00000000 -0.92453300 1.23491900
H 0.00000000 0.92453300 -1.23491900
H 0.00000000 -0.92453300 -1.23491900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'Bz', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000000 1.39567100 -0.61715800
C -1.20868600 0.69783500 -0.61715800
C -1.20868600 -0.69783500 -0.61715800
C 0.00000000 -1.39567100 -0.61715800
C 1.20868600 -0.69783500 -0.61715800
C 1.20868600 0.69783500 -0.61715800
H 0.00000000 2.47987600 -0.61699800
H -2.14763600 1.23993800 -0.61699800
H -2.14763600 -1.23993800 -0.61699800
H 0.00000000 -2.47987600 -0.61699800
H 2.14763600 -1.23993800 -0.61699800
H 2.14763600 1.23993800 -0.61699800
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CH3Cl', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -1.12626800
Cl 0.00000000 0.00000000 0.65820600
H 0.00000000 1.03097000 -1.47059600
H 0.89284600 -0.51548500 -1.47059600
H -0.89284600 -0.51548500 -1.47059600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CH3SH', 'mono-RLX')] = qcdb.Molecule("""
0 1
C -0.04788200 1.15150600 0.00000000
S -0.04788200 -0.66495900 0.00000000
H 1.28433700 -0.82104700 0.00000000
H -1.09471300 1.45662100 0.00000000
H 0.43188500 1.54736900 0.89371000
H 0.43188500 1.54736900 -0.89371000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CH4', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 -1.08947061 0.00000000
H -1.02716274 0.36315688 0.00000000
H 0.34238759 0.36315688 0.96841832
H 0.34238759 0.36315688 -0.96841832
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'F2', 'mono-RLX')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 1.41423000
F 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'H2O', 'mono-RLX')] = qcdb.Molecule("""
0 1
O 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 0.96183119
H 0.00000000 0.93357861 -0.23140921
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'H2S', 'mono-RLX')] = qcdb.Molecule("""
0 1
S 0.00000000 0.00000000 0.10389400
H 0.00000000 0.96116200 -0.83115300
H 0.00000000 -0.96116200 -0.83115300
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HCl', 'mono-RLX')] = qcdb.Molecule("""
0 1
Cl 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.27907275
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HCN', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -0.50103200
N 0.00000000 0.00000000 0.65706900
H 0.00000000 0.00000000 -1.57005300
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HCONH2', 'mono-RLX')] = qcdb.Molecule("""
0 1
C -0.16068500 0.38839900 -0.00053800
O -1.19570500 -0.24639200 0.00018900
N 1.08330000 -0.15841900 -0.00029100
H -0.13991800 1.49035000 0.00139300
H 1.18225800 -1.16041500 0.00111600
H 1.90431600 0.41973500 0.00124500
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HCOOH', 'mono-RLX')] = qcdb.Molecule("""
0 1
C -0.13470200 0.40125100 -0.00024900
O -1.13426200 -0.26458200 0.00006900
O 1.11868000 -0.09107500 0.00005600
H -0.10761700 1.49546500 0.00051300
H 1.04048400 -1.05771400 -0.00002000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'He', 'mono-RLX')] = qcdb.Molecule("""
0 1
He 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'Ne', 'mono-RLX')] = qcdb.Molecule("""
0 1
Ne 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'Ar', 'mono-RLX')] = qcdb.Molecule("""
0 1
Ar 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HF', 'mono-RLX')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 0.92073754
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'NH3', 'mono-RLX')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 0.11501300
H 0.00000000 0.93975200 -0.26836400
H 0.81385000 -0.46987600 -0.26836400
H -0.81385000 -0.46987600 -0.26836400
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
|
kannon92/psi4
|
psi4/share/psi4/databases/NCB31.py
|
Python
|
gpl-2.0
| 53,565
|
[
"Psi4"
] |
3f9d9d5b0efafd833c161dee775977dfce13ec8927bb6e773e7bbd7de705231b
|
"""PyQt v5 widget for VTK."""
__all__ = ['QVTKRenderWindowInteractor']
|
mspark93/VTK
|
Wrapping/Python/vtk/qt5/__init__.py
|
Python
|
bsd-3-clause
| 72
|
[
"VTK"
] |
bab0d4dca7b81f22afeccaff1d521847d048a069b3cbf99858d2dee4fad53bc7
|
"""Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=1) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=1) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib
try:
import _winreg
except ImportError:
_winreg = None
__all__ = [
"guess_type","guess_extension","guess_all_extensions",
"add_type","read_mime_types","init"
]
knownfiles = [
"/etc/mime.types",
"/etc/httpd/mime.types", # Mac OS X
"/etc/httpd/conf/mime.types", # Apache
"/etc/apache/mime.types", # Apache 1
"/etc/apache2/mime.types", # Apache 2
"/usr/local/etc/httpd/conf/mime.types",
"/usr/local/lib/netscape/mime.types",
"/usr/local/etc/httpd/conf/mime.types", # Apache 1.2
"/usr/local/etc/mime.types", # Apache 1.3
]
inited = False
_db = None
class MimeTypes:
"""MIME-types datastore.
This datastore can handle information from mime.types-style files
and supports basic determination of MIME type from a filename or
URL, and can guess a reasonable extension given a MIME type.
"""
def __init__(self, filenames=(), strict=True):
if not inited:
init()
self.encodings_map = encodings_map.copy()
self.suffix_map = suffix_map.copy()
self.types_map = ({}, {}) # dict for (non-strict, strict)
self.types_map_inv = ({}, {})
for (ext, type) in types_map.items():
self.add_type(type, ext, True)
for (ext, type) in common_types.items():
self.add_type(type, ext, False)
for name in filenames:
self.read(name, strict)
def add_type(self, type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
self.types_map[strict][ext] = type
exts = self.types_map_inv[strict].setdefault(type, [])
if ext not in exts:
exts.append(ext)
def guess_type(self, url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if
the type can't be guessed (no or unknown suffix) or a string
of the form type/subtype, usable for a MIME Content-type
header; and encoding is None for no encoding or the name of
the program used to encode (e.g. compress or gzip). The
mappings are table driven. Encoding suffixes are case
sensitive; type suffixes are first tried case sensitive, then
case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all
mapped to '.tar.gz'. (This is table-driven too, using the
dictionary suffix_map.)
Optional `strict' argument when False adds a bunch of commonly found,
but non-standard types.
"""
scheme, url = urllib.splittype(url)
if scheme == 'data':
# syntax of data URLs:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
# type/subtype defaults to "text/plain"
comma = url.find(',')
if comma < 0:
# bad data URL
return None, None
semi = url.find(';', 0, comma)
if semi >= 0:
type = url[:semi]
else:
type = url[:comma]
if '=' in type or '/' not in type:
type = 'text/plain'
return type, None # never compressed, so encoding is None
base, ext = posixpath.splitext(url)
while ext in self.suffix_map:
base, ext = posixpath.splitext(base + self.suffix_map[ext])
if ext in self.encodings_map:
encoding = self.encodings_map[ext]
base, ext = posixpath.splitext(base)
else:
encoding = None
types_map = self.types_map[True]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
elif strict:
return None, encoding
types_map = self.types_map[False]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
else:
return None, encoding
def guess_all_extensions(self, type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data stream,
but would be mapped to the MIME type `type' by guess_type().
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
type = type.lower()
extensions = self.types_map_inv[True].get(type, [])
if not strict:
for ext in self.types_map_inv[False].get(type, []):
if ext not in extensions:
extensions.append(ext)
return extensions
def guess_extension(self, type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension,
including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
extensions = self.guess_all_extensions(type, strict)
if not extensions:
return None
return extensions[0]
def read(self, filename, strict=True):
"""
Read a single mime.types-format file, specified by pathname.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
with open(filename) as fp:
self.readfp(fp, strict)
def readfp(self, fp, strict=True):
"""
Read a single mime.types-format file.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
while 1:
line = fp.readline()
if not line:
break
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words:
continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
self.add_type(type, '.' + suff, strict)
def read_windows_registry(self, strict=True):
"""
Load the MIME types database from Windows registry.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
# Windows only
if not _winreg:
return
def enum_types(mimedb):
i = 0
while True:
try:
ctype = _winreg.EnumKey(mimedb, i)
except EnvironmentError:
break
try:
ctype = ctype.encode(default_encoding) # omit in 3.x!
except UnicodeEncodeError:
pass
else:
yield ctype
i += 1
default_encoding = sys.getdefaultencoding()
with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT,
r'MIME\Database\Content Type') as mimedb:
for ctype in enum_types(mimedb):
try:
with _winreg.OpenKey(mimedb, ctype) as key:
suffix, datatype = _winreg.QueryValueEx(key,
'Extension')
except EnvironmentError:
continue
if datatype != _winreg.REG_SZ:
continue
try:
suffix = suffix.encode(default_encoding) # omit in 3.x!
except UnicodeEncodeError:
continue
self.add_type(ctype, suffix, strict)
def guess_type(url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if the
type can't be guessed (no or unknown suffix) or a string of the
form type/subtype, usable for a MIME Content-type header; and
encoding is None for no encoding or the name of the program used
to encode (e.g. compress or gzip). The mappings are table
driven. Encoding suffixes are case sensitive; type suffixes are
first tried case sensitive, then case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
to ".tar.gz". (This is table-driven too, using the dictionary
suffix_map).
Optional `strict' argument when false adds a bunch of commonly found, but
non-standard types.
"""
if _db is None:
init()
return _db.guess_type(url, strict)
def guess_all_extensions(type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension, including the
leading dot ('.'). The extension is not guaranteed to have been
associated with any particular data stream, but would be mapped to the
MIME type `type' by guess_type(). If no extension can be guessed for
`type', None is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
if _db is None:
init()
return _db.add_type(type, ext, strict)
def init(files=None):
global suffix_map, types_map, encodings_map, common_types
global inited, _db
inited = True # so that MimeTypes.__init__() doesn't call us again
db = MimeTypes()
if files is None:
if _winreg:
db.read_windows_registry()
files = knownfiles
for file in files:
if os.path.isfile(file):
db.read(file)
encodings_map = db.encodings_map
suffix_map = db.suffix_map
types_map = db.types_map[True]
common_types = db.types_map[False]
# Make the DB a global variable now that it is fully initialized
_db = db
def read_mime_types(file):
try:
f = open(file)
except IOError:
return None
db = MimeTypes()
db.readfp(f, True)
return db.types_map[True]
def _default_mime_types():
global suffix_map
global encodings_map
global types_map
global common_types
suffix_map = {
'.tgz': '.tar.gz',
'.taz': '.tar.gz',
'.tz': '.tar.gz',
'.tbz2': '.tar.bz2',
'.txz': '.tar.xz',
}
encodings_map = {
'.gz': 'gzip',
'.Z': 'compress',
'.bz2': 'bzip2',
'.xz': 'xz',
}
# Before adding new types, make sure they are either registered with IANA,
# at http://www.isi.edu/in-notes/iana/assignments/media-types
# or extensions, i.e. using the x- prefix
# If you add to these, please keep them sorted!
types_map = {
'.a' : 'application/octet-stream',
'.ai' : 'application/postscript',
'.aif' : 'audio/x-aiff',
'.aifc' : 'audio/x-aiff',
'.aiff' : 'audio/x-aiff',
'.au' : 'audio/basic',
'.avi' : 'video/x-msvideo',
'.bat' : 'text/plain',
'.bcpio' : 'application/x-bcpio',
'.bin' : 'application/octet-stream',
'.bmp' : 'image/x-ms-bmp',
'.c' : 'text/plain',
# Duplicates :(
'.cdf' : 'application/x-cdf',
'.cdf' : 'application/x-netcdf',
'.cpio' : 'application/x-cpio',
'.csh' : 'application/x-csh',
'.css' : 'text/css',
'.dll' : 'application/octet-stream',
'.doc' : 'application/msword',
'.dot' : 'application/msword',
'.dvi' : 'application/x-dvi',
'.eml' : 'message/rfc822',
'.eps' : 'application/postscript',
'.etx' : 'text/x-setext',
'.exe' : 'application/octet-stream',
'.gif' : 'image/gif',
'.gtar' : 'application/x-gtar',
'.h' : 'text/plain',
'.hdf' : 'application/x-hdf',
'.htm' : 'text/html',
'.html' : 'text/html',
'.ico' : 'image/vnd.microsoft.icon',
'.ief' : 'image/ief',
'.jpe' : 'image/jpeg',
'.jpeg' : 'image/jpeg',
'.jpg' : 'image/jpeg',
'.js' : 'application/javascript',
'.ksh' : 'text/plain',
'.latex' : 'application/x-latex',
'.m1v' : 'video/mpeg',
'.man' : 'application/x-troff-man',
'.me' : 'application/x-troff-me',
'.mht' : 'message/rfc822',
'.mhtml' : 'message/rfc822',
'.mif' : 'application/x-mif',
'.mov' : 'video/quicktime',
'.movie' : 'video/x-sgi-movie',
'.mp2' : 'audio/mpeg',
'.mp3' : 'audio/mpeg',
'.mp4' : 'video/mp4',
'.mpa' : 'video/mpeg',
'.mpe' : 'video/mpeg',
'.mpeg' : 'video/mpeg',
'.mpg' : 'video/mpeg',
'.ms' : 'application/x-troff-ms',
'.nc' : 'application/x-netcdf',
'.nws' : 'message/rfc822',
'.o' : 'application/octet-stream',
'.obj' : 'application/octet-stream',
'.oda' : 'application/oda',
'.p12' : 'application/x-pkcs12',
'.p7c' : 'application/pkcs7-mime',
'.pbm' : 'image/x-portable-bitmap',
'.pdf' : 'application/pdf',
'.pfx' : 'application/x-pkcs12',
'.pgm' : 'image/x-portable-graymap',
'.pl' : 'text/plain',
'.png' : 'image/png',
'.pnm' : 'image/x-portable-anymap',
'.pot' : 'application/vnd.ms-powerpoint',
'.ppa' : 'application/vnd.ms-powerpoint',
'.ppm' : 'image/x-portable-pixmap',
'.pps' : 'application/vnd.ms-powerpoint',
'.ppt' : 'application/vnd.ms-powerpoint',
'.ps' : 'application/postscript',
'.pwz' : 'application/vnd.ms-powerpoint',
'.py' : 'text/x-python',
'.pyc' : 'application/x-python-code',
'.pyo' : 'application/x-python-code',
'.qt' : 'video/quicktime',
'.ra' : 'audio/x-pn-realaudio',
'.ram' : 'application/x-pn-realaudio',
'.ras' : 'image/x-cmu-raster',
'.rdf' : 'application/xml',
'.rgb' : 'image/x-rgb',
'.roff' : 'application/x-troff',
'.rtx' : 'text/richtext',
'.sgm' : 'text/x-sgml',
'.sgml' : 'text/x-sgml',
'.sh' : 'application/x-sh',
'.shar' : 'application/x-shar',
'.snd' : 'audio/basic',
'.so' : 'application/octet-stream',
'.src' : 'application/x-wais-source',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc' : 'application/x-sv4crc',
'.swf' : 'application/x-shockwave-flash',
'.t' : 'application/x-troff',
'.tar' : 'application/x-tar',
'.tcl' : 'application/x-tcl',
'.tex' : 'application/x-tex',
'.texi' : 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.tif' : 'image/tiff',
'.tiff' : 'image/tiff',
'.tr' : 'application/x-troff',
'.tsv' : 'text/tab-separated-values',
'.txt' : 'text/plain',
'.ustar' : 'application/x-ustar',
'.vcf' : 'text/x-vcard',
'.wav' : 'audio/x-wav',
'.wiz' : 'application/msword',
'.wsdl' : 'application/xml',
'.xbm' : 'image/x-xbitmap',
'.xlb' : 'application/vnd.ms-excel',
# Duplicates :(
'.xls' : 'application/excel',
'.xls' : 'application/vnd.ms-excel',
'.xml' : 'text/xml',
'.xpdl' : 'application/xml',
'.xpm' : 'image/x-xpixmap',
'.xsl' : 'application/xml',
'.xwd' : 'image/x-xwindowdump',
'.zip' : 'application/zip',
}
# These are non-standard types, commonly found in the wild. They will
# only match if strict=0 flag is given to the API methods.
# Please sort these too
common_types = {
'.jpg' : 'image/jpg',
'.mid' : 'audio/midi',
'.midi': 'audio/midi',
'.pct' : 'image/pict',
'.pic' : 'image/pict',
'.pict': 'image/pict',
'.rtf' : 'application/rtf',
'.xul' : 'text/xul'
}
_default_mime_types()
if __name__ == '__main__':
import getopt
USAGE = """\
Usage: mimetypes.py [options] type
Options:
--help / -h -- print this message and exit
    --lenient / -l -- additionally search some common, but non-standard
                      types.
--extension / -e -- guess extension instead of type
More than one type argument may be given.
"""
def usage(code, msg=''):
print USAGE
if msg: print msg
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hle',
['help', 'lenient', 'extension'])
except getopt.error, msg:
usage(1, msg)
strict = 1
extension = 0
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-l', '--lenient'):
strict = 0
elif opt in ('-e', '--extension'):
extension = 1
for gtype in args:
if extension:
guess = guess_extension(gtype, strict)
if not guess: print "I don't know anything about type", gtype
else: print guess
else:
guess, encoding = guess_type(gtype, strict)
if not guess: print "I don't know anything about type", gtype
else: print 'type:', guess, 'encoding:', encoding
|
kleientertainment/ds_mod_tools
|
pkg/win32/Python27/Lib/mimetypes.py
|
Python
|
mit
| 21,297
|
[
"NetCDF"
] |
621d9bd7d8f1c32f6fffd490b12492bc7195c908a88530bc55893da5ebd83f46
|
from setuptools import setup
setup(
name='allay',
version='0.3.1',
description='Alleviate environmental pains',
url='https://github.com/brian-dlee/Allay.git',
author='Brian Lee',
author_email='briandl92391@gmail.com',
license='MIT',
packages=['allay'],
install_requires=[
'PyYaml',
'termcolor',
'pip',
'magnet'
],
dependency_links=[
"http://github.com/brian-dlee/magnet/tarball/master#egg=magnet"
],
scripts=['scripts/allay'],
zip_safe=True,
test_suite='allay.test',
)
|
orionnetworksolutions/Allay
|
setup.py
|
Python
|
mit
| 570
|
[
"Brian"
] |
e5b7f04cc90551d634c1afbe4ad109f7cf9cf155889bd642b38cdbb88d902fac
|
# coding=utf-8
"""
License/Disclaimer
------------------
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
brianr747/sfc_gui
|
sfc_gui/__init__.py
|
Python
|
apache-2.0
| 618
|
[
"Brian"
] |
410092c721f674ad506da0973a2b5b20a52f4579d62a09ff7d172e70319b9b19
|
# $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" command line utility for growing composite models
**Usage**
_GrowComposite [optional args] filename_
**Command Line Arguments**
- -n *count*: number of new models to build
- -C *pickle file name*: name of file containing composite upon which to build.
- --inNote *note*: note to be used in loading composite models from the database
for growing
- --balTable *table name*: table from which to take the original data set
(for balancing)
- --balWeight *weight*: (between 0 and 1) weighting factor for the new data
(for balancing). OR, *weight* can be a list of weights
- --balCnt *count*: number of individual models in the balanced composite
(for balancing)
- --balH: use only the holdout set from the original data set in the balancing
(for balancing)
- --balT: use only the training set from the original data set in the balancing
(for balancing)
- -S: shuffle the original data set
(for balancing)
- -r: randomize the activities of the original data set
(for balancing)
- -N *note*: note to be attached to the grown composite when it's saved in the
database
- --outNote *note*: equivalent to -N
- -o *filename*: name of an output file to hold the pickled composite after
it has been grown.
If multiple balance weights are used, the weights will be added to
the filenames.
- -L *limit*: provide an (integer) limit on individual model complexity
- -d *database name*: instead of reading the data from a QDAT file,
pull it from a database. In this case, the _filename_ argument
provides the name of the database table containing the data set.
- -p *tablename*: store persistence data in the database
in table *tablename*
- -l: locks the random number generator to give consistent sets
of training and hold-out data. This is primarily intended
for testing purposes.
- -g: be less greedy when training the models.
- -G *number*: force trees to be rooted at descriptor *number*.
- -D: show a detailed breakdown of the composite model performance
across the training and, when appropriate, hold-out sets.
- -t *threshold value*: use high-confidence predictions for the final
analysis of the hold-out data.
- -q *list string*: Add QuantTrees to the composite and use the list
specified in *list string* as the number of target quantization
bounds for each descriptor. Don't forget to include 0's at the
beginning and end of *list string* for the name and value fields.
For example, if there are 4 descriptors and you want 2 quant bounds
apiece, you would use _-q "[0,2,2,2,2,0]"_.
Two special cases:
1) If you would like to ignore a descriptor in the model building,
use '-1' for its number of quant bounds.
2) If you have integer valued data that should not be quantized
further, enter 0 for that descriptor.
- -V: print the version number and exit
"""
from __future__ import print_function
from rdkit import RDConfig
import numpy
from rdkit.ML.Data import DataUtils,SplitData
from rdkit.ML import ScreenComposite,BuildComposite
from rdkit.ML.Composite import AdjustComposite
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.ML import CompositeRun
from rdkit.six.moves import cPickle
import sys,time,types
_runDetails = CompositeRun.CompositeRun()
__VERSION_STRING="0.5.0"
_verbose = 1
def message(msg):
""" emits messages to _sys.stdout_
override this in modules which import this one to redirect output
**Arguments**
- msg: the string to be displayed
"""
if _verbose: sys.stdout.write('%s\n'%(msg))
def GrowIt(details,composite,progressCallback=None,
saveIt=1,setDescNames=0,data=None):
""" does the actual work of building a composite model
**Arguments**
- details: a _CompositeRun.CompositeRun_ object containing details
(options, parameters, etc.) about the run
- composite: the composite model to grow
- progressCallback: (optional) a function which is called with a single
argument (the number of models built so far) after each model is built.
- saveIt: (optional) if this is nonzero, the resulting model will be pickled
and dumped to the filename specified in _details.outName_
- setDescNames: (optional) if nonzero, the composite's _SetInputOrder()_ method
will be called using the results of the data set's _GetVarNames()_ method;
it is assumed that the details object has a _descNames attribute which
is passed to the composites _SetDescriptorNames()_ method. Otherwise
(the default), _SetDescriptorNames()_ gets the results of _GetVarNames()_.
- data: (optional) the data set to be used. If this is not provided, the
data set described in details will be used.
**Returns**
the enlarged composite model
"""
details.rundate = time.asctime()
if data is None:
fName = details.tableName.strip()
if details.outName == '':
details.outName = fName + '.pkl'
if details.dbName == '':
data = DataUtils.BuildQuantDataSet(fName)
elif details.qBounds != []:
details.tableName = fName
data = details.GetDataSet()
else:
data = DataUtils.DBToQuantData(details.dbName,fName,quantName=details.qTableName,
user=details.dbUser,password=details.dbPassword)
nExamples = data.GetNPts()
seed = composite._randomSeed
DataUtils.InitRandomNumbers(seed)
testExamples = []
if details.shuffleActivities == 1:
DataUtils.RandomizeActivities(data,shuffle=1,runDetails=details)
elif details.randomActivities == 1:
DataUtils.RandomizeActivities(data,shuffle=0,runDetails=details)
namedExamples = data.GetNamedData()
trainExamples = namedExamples
nExamples = len(trainExamples)
message('Training with %d examples'%(nExamples))
message('\t%d descriptors'%(len(trainExamples[0])-2))
nVars = data.GetNVars()
nPossibleVals = composite.nPossibleVals
attrs = range(1,nVars+1)
if details.useTrees:
from rdkit.ML.DecTree import CrossValidate,PruneTree
if details.qBounds != []:
from rdkit.ML.DecTree import BuildQuantTree
builder = BuildQuantTree.QuantTreeBoot
else:
from rdkit.ML.DecTree import ID3
builder = ID3.ID3Boot
driver = CrossValidate.CrossValidationDriver
pruner = PruneTree.PruneTree
if setDescNames:
composite.SetInputOrder(data.GetVarNames())
composite.Grow(trainExamples,attrs,[0]+nPossibleVals,
buildDriver=driver,
pruner=pruner,
nTries=details.nModels,pruneIt=details.pruneIt,
lessGreedy=details.lessGreedy,needsQuantization=0,
treeBuilder=builder,nQuantBounds=details.qBounds,
startAt=details.startAt,
maxDepth=details.limitDepth,
progressCallback=progressCallback,
silent=not _verbose)
else:
from rdkit.ML.Neural import CrossValidate
driver = CrossValidate.CrossValidationDriver
composite.Grow(trainExamples,attrs,[0]+nPossibleVals,nTries=details.nModels,
buildDriver=driver,needsQuantization=0)
composite.AverageErrors()
composite.SortModels()
modelList,counts,avgErrs = composite.GetAllData()
counts = numpy.array(counts)
avgErrs = numpy.array(avgErrs)
composite._varNames = data.GetVarNames()
for i in range(len(modelList)):
modelList[i].NameModel(composite._varNames)
# do final statistics
weightedErrs = counts*avgErrs
averageErr = sum(weightedErrs)/sum(counts)
devs = (avgErrs - averageErr)
devs = devs * counts
devs = numpy.sqrt(devs*devs)
avgDev = sum(devs)/sum(counts)
if _verbose:
message('# Overall Average Error: %%% 5.2f, Average Deviation: %%% 6.2f'%(100.*averageErr,100.*avgDev))
if details.bayesModel:
composite.Train(trainExamples,verbose=0)
badExamples = []
if not details.detailedRes:
if _verbose:
message('Testing all examples')
wrong = BuildComposite.testall(composite,namedExamples,badExamples)
if _verbose:
message('%d examples (%% %5.2f) were misclassified'%(len(wrong),100.*float(len(wrong))/float(len(namedExamples))))
_runDetails.overall_error = float(len(wrong))/len(namedExamples)
if details.detailedRes:
if _verbose:
message('\nEntire data set:')
resTup = ScreenComposite.ShowVoteResults(range(data.GetNPts()),data,composite,
nPossibleVals[-1],details.threshold)
nGood,nBad,nSkip,avgGood,avgBad,avgSkip,voteTab = resTup
nPts = len(namedExamples)
nClass = nGood+nBad
_runDetails.overall_error = float(nBad) / nClass
_runDetails.overall_correct_conf = avgGood
_runDetails.overall_incorrect_conf = avgBad
_runDetails.overall_result_matrix = repr(voteTab)
nRej = nClass-nPts
if nRej > 0:
_runDetails.overall_fraction_dropped = float(nRej)/nPts
return composite
def GetComposites(details):
res = []
if details.persistTblName and details.inNote:
conn = DbConnect(details.dbName,details.persistTblName)
mdls = conn.GetData(fields='MODEL',where="where note='%s'"%(details.inNote))
for row in mdls:
rawD = row[0]
res.append(cPickle.loads(str(rawD)))
elif details.composFileName:
res.append(cPickle.load(open(details.composFileName,'rb')))
return res
def BalanceComposite(details,composite,data1=None,data2=None):
""" balances the composite using the parameters provided in details
**Arguments**
- details a _CompositeRun.RunDetails_ object
- composite: the composite model to be balanced
- data1: (optional) if provided, this should be the
data set used to construct the original models
- data2: (optional) if provided, this should be the
data set used to construct the new individual models
"""
if not details.balCnt or details.balCnt > len(composite):
return composite
message("Balancing Composite")
#
# start by getting data set 1: which is the data set used to build the
# original models
#
if data1 is None:
message("\tReading First Data Set")
fName = details.balTable.strip()
tmp = details.tableName
details.tableName = fName
dbName = details.dbName
details.dbName = details.balDb
data1 = details.GetDataSet()
details.tableName = tmp
details.dbName = dbName
if data1 is None:
return composite
details.splitFrac = composite._splitFrac
details.randomSeed = composite._randomSeed
DataUtils.InitRandomNumbers(details.randomSeed)
if details.shuffleActivities == 1:
DataUtils.RandomizeActivities(data1,shuffle=1,runDetails=details)
elif details.randomActivities == 1:
DataUtils.RandomizeActivities(data1,shuffle=0,runDetails=details)
namedExamples = data1.GetNamedData()
if details.balDoHoldout or details.balDoTrain:
trainIdx,testIdx = SplitData.SplitIndices(len(namedExamples),details.splitFrac,
silent=1)
trainExamples = [namedExamples[x] for x in trainIdx]
testExamples = [namedExamples[x] for x in testIdx]
if details.filterFrac != 0.0:
trainIdx,temp = DataUtils.FilterData(trainExamples,details.filterVal,
details.filterFrac,-1,
indicesOnly=1)
tmp = [trainExamples[x] for x in trainIdx]
testExamples += [trainExamples[x] for x in temp]
trainExamples = tmp
if details.balDoHoldout:
testExamples,trainExamples = trainExamples,testExamples
else:
trainExamples = namedExamples
dataSet1 = trainExamples
cols1 = [x.upper() for x in data1.GetVarNames()]
data1 = None
#
# now grab data set 2: the data used to build the new individual models
#
if data2 is None:
message("\tReading Second Data Set")
data2 = details.GetDataSet()
if data2 is None:
return composite
details.splitFrac = composite._splitFrac
details.randomSeed = composite._randomSeed
DataUtils.InitRandomNumbers(details.randomSeed)
if details.shuffleActivities == 1:
DataUtils.RandomizeActivities(data2,shuffle=1,runDetails=details)
elif details.randomActivities == 1:
DataUtils.RandomizeActivities(data2,shuffle=0,runDetails=details)
dataSet2 = data2.GetNamedData()
cols2 = [x.upper() for x in data2.GetVarNames()]
data2 = None
# and balance it:
res = []
weights = details.balWeight
  if not isinstance(weights, (tuple, list)):
weights = (weights,)
for weight in weights:
message("\tBalancing with Weight: %.4f"%(weight))
res.append(AdjustComposite.BalanceComposite(composite,dataSet1,dataSet2,
weight,
details.balCnt,
names1=cols1,names2=cols2))
return res
def ShowVersion(includeArgs=0):
""" prints the version number
"""
print('This is GrowComposite.py version %s'%(__VERSION_STRING))
if includeArgs:
import sys
print('command line was:')
print(' '.join(sys.argv))
def Usage():
""" provides a list of arguments for when this is used from the command line
"""
import sys
print(__doc__)
sys.exit(-1)
def SetDefaults(runDetails=None):
""" initializes a details object with default values
**Arguments**
- details: (optional) a _CompositeRun.CompositeRun_ object.
If this is not provided, the global _runDetails will be used.
**Returns**
the initialized _CompositeRun_ object.
"""
if runDetails is None: runDetails = _runDetails
return CompositeRun.SetDefaults(runDetails)
def ParseArgs(runDetails):
""" parses command line arguments and updates _runDetails_
**Arguments**
- runDetails: a _CompositeRun.CompositeRun_ object.
"""
import getopt
args,extra = getopt.getopt(sys.argv[1:],'P:o:n:p:b:sf:F:v:hlgd:rSTt:Q:q:DVG:L:C:N:',
['inNote=','outNote=','balTable=','balWeight=','balCnt=',
'balH','balT','balDb=',])
runDetails.inNote=''
runDetails.composFileName=''
runDetails.balTable=''
runDetails.balWeight=(0.5,)
runDetails.balCnt=0
runDetails.balDoHoldout=0
runDetails.balDoTrain=0
runDetails.balDb=''
for arg,val in args:
if arg == '-n':
runDetails.nModels = int(val)
elif arg == '-C':
runDetails.composFileName=val
elif arg=='--balTable':
runDetails.balTable=val
elif arg=='--balWeight':
runDetails.balWeight=eval(val)
      if not isinstance(runDetails.balWeight, (tuple, list)):
runDetails.balWeight=(runDetails.balWeight,)
elif arg=='--balCnt':
runDetails.balCnt=int(val)
elif arg=='--balH':
runDetails.balDoHoldout=1
elif arg=='--balT':
runDetails.balDoTrain=1
elif arg=='--balDb':
runDetails.balDb=val
elif arg == '--inNote':
runDetails.inNote=val
elif arg == '-N' or arg=='--outNote':
runDetails.note=val
elif arg == '-o':
runDetails.outName = val
elif arg == '-p':
runDetails.persistTblName=val
elif arg == '-r':
runDetails.randomActivities = 1
elif arg == '-S':
runDetails.shuffleActivities = 1
elif arg == '-h':
Usage()
elif arg == '-l':
runDetails.lockRandom = 1
elif arg == '-g':
runDetails.lessGreedy=1
elif arg == '-G':
runDetails.startAt = int(val)
elif arg == '-d':
runDetails.dbName=val
elif arg == '-T':
runDetails.useTrees = 0
elif arg == '-t':
runDetails.threshold=float(val)
elif arg == '-D':
runDetails.detailedRes = 1
elif arg == '-L':
runDetails.limitDepth = int(val)
elif arg == '-q':
qBounds = eval(val)
      assert isinstance(qBounds, (tuple, list)), 'bad argument type for -q, specify a list as a string'
runDetails.qBoundCount=val
runDetails.qBounds = qBounds
elif arg == '-Q':
qBounds = eval(val)
      assert isinstance(qBounds, (tuple, list)), 'bad argument type for -Q, specify a list as a string'
runDetails.activityBounds=qBounds
runDetails.activityBoundsVals=val
elif arg == '-V':
ShowVersion()
sys.exit(0)
else:
print('bad argument:',arg,file=sys.stderr)
Usage()
runDetails.tableName=extra[0]
if not runDetails.balDb:
runDetails.balDb=runDetails.dbName
if __name__ == '__main__':
if len(sys.argv) < 2:
Usage()
_runDetails.cmd = ' '.join(sys.argv)
SetDefaults(_runDetails)
ParseArgs(_runDetails)
ShowVersion(includeArgs=1)
initModels = GetComposites(_runDetails)
nModels = len(initModels)
if nModels>1:
for i in range(nModels):
sys.stderr.write('---------------------------------\n\tDoing %d of %d\n---------------------------------\n'%(i+1,nModels))
composite = GrowIt(_runDetails,initModels[i],setDescNames=1)
if _runDetails.balTable and _runDetails.balCnt:
composites = BalanceComposite(_runDetails,composite)
else:
composites=[composite]
for mdl in composites:
mdl.ClearModelExamples()
if _runDetails.outName:
nWeights = len(_runDetails.balWeight)
if nWeights==1:
outName = _runDetails.outName
composites[0].Pickle(outName)
else:
for i in range(nWeights):
weight = int(100*_runDetails.balWeight[i])
model = composites[i]
outName = '%s.%d.pkl'%(_runDetails.outName.split('.pkl')[0],weight)
model.Pickle(outName)
if _runDetails.persistTblName and _runDetails.dbName:
message('Updating results table %s:%s'%(_runDetails.dbName,_runDetails.persistTblName))
if(len(_runDetails.balWeight))>1:
message('WARNING: updating results table with models having different weights')
# save the composite
for i in range(len(composites)):
_runDetails.model = cPickle.dumps(composites[i])
_runDetails.Store(db=_runDetails.dbName,table=_runDetails.persistTblName)
elif nModels==1:
composite = GrowIt(_runDetails,initModels[0],setDescNames=1)
if _runDetails.balTable and _runDetails.balCnt:
composites = BalanceComposite(_runDetails,composite)
else:
composites=[composite]
for mdl in composites:
mdl.ClearModelExamples()
if _runDetails.outName:
nWeights = len(_runDetails.balWeight)
if nWeights==1:
outName = _runDetails.outName
composites[0].Pickle(outName)
else:
for i in range(nWeights):
weight = int(100*_runDetails.balWeight[i])
model = composites[i]
outName = '%s.%d.pkl'%(_runDetails.outName.split('.pkl')[0],weight)
model.Pickle(outName)
if _runDetails.persistTblName and _runDetails.dbName:
message('Updating results table %s:%s'%(_runDetails.dbName,_runDetails.persistTblName))
if(len(composites))>1:
message('WARNING: updating results table with models having different weights')
for i in range(len(composites)):
_runDetails.model = cPickle.dumps(composites[i])
_runDetails.Store(db=_runDetails.dbName,table=_runDetails.persistTblName)
else:
message("No models found")
|
adalke/rdkit
|
rdkit/ML/GrowComposite.py
|
Python
|
bsd-3-clause
| 19,778
|
[
"RDKit"
] |
ef62428206ad6d8a9496888fc807cbdddedcf27e4e26fbdc7efa39733beea010
|
#-*- coding:utf-8 -*-
# Panedr -- a library to manipulate Gromacs EDR file in python
# Copyright (C) 2016 Jonathan Barnoud
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
# Most of this file is a python rewrite of part of
# `src/gromacs/fileio/enxio.c` from Gromacs 5.1.
# See gromacs.org.
"""
Panedr -- Read Gromacs energy file (EDR) in python
==================================================
The ``panedr`` library allows to read and manipulate the content of Gromacs
energy file (.edr files) in python.
The current version of ``panedr`` tries to be in par with Gromacs 5.1.1 when
it comes to read EDR files.
So far, only one function is exposed by the library : the :fun:`edr_to_df`
function that returns a pandas ``DataFrame`` from an EDR file.
.. autofunction:: edr_to_df
"""
from __future__ import print_function
import xdrlib
import collections
import warnings
import sys
import itertools
import time
import pandas
#Index for the IDs of additional blocks in the energy file.
#Blocks can be added without sacrificing backward and forward
#compatibility of the energy files.
#For backward compatibility, the order of these should not be changed.
(enxOR, # Time and ensemble averaged data for orientation restraints
enxORI, # Instantaneous data for orientation restraints
enxORT, # Order tensor(s) for orientation restraints
ensDISRE, # Distance restraint blocks
enxDHCOLL, # Data about the free energy blocks in this frame
enxDHHIST, # BAR histogram
enxDH, # BAR raw delta H data
enxNR # Total number of extra blocks in the current code,
# note that the enxio code can read files written by
# future code which contain more blocks.
) = range(8)
# xdr_datatype
# note that there is no data type 'real' because
# here we deal with the types as they are actually written to disk.
(xdr_datatype_int, xdr_datatype_float, xdr_datatype_double,
xdr_datatype_int64, xdr_datatype_char, xdr_datatype_string) = range(6)
Enxnm = collections.namedtuple('Enxnm', 'name unit')
ENX_VERSION = 5
__all__ = ['edr_to_df']
class EDRFile(object):
def __init__(self, path):
with open(path, 'rb') as infile:
content = infile.read()
self.data = GMX_Unpacker(content)
self.do_enxnms()
def __iter__(self):
while True:
try:
self.frame = Frame()
self.do_enx()
except EOFError:
return
else:
yield self.frame
def do_enxnms(self):
bReadFirstStep = False
data = self.data
magic = data.unpack_int()
if magic > 0:
# Assume this is an old edr format
file_version = 1
nre = magic
bOldFileOpen = True
else:
bOldFileOpen = False
if magic != -55555:
raise ValueError("Energy names magic number mismatch, this is not a GROMACS edr file")
file_version = ENX_VERSION
file_version = data.unpack_int()
if (file_version > ENX_VERSION):
raise ValueError('Reading file version {} with version {} implementation'.format(file_version, ENX_VERSION))
nre = data.unpack_int()
if file_version != ENX_VERSION:
warnings.warn('Note: enx file_version {}, implementation version {}'.format(file_version, ENX_VERSION))
nms = edr_strings(data, file_version, nre)
self.file_version = file_version
self.nre = nre
self.nms = nms
self.bOldFileOpen = bOldFileOpen
self.bReadFirstStep = False
def do_eheader(self, nre_test):
data = self.data
file_version = self.file_version
fr = self.frame
magic = -7777777
zero = 0
dum = 0
tempfix_nr = 0
ndisre = 0
startb = 0
bWrongPrecision = False
bOK = True
# We decide now whether we're single- or double-precision. Just peek
# ahead and see whether we find the magic number where it should.
base_pos = data.get_position()
data.set_position(base_pos + 4)
        data.gmx_double = not is_frame_magic(data)
        data.set_position(base_pos)
        # A GROMACS 'real' is a float or a double depending on the file
        # precision detected above (the name mirrors dtreal in the C
        # sources; it is used for the old-format blocks further down).
        dtreal = xdr_datatype_double if data.gmx_double else xdr_datatype_float
first_real_to_check = data.unpack_real()
if first_real_to_check > -1e-10:
# Assume we are reading an old format
file_version = 1
fr.t = first_real_to_check
fr.step = data.unpack_int()
else:
if not is_frame_magic(data):
raise ValueError("Energy header magic number mismatch, this is not a GROMACS edr file")
file_version = data.unpack_int()
if file_version > ENX_VERSION:
raise ValueError('Reading file version {} with version {} implementation'.format(file_version, ENX_VERSION))
fr.t = data.unpack_double()
fr.step = data.unpack_hyper()
fr.nsum = data.unpack_int()
if file_version >= 3:
fr.nsteps = data.unpack_hyper()
else:
fr.nsteps = max(1, fr.nsum)
if file_version >= 5:
fr.dt = data.unpack_double()
else:
fr.dt = 0
fr.nre = data.unpack_int()
if file_version < 4:
ndisre = data.unpack_int()
else:
# now reserved for possible future use
data.unpack_int()
fr.nblock = data.unpack_int()
assert fr.nblock >= 0
if ndisre != 0:
if file_version >= 4:
raise ValueError("Distance restraint blocks in old style in new style file")
fr.nblock += 1
# Frames could have nre=0, so we can not rely only on the fr.nre check
if (nre_test >= 0
and ((fr.nre > 0 and fr.nre != nre_test)
or fr.nre < 0 or ndisre < 0 or fr.nblock < 0)):
bWrongPrecision = True
return
# we now know what these should be, or we've already bailed out because
# of wrong precision
if file_version == 1 and (fr.t < 0 or fr.t > 1e20 or fr.step < 0):
raise ValueError("edr file with negative step number or unreasonable time (and without version number).")
fr.add_blocks(fr.nblock)
startb = 0
if ndisre > 0:
# sub[0] is the instantaneous data, sub[1] is time averaged
fr.block[0].add_subblocks(2)
fr.block[0].id = enxDISRE
fr.block[0].sub[0].nr = ndisre
fr.block[0].sub[1].nr = ndisre
fr.block[0].sub[0].type = dtreal
fr.block[0].sub[1].type = dtreal
startb += 1
# read block header info
for b in range(startb, fr.nblock):
if file_version < 4:
# blocks in old version files always have 1 subblock that
# consists of reals.
fr.block[b].add_subblocks(1)
nrint = data.unpack_int()
fr.block[b].id = b - startb
fr.block[b].sub[0].nr = nrint
                fr.block[b].sub[0].type = dtreal
else:
fr.block[b].id = data.unpack_int()
nsub = data.unpack_int()
fr.block[b].nsub = nsub
fr.block[b].add_subblocks(nsub)
for sub in fr.block[b].sub:
typenr = data.unpack_int()
sub.nr = data.unpack_int()
sub.type = typenr
fr.e_size = data.unpack_int()
# now reserved for possible future use
data.unpack_int()
data.unpack_int()
# here, stuff about old versions
def do_enx(self):
data = self.data
fr = self.frame
        # version read from the file header by do_enxnms; the version-1
        # branches below depend on it
        file_version = self.file_version
framenr = 0
frametime = 0
try:
self.do_eheader(-1)
except ValueError:
print("Last energy frame read {} time {:8.3f}".format(framenr - 1,
frametime))
raise RuntimeError()
framenr += 1
frametime = fr.t
bSane = (fr.nre > 0)
for block in fr.block:
bSane |= (block.nsub > 0)
if not (fr.step >= 0 and bSane):
raise ValueError('Something went wrong')
if fr.nre > fr.e_alloc:
for i in range(fr.nre - fr.e_alloc):
fr.ener.append(Energy(0, 0, 0))
fr.e_alloc = fr.nre
for i in range(fr.nre):
fr.ener[i].e = data.unpack_real()
if file_version == 1 or fr.nsum > 0:
fr.ener[i].eav = data.unpack_real()
fr.ener[i].esum = data.unpack_real()
if file_version == 1:
# Old, unused real
data.unpack_real()
# Old version stuff to add later
# Read the blocks
ndo_readers = (ndo_int, ndo_float, ndo_double,
ndo_int64, ndo_char, ndo_string)
for block in fr.block:
for sub in block.sub:
try:
sub.val = ndo_readers[sub.type](data, sub.nr)
except IndexError:
raise ValueError("Reading unknown block data type: this file is corrupted or from the future")
class Energy(object):
    __slots__ = ['e', 'eav', 'esum']
    def __init__(self, e=0, eav=0, esum=0):
        self.e = e
        self.eav = eav
        self.esum = esum
def __repr__(self):
return '<{} e={}, eav={}, esum={}>'.format(type(self).__name__,
self.e, self.eav,
self.esum)
class SubBlock(object):
def __init__(self):
self.nr = 0
self.type = xdr_datatype_float # should be double
# if compile in double
self.val = []
self.val_alloc = 0
def alloc(self):
self.val = [0 for _ in range(self.nr)]
        self.val_alloc = self.nr
class Block(object):
def __init__(self):
# See enxblock_init
self.id = enxOR
self.nsub = 0
self.sub = []
self.nsub_alloc = 0
def add_subblocks(self, final_number):
# See add_subblocks_enxblock
self.nsub = final_number
if final_number > self.nsub_alloc:
for _ in range(final_number - self.nsub_alloc):
self.sub.append(SubBlock())
self.nsub_alloc = final_number
class Frame(object):
def __init__(self):
# See init_enxframe
self.e_alloc = 0
self.ener = []
self.nblock = 0
self.nblock_alloc = 0
self.block = []
def add_blocks(self, final_number):
# See add_blocks_enxframe
self.nblock = final_number
if final_number > self.nblock_alloc:
for _ in range(final_number - self.nblock_alloc):
self.block.append(Block())
self.nblock_alloc = final_number
class GMX_Unpacker(xdrlib.Unpacker):
"""xdrlib.Unpacker subclass that implements `unpack_real`
Decision on whether to return 32- or 64-bit reals is controlled by the
`gmx_double` attribute, set to ``False`` by default.
"""
gmx_double = False
def unpack_real(self):
if self.gmx_double:
return self.unpack_double()
return self.unpack_float()
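# A small sketch of the precision toggle (`raw_bytes` is a placeholder for
# bytes read from an .edr file):
#
#     unpacker = GMX_Unpacker(raw_bytes)
#     unpacker.gmx_double = True   # decode GROMACS 'real' as 64-bit
#     value = unpacker.unpack_real()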
def ndo_int(data, n):
"""mimic of gmx_fio_ndo_int in gromacs"""
return [data.unpack_int() for i in range(n)]
def ndo_float(data, n):
"""mimic of gmx_fio_ndo_float in gromacs"""
return [data.unpack_float() for i in range(n)]
def ndo_double(data, n):
"""mimic of gmx_fio_ndo_double in gromacs"""
return [data.unpack_double() for i in range(n)]
def ndo_int64(data, n):
"""mimic of gmx_fio_ndo_int64 in gromacs"""
    return [data.unpack_hyper() for i in range(n)]
def ndo_char(data, n):
"""mimic of gmx_fio_ndo_char in gromacs"""
return [data.unpack_char() for i in range(n)]
def ndo_string(data, n):
"""mimic of gmx_fio_ndo_string in gromacs"""
return [data.unpack_string() for i in range(n)]
def edr_strings(data, file_version, n):
nms = []
for i in range(n):
name = data.unpack_string().decode('ascii')
if file_version >= 2:
unit = data.unpack_string().decode('ascii')
else:
unit = 'kJ/mol'
nms.append(Enxnm(name=name, unit=unit))
return nms
def is_frame_magic(data):
"""Unpacks an int and checks whether it matches the EDR frame magic number
Does not roll the reading position back.
"""
magic = data.unpack_int()
return magic == -7777777
def edr_to_df(path, verbose=False):
begin = time.time()
edr_file = EDRFile(str(path))
all_energies = []
all_names = [u'Time'] + [nm.name for nm in edr_file.nms]
times = []
for ifr, frame in enumerate(edr_file):
if verbose:
if ((ifr < 20 or ifr % 10 == 0) and
(ifr < 200 or ifr % 100 == 0) and
(ifr < 2000 or ifr % 1000 == 0)):
print('\rRead frame : {}, time : {} ps'.format(ifr, frame.t),
end='', file=sys.stderr)
if frame.ener:
# Export only frames that contain energies
times.append(frame.t)
all_energies.append([frame.t] + [ener.e for ener in frame.ener])
end = time.time()
if verbose:
print('\rLast Frame read : {}, time : {} ps'
.format(ifr, frame.t),
end='', file=sys.stderr)
print('\n{} frame read in {:.2f} seconds'.format(ifr, end - begin),
file=sys.stderr)
df = pandas.DataFrame(all_energies, columns=all_names, index=times)
return df
|
jbarnoud/panedr
|
panedr/panedr.py
|
Python
|
lgpl-2.1
| 14,511
|
[
"Gromacs"
] |
4318e8a77982cf2a0b5bb5868c93e8049db80e83b939d2addd3386c70df408be
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Provide trainers which estimate parameters based on training sequences.
These should be used to 'train' a Markov Model prior to actually using
it to decode state paths. When supplied training sequences and a model
to work from, these classes will estimate parameters of the model.
This aims to estimate two parameters:
- a_{kl} -- the number of times there is a transition from k to l in the
training data.
- e_{k}(b) -- the number of emissions of the state b from the letter k
in the training data.
"""
# standard modules
import math
# local stuff
from .DynamicProgramming import ScaledDPAlgorithms
class TrainingSequence(object):
"""Hold a training sequence with emissions and optionally, a state path.
"""
def __init__(self, emissions, state_path):
"""Initialize a training sequence.
Arguments:
o emissions - A Seq object containing the sequence of emissions in
the training sequence, and the alphabet of the sequence.
o state_path - A Seq object containing the sequence of states and
the alphabet of the states. If there is no known state path, then
the sequence of states should be an empty string.
"""
if len(state_path) > 0:
assert len(emissions) == len(state_path), \
"State path does not match associated emissions."
self.emissions = emissions
self.states = state_path
class AbstractTrainer(object):
"""Provide generic functionality needed in all trainers.
"""
def __init__(self, markov_model):
self._markov_model = markov_model
def log_likelihood(self, probabilities):
"""Calculate the log likelihood of the training seqs.
Arguments:
o probabilities -- A list of the probabilities of each training
sequence under the current parameters, calculated using the forward
algorithm.
"""
total_likelihood = 0
for probability in probabilities:
total_likelihood += math.log(probability)
return total_likelihood
def estimate_params(self, transition_counts, emission_counts):
"""Get a maximum likelihood estimation of transition and emmission.
Arguments:
o transition_counts -- A dictionary with the total number of counts
of transitions between two states.
        o emission_counts -- A dictionary with the total number of counts
        of emissions of a particular emission letter by a state letter.
This then returns the maximum likelihood estimators for the
transitions and emissions, estimated by formulas 3.18 in
Durbin et al:
a_{kl} = A_{kl} / sum(A_{kl'})
e_{k}(b) = E_{k}(b) / sum(E_{k}(b'))
Returns:
Transition and emission dictionaries containing the maximum
likelihood estimators.
"""
# now calculate the information
ml_transitions = self.ml_estimator(transition_counts)
ml_emissions = self.ml_estimator(emission_counts)
return ml_transitions, ml_emissions
def ml_estimator(self, counts):
"""Calculate the maximum likelihood estimator.
This can calculate maximum likelihoods for both transitions
and emissions.
Arguments:
o counts -- A dictionary of the counts for each item.
See estimate_params for a description of the formula used for
calculation.
"""
# get an ordered list of all items
all_ordered = sorted(counts)
ml_estimation = {}
# the total counts for the current letter we are on
cur_letter = None
cur_letter_counts = 0
for cur_item in all_ordered:
# if we are on a new letter (ie. the first letter of the tuple)
if cur_item[0] != cur_letter:
# set the new letter we are working with
cur_letter = cur_item[0]
# count up the total counts for this letter
cur_letter_counts = counts[cur_item]
# add counts for all other items with the same first letter
cur_position = all_ordered.index(cur_item) + 1
# keep adding while we have the same first letter or until
# we get to the end of the ordered list
while (cur_position < len(all_ordered) and
all_ordered[cur_position][0] == cur_item[0]):
cur_letter_counts += counts[all_ordered[cur_position]]
cur_position += 1
# otherwise we've already got the total counts for this letter
else:
pass
# now calculate the ml and add it to the estimation
cur_ml = float(counts[cur_item]) / float(cur_letter_counts)
ml_estimation[cur_item] = cur_ml
return ml_estimation
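# Worked example of the estimator above (hypothetical counts, not part of the
# original module): for counts {('A', 'A'): 3, ('A', 'B'): 1} the total for
# first letter 'A' is 4, so ml_estimator returns
# {('A', 'A'): 0.75, ('A', 'B'): 0.25}.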
class BaumWelchTrainer(AbstractTrainer):
"""Trainer that uses the Baum-Welch algorithm to estimate parameters.
These should be used when a training sequence for an HMM has unknown
paths for the actual states, and you need to make an estimation of the
model parameters from the observed emissions.
This uses the Baum-Welch algorithm, first described in
    Baum, L.E. 1972. Inequalities 3:1-8
This is based on the description in 'Biological Sequence Analysis' by
Durbin et al. in section 3.3
This algorithm is guaranteed to converge to a local maximum, but not
    necessarily to the global maximum, so use with care!
"""
def __init__(self, markov_model):
"""Initialize the trainer.
Arguments:
o markov_model - The model we are going to estimate parameters for.
This should have the parameters with some initial estimates, that
we can build from.
"""
AbstractTrainer.__init__(self, markov_model)
def train(self, training_seqs, stopping_criteria,
dp_method=ScaledDPAlgorithms):
"""Estimate the parameters using training sequences.
The algorithm for this is taken from Durbin et al. p64, so this
is a good place to go for a reference on what is going on.
Arguments:
o training_seqs -- A list of TrainingSequence objects to be used
for estimating the parameters.
o stopping_criteria -- A function, that when passed the change
in log likelihood and threshold, will indicate if we should stop
the estimation iterations.
o dp_method -- A class instance specifying the dynamic programming
implementation we should use to calculate the forward and
backward variables. By default, we use the scaling method.
"""
prev_log_likelihood = None
num_iterations = 1
while True:
transition_count = self._markov_model.get_blank_transitions()
emission_count = self._markov_model.get_blank_emissions()
# remember all of the sequence probabilities
all_probabilities = []
for training_seq in training_seqs:
# calculate the forward and backward variables
DP = dp_method(self._markov_model, training_seq)
forward_var, seq_prob = DP.forward_algorithm()
backward_var = DP.backward_algorithm()
all_probabilities.append(seq_prob)
# update the counts for transitions and emissions
transition_count = self.update_transitions(transition_count,
training_seq,
forward_var,
backward_var,
seq_prob)
emission_count = self.update_emissions(emission_count,
training_seq,
forward_var,
backward_var,
seq_prob)
# update the markov model with the new probabilities
ml_transitions, ml_emissions = \
self.estimate_params(transition_count, emission_count)
self._markov_model.transition_prob = ml_transitions
self._markov_model.emission_prob = ml_emissions
cur_log_likelihood = self.log_likelihood(all_probabilities)
# if we have previously calculated the log likelihood (ie.
# not the first round), see if we can finish
if prev_log_likelihood is not None:
# XXX log likelihoods are negatives -- am I calculating
# the change properly, or should I use the negatives...
# I'm not sure at all if this is right.
log_likelihood_change = abs(abs(cur_log_likelihood) -
abs(prev_log_likelihood))
# check whether we have completed enough iterations to have
# a good estimation
if stopping_criteria(log_likelihood_change, num_iterations):
break
# set up for another round of iterations
prev_log_likelihood = cur_log_likelihood
num_iterations += 1
return self._markov_model
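    # A minimal sketch of a stopping criterion usable with train() above
    # (hypothetical thresholds; any callable with this signature works):
    #     def stop_training(log_likelihood_change, num_iterations):
    #         return log_likelihood_change < 0.01 or num_iterations >= 10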
def update_transitions(self, transition_counts, training_seq,
forward_vars, backward_vars, training_seq_prob):
"""Add the contribution of a new training sequence to the transitions.
Arguments:
o transition_counts -- A dictionary of the current counts for the
transitions
o training_seq -- The training sequence we are working with
o forward_vars -- Probabilities calculated using the forward
algorithm.
o backward_vars -- Probabilities calculated using the backwards
algorithm.
o training_seq_prob - The probability of the current sequence.
This calculates A_{kl} (the estimated transition counts from state
k to state l) using formula 3.20 in Durbin et al.
"""
# set up the transition and emission probabilities we are using
transitions = self._markov_model.transition_prob
emissions = self._markov_model.emission_prob
# loop over the possible combinations of state path letters
for k in training_seq.states.alphabet.letters:
for l in self._markov_model.transitions_from(k):
estimated_counts = 0
# now loop over the entire training sequence
for i in range(len(training_seq.emissions) - 1):
# the forward value of k at the current position
forward_value = forward_vars[(k, i)]
# the backward value of l in the next position
backward_value = backward_vars[(l, i + 1)]
# the probability of a transition from k to l
trans_value = transitions[(k, l)]
# the probability of getting the emission at the next pos
emm_value = emissions[(l, training_seq.emissions[i + 1])]
estimated_counts += (forward_value * trans_value *
emm_value * backward_value)
# update the transition approximation
transition_counts[(k, l)] += (float(estimated_counts) /
training_seq_prob)
return transition_counts
def update_emissions(self, emission_counts, training_seq,
forward_vars, backward_vars, training_seq_prob):
"""Add the contribution of a new training sequence to the emissions
Arguments:
o emission_counts -- A dictionary of the current counts for the
emissions
o training_seq -- The training sequence we are working with
o forward_vars -- Probabilities calculated using the forward
algorithm.
o backward_vars -- Probabilities calculated using the backwards
algorithm.
o training_seq_prob - The probability of the current sequence.
This calculates E_{k}(b) (the estimated emission probability for
emission letter b from state k) using formula 3.21 in Durbin et al.
"""
# loop over the possible combinations of state path letters
for k in training_seq.states.alphabet.letters:
# now loop over all of the possible emissions
for b in training_seq.emissions.alphabet.letters:
expected_times = 0
# finally loop over the entire training sequence
for i in range(len(training_seq.emissions)):
# only count the forward and backward probability if the
# emission at the position is the same as b
if training_seq.emissions[i] == b:
# f_{k}(i) b_{k}(i)
expected_times += (forward_vars[(k, i)] *
backward_vars[(k, i)])
# add to E_{k}(b)
emission_counts[(k, b)] += (float(expected_times) /
training_seq_prob)
return emission_counts
class KnownStateTrainer(AbstractTrainer):
"""Estimate probabilities with known state sequences.
This should be used for direct estimation of emission and transition
probabilities when both the state path and emission sequence are
known for the training examples.
"""
def __init__(self, markov_model):
AbstractTrainer.__init__(self, markov_model)
def train(self, training_seqs):
"""Estimate the Markov Model parameters with known state paths.
This trainer requires that both the state and the emissions are
known for all of the training sequences in the list of
TrainingSequence objects.
This training will then count all of the transitions and emissions,
and use this to estimate the parameters of the model.
"""
# count up all of the transitions and emissions
transition_counts = self._markov_model.get_blank_transitions()
emission_counts = self._markov_model.get_blank_emissions()
for training_seq in training_seqs:
emission_counts = self._count_emissions(training_seq,
emission_counts)
transition_counts = self._count_transitions(training_seq.states,
transition_counts)
# update the markov model from the counts
ml_transitions, ml_emissions = \
self.estimate_params(transition_counts,
emission_counts)
self._markov_model.transition_prob = ml_transitions
self._markov_model.emission_prob = ml_emissions
return self._markov_model
def _count_emissions(self, training_seq, emission_counts):
"""Add emissions from the training sequence to the current counts.
Arguments:
o training_seq -- A TrainingSequence with states and emissions
to get the counts from
o emission_counts -- The current emission counts to add to.
"""
for index in range(len(training_seq.emissions)):
cur_state = training_seq.states[index]
cur_emission = training_seq.emissions[index]
try:
emission_counts[(cur_state, cur_emission)] += 1
except KeyError:
raise KeyError("Unexpected emission (%s, %s)"
% (cur_state, cur_emission))
return emission_counts
def _count_transitions(self, state_seq, transition_counts):
"""Add transitions from the training sequence to the current counts.
Arguments:
o state_seq -- A Seq object with the states of the current training
sequence.
o transition_counts -- The current transition counts to add to.
"""
for cur_pos in range(len(state_seq) - 1):
cur_state = state_seq[cur_pos]
next_state = state_seq[cur_pos + 1]
try:
transition_counts[(cur_state, next_state)] += 1
except KeyError:
raise KeyError("Unexpected transition (%s, %s)" %
(cur_state, next_state))
return transition_counts
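# Minimal end-to-end sketch of KnownStateTrainer (hypothetical objects; assumes
# a MarkovModel and Seq objects with matching alphabets built elsewhere):
#     trainer = KnownStateTrainer(markov_model)
#     trained = trainer.train([TrainingSequence(emissions, state_path)])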
|
zjuchenyuan/BioWeb
|
Lib/Bio/HMM/Trainer.py
|
Python
|
mit
| 16,877
|
[
"Biopython"
] |
c6550395919ca0aaa146f7f47449943a746eb73d0ceecf41f8add56756ffa116
|
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Generalized gradient testing applied to bias layer
"""
import itertools as itt
import numpy as np
import pytest
from neon import NervanaObject
from neon.layers.layer import Bias
from neon.initializers.initializer import Gaussian
from grad_funcs import general_gradient_comp
# add a reset method to the layer classes
# this is used to reset the layer so that
# running fprop and bprop multiple times
# produces repeatable results
# some layers just need the function defined
class BiasWithReset(Bias):
def reset(self):
self.y = None
def pytest_generate_tests(metafunc):
# main test generator
# generates the parameter combos for
# the tests based on whether the
# "--all" option is given to py.test
# that option is added in conftest.py
# global parameter
if metafunc.config.option.all:
bsz_rng = [16, 32, 64]
else:
bsz_rng = [16]
# mlp tests
if 'biasargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
nin_rng = [1, 2, 3, 4, 15, 16, 17, 32]
else:
nin_rng = [1, 2, 32]
# generate the params lists
fargs = itt.product(nin_rng, bsz_rng)
# parameterize the call for all test functions
# with mlpargs as an argument
metafunc.parametrize("biasargs", fargs)
def test_bias(backend_cpu64, biasargs):
n, batch_size = biasargs
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
init = Gaussian()
layer = BiasWithReset(init=init)
inp = np.random.randn(n, batch_size)
epsilon = 1.0e-5
pert_frac = 0.1 # test 10% of the inputs
# select pert_frac fraction of inps to perturb
pert_cnt = int(np.ceil(inp.size * pert_frac))
pert_inds = np.random.permutation(inp.size)[0:pert_cnt]
(max_abs, max_rel) = general_gradient_comp(layer,
inp,
epsilon=epsilon,
lshape=inp.shape,
pert_inds=pert_inds)
assert max_abs < 1.0e-7
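# For reference, the numerical gradient that general_gradient_comp compares the
# analytic bprop gradient against is conceptually the centered finite
# difference (a sketch; the exact implementation lives in grad_funcs.py):
#     num_grad = (f(x + epsilon) - f(x - epsilon)) / (2.0 * epsilon)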
@pytest.mark.xfail(reason="Precision differences with MKL backend. #914")
def test_bias_mkl(backend_mkl, biasargs):
n, batch_size = biasargs
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
init = Gaussian()
layer = BiasWithReset(init=init)
inp = np.random.randn(n, batch_size)
epsilon = 1.0e-5
pert_frac = 0.1 # test 10% of the inputs
# select pert_frac fraction of inps to perturb
pert_cnt = int(np.ceil(inp.size * pert_frac))
pert_inds = np.random.permutation(inp.size)[0:pert_cnt]
(max_abs, max_rel) = general_gradient_comp(layer,
inp,
epsilon=epsilon,
lshape=inp.shape,
pert_inds=pert_inds)
assert max_abs < 1.0e-7
|
NervanaSystems/neon
|
tests/test_gradient_bias.py
|
Python
|
apache-2.0
| 3,767
|
[
"Gaussian"
] |
bb1e8a7282e018b126b600141d9c0e1ba5cf6359818488b66a8a18f42ebb171f
|
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-09-20 13:24:13
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-11-15 12:00:27
from __future__ import absolute_import, division, print_function
import copy
from marvin import config
from marvin.utils.datamodel.dap import datamodel
from marvin.utils.datamodel.query.base import query_params
from .base import QueryDataModel
# Current Total Parameter Count (with spaxel properties and without)
# all, no spaxelprop, no daptables
# MPL-4 - 571 (309) (309)
# MPL-5 - 703 (322) (301)
# MPL-6 - 1676 (1008) (1031)
# MPL-7 - 1676 (1008) (1031)
# DR15 - 1676 (1008) (1031)
# MPL-8 - xx (xx) (1031)
def groups():
return copy.deepcopy(query_params)
# exclude the DAP tables
daptables = ['modelcube', 'modelspaxel', 'redcorr']
if not config._allow_DAP_queries:
# add the spaxelprop table to list of things to remove
daptables.append('spaxelprop')
# MPL-4
# list of tables to exclude
BASE_EXCLUDE = ['anime', 'catalogue', 'pipeline', 'maskbit', 'hdu', 'query', 'user',
'extcol', 'exttype', 'extname', 'cube_shape', 'spaxelprops', 'testtable']
EXCLUDE = daptables + ['obsinfo', 'dapall'] + BASE_EXCLUDE
MPL4 = QueryDataModel(release='MPL-4', groups=groups(), aliases=['MPL4', 'v1_5_1', '1.1.1'], exclude=EXCLUDE, dapdm=datamodel['MPL-4'])
# MPL-5
# list of tables to exclude
if not config._allow_DAP_queries:
# remove the spaxelprop table from the list to re-include
daptables.remove('spaxelprop')
dapset = set(daptables) if config._allow_DAP_queries else set()
EX5 = set(EXCLUDE) - dapset | set(['executionplan', 'current_default'])
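# Note on the expression above: '-' binds tighter than '|', so this reads
# (set(EXCLUDE) - dapset) | {'executionplan', 'current_default'}, e.g.
# ({'a', 'b'} - {'b'}) | {'c'} == {'a', 'c'}.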
MPL5 = QueryDataModel(release='MPL-5', groups=groups(), aliases=['MPL5', 'v2_0_2', '2.0.1'], exclude=EX5, dapdm=datamodel['MPL-5'])
# MPL-6
# list of tables to exclude
EX6 = set(EX5) - set(['obsinfo', 'dapall'])
MPL6 = QueryDataModel(release='MPL-6', groups=groups(), aliases=['MPL6', 'v2_3_1', '2.1.3'], exclude=EX6, dapdm=datamodel['MPL-6'])
# MPL-7
MPL7 = QueryDataModel(release='MPL-7', groups=groups(), aliases=['MPL7', 'v2_4_3', '2.2.0'], exclude=EX6, dapdm=datamodel['MPL-7'])
# DR15
DR15 = QueryDataModel(release='DR15', groups=groups(), aliases=['DR15', 'v2_4_3', '2.2.0'], exclude=EX6, dapdm=datamodel['DR15'])
# MPL-8
MPL8 = QueryDataModel(release='MPL-8', groups=groups(),
aliases=['MPL8', 'v2_5_3', '2.3.0'], exclude=EX6, dapdm=datamodel['MPL-8'])
# DR16
DR16 = QueryDataModel(release='DR16', groups=groups(), aliases=['DR16', 'v2_4_3', '2.2.0'], exclude=EX6, dapdm=datamodel['DR16'])
# MPL-9
MPL9 = QueryDataModel(release='MPL-9', groups=groups(),
aliases=['MPL9', 'v2_7_1', '2.4.1'], exclude=EX6, dapdm=datamodel['MPL-9'])
# MPL-10
MPL10 = QueryDataModel(release='MPL-10', groups=groups(),
aliases=['MPL10', 'v3_0_1', '3.0.1'], exclude=EX6, dapdm=datamodel['MPL-10'])
# MPL-11
MPL11 = QueryDataModel(release='MPL-11', groups=groups(),
aliases=['DR17', 'MPL11', 'v3_1_1', '3.1.0'], exclude=EX6, dapdm=datamodel['MPL-11'])
# DR17
DR17 = QueryDataModel(release='DR17', groups=groups(),
aliases=['DR17', 'MPL11', 'v3_1_1', '3.1.0'], exclude=EX6, dapdm=datamodel['DR17'])
|
sdss/marvin
|
python/marvin/utils/datamodel/query/MPL.py
|
Python
|
bsd-3-clause
| 3,336
|
[
"Brian"
] |
66d0d108dc34cacc27743d8853f632ec9177f920efdf0e79a7bd8ee279fcf615
|
class gmx_atp():
    def __init__(self, top):
        self.top = top
    def write_atp(self, filename):
        def r(string, length):
            # right-pad a value with spaces to the requested column width
            string = str(string)
            while len(string) < length:
                string = string + ' '
            return string
        def l(string, length):
            # left-pad a value with spaces to the requested column width
            string = str(string)
            while len(string) < length:
                string = ' ' + string
            return string
        print('writing', filename)
        f = open(filename, 'w')
        # collect unique atom-type lines; the dict de-duplicates entries
        atp = {}
        for i in self.top.atoms_list:
            atp[r(i.type, 10) + l('%2.5f' % i.mass, 16) + '\t; TTT'] = ''
        for i in atp:
            # only write entries which are not already known by ffamber99sb,
            # i.e. skip all-uppercase names and anything containing 'amber'
            # (alternative filter kept from the original:
            #  i not in ['CT', 'H1', 'C', 'O', 'N', 'N3', 'HC'])
            if i.upper() != i and i.find('amber') < 0:
                f.write(i + '\n')
        f.close()
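# Minimal usage sketch (hypothetical topology object; assumes `my_topology`
# exposes an `atoms_list` of atoms with `type` and `mass` attributes):
#     writer = gmx_atp(my_topology)
#     writer.write_atp('ligand.atp')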
|
t-/gromacs_ligand_param
|
tools/top2itp/gmx_atp.py
|
Python
|
gpl-3.0
| 903
|
[
"Amber"
] |
b016072e0b1e266b477510e416c85ac3b49c7f26fc73b958c0733a53dac1f02c
|
#
# @file TestXMLNamespaces.py
# @brief XMLNamespaces unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Michael Hucka <mhucka@caltech.edu>
#
# $Id: TestXMLNamespaces.py 11441 2010-07-09 02:22:23Z mhucka $
# $HeadURL: https://sbml.svn.sourceforge.net/svnroot/sbml/trunk/libsbml/src/bindings/python/test/xml/TestXMLNamespaces.py $
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/xml/test/TestXMLNamespaces.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestXMLNamespaces(unittest.TestCase):
global NS
NS = None
def setUp(self):
self.NS = libsbml.XMLNamespaces()
if (self.NS == None):
pass
pass
def tearDown(self):
_dummyList = [ self.NS ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNamespaces_add(self):
self.assert_( self.NS.getLength() == 0 )
self.assert_( self.NS.isEmpty() == True )
self.NS.add( "http://test1.org/", "test1")
self.assert_( self.NS.getLength() == 1 )
self.assert_( self.NS.isEmpty() == False )
self.NS.add( "http://test2.org/", "test2")
self.assert_( self.NS.getLength() == 2 )
self.assert_( self.NS.isEmpty() == False )
self.NS.add( "http://test1.org/", "test1a")
self.assert_( self.NS.getLength() == 3 )
self.assert_( self.NS.isEmpty() == False )
self.NS.add( "http://test1.org/", "test1a")
self.assert_( self.NS.getLength() == 3 )
self.assert_( self.NS.isEmpty() == False )
self.assert_( (self.NS.getIndex( "http://test1.org/") == -1) == False )
pass
def test_XMLNamespaces_add1(self):
self.assert_( self.NS.getLength() == 0 )
self.assert_( self.NS.isEmpty() == True )
i = self.NS.add( "http://test1.org/", "test1")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.NS.getLength() == 1 )
self.assert_( self.NS.isEmpty() == False )
pass
def test_XMLNamespaces_baseline(self):
self.assert_( self.NS.getLength() == 0 )
self.assert_( self.NS.isEmpty() == True )
pass
def test_XMLNamespaces_clear(self):
self.NS.add( "http://test1.org/", "test1")
self.NS.add( "http://test2.org/", "test2")
self.NS.add( "http://test3.org/", "test3")
self.NS.add( "http://test4.org/", "test4")
self.NS.add( "http://test5.org/", "test5")
self.assert_( self.NS.getLength() == 5 )
i = self.NS.clear()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.NS.getLength() == 0 )
pass
def test_XMLNamespaces_get(self):
self.NS.add( "http://test1.org/", "test1")
self.NS.add( "http://test2.org/", "test2")
self.NS.add( "http://test3.org/", "test3")
self.NS.add( "http://test4.org/", "test4")
self.NS.add( "http://test5.org/", "test5")
self.NS.add( "http://test6.org/", "test6")
self.NS.add( "http://test7.org/", "test7")
self.NS.add( "http://test8.org/", "test8")
self.NS.add( "http://test9.org/", "test9")
self.assert_( self.NS.getLength() == 9 )
self.assert_( self.NS.getIndex( "http://test1.org/") == 0 )
self.assert_( ( "test2" != self.NS.getPrefix(1) ) == False )
self.assert_( ( "test1" != self.NS.getPrefix( "http://test1.org/") ) == False )
self.assert_( ( "http://test2.org/" != self.NS.getURI(1) ) == False )
self.assert_( ( "http://test2.org/" != self.NS.getURI( "test2") ) == False )
self.assert_( self.NS.getIndex( "http://test1.org/") == 0 )
self.assert_( self.NS.getIndex( "http://test2.org/") == 1 )
self.assert_( self.NS.getIndex( "http://test5.org/") == 4 )
self.assert_( self.NS.getIndex( "http://test9.org/") == 8 )
self.assert_( self.NS.getIndex( "http://testX.org/") == -1 )
self.assert_( self.NS.hasURI( "http://test1.org/") != False )
self.assert_( self.NS.hasURI( "http://test2.org/") != False )
self.assert_( self.NS.hasURI( "http://test5.org/") != False )
self.assert_( self.NS.hasURI( "http://test9.org/") != False )
self.assert_( self.NS.hasURI( "http://testX.org/") == False )
self.assert_( self.NS.getIndexByPrefix( "test1") == 0 )
self.assert_( self.NS.getIndexByPrefix( "test5") == 4 )
self.assert_( self.NS.getIndexByPrefix( "test9") == 8 )
self.assert_( self.NS.getIndexByPrefix( "testX") == -1 )
self.assert_( self.NS.hasPrefix( "test1") != False )
self.assert_( self.NS.hasPrefix( "test5") != False )
self.assert_( self.NS.hasPrefix( "test9") != False )
self.assert_( self.NS.hasPrefix( "testX") == False )
self.assert_( self.NS.hasNS( "http://test1.org/", "test1") != False )
self.assert_( self.NS.hasNS( "http://test5.org/", "test5") != False )
self.assert_( self.NS.hasNS( "http://test9.org/", "test9") != False )
self.assert_( self.NS.hasNS( "http://testX.org/", "testX") == False )
pass
def test_XMLNamespaces_remove(self):
self.NS.add( "http://test1.org/", "test1")
self.NS.add( "http://test2.org/", "test2")
self.NS.add( "http://test3.org/", "test3")
self.NS.add( "http://test4.org/", "test4")
self.NS.add( "http://test5.org/", "test5")
self.assert_( self.NS.getLength() == 5 )
self.NS.remove(4)
self.assert_( self.NS.getLength() == 4 )
self.NS.remove(3)
self.assert_( self.NS.getLength() == 3 )
self.NS.remove(2)
self.assert_( self.NS.getLength() == 2 )
self.NS.remove(1)
self.assert_( self.NS.getLength() == 1 )
self.NS.remove(0)
self.assert_( self.NS.getLength() == 0 )
self.NS.add( "http://test1.org/", "test1")
self.NS.add( "http://test2.org/", "test2")
self.NS.add( "http://test3.org/", "test3")
self.NS.add( "http://test4.org/", "test4")
self.NS.add( "http://test5.org/", "test5")
self.assert_( self.NS.getLength() == 5 )
self.NS.remove(0)
self.assert_( self.NS.getLength() == 4 )
self.NS.remove(0)
self.assert_( self.NS.getLength() == 3 )
self.NS.remove(0)
self.assert_( self.NS.getLength() == 2 )
self.NS.remove(0)
self.assert_( self.NS.getLength() == 1 )
self.NS.remove(0)
self.assert_( self.NS.getLength() == 0 )
pass
def test_XMLNamespaces_remove1(self):
self.NS.add( "http://test1.org/", "test1")
self.NS.add( "http://test2.org/", "test2")
self.assert_( self.NS.getLength() == 2 )
i = self.NS.remove(4)
self.assert_( i == libsbml.LIBSBML_INDEX_EXCEEDS_SIZE )
self.assert_( self.NS.getLength() == 2 )
i = self.NS.remove( "test4")
self.assert_( i == libsbml.LIBSBML_INDEX_EXCEEDS_SIZE )
self.assert_( self.NS.getLength() == 2 )
i = self.NS.remove(1)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.NS.getLength() == 1 )
i = self.NS.remove( "test1")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.NS.getLength() == 0 )
pass
def test_XMLNamespaces_remove_by_prefix(self):
self.NS.add( "http://test1.org/", "test1")
self.NS.add( "http://test2.org/", "test2")
self.NS.add( "http://test3.org/", "test3")
self.NS.add( "http://test4.org/", "test4")
self.NS.add( "http://test5.org/", "test5")
self.assert_( self.NS.getLength() == 5 )
self.NS.remove( "test1")
self.assert_( self.NS.getLength() == 4 )
self.NS.remove( "test2")
self.assert_( self.NS.getLength() == 3 )
self.NS.remove( "test3")
self.assert_( self.NS.getLength() == 2 )
self.NS.remove( "test4")
self.assert_( self.NS.getLength() == 1 )
self.NS.remove( "test5")
self.assert_( self.NS.getLength() == 0 )
self.NS.add( "http://test1.org/", "test1")
self.NS.add( "http://test2.org/", "test2")
self.NS.add( "http://test3.org/", "test3")
self.NS.add( "http://test4.org/", "test4")
self.NS.add( "http://test5.org/", "test5")
self.assert_( self.NS.getLength() == 5 )
self.NS.remove( "test5")
self.assert_( self.NS.getLength() == 4 )
self.NS.remove( "test4")
self.assert_( self.NS.getLength() == 3 )
self.NS.remove( "test3")
self.assert_( self.NS.getLength() == 2 )
self.NS.remove( "test2")
self.assert_( self.NS.getLength() == 1 )
self.NS.remove( "test1")
self.assert_( self.NS.getLength() == 0 )
self.NS.add( "http://test1.org/", "test1")
self.NS.add( "http://test2.org/", "test2")
self.NS.add( "http://test3.org/", "test3")
self.NS.add( "http://test4.org/", "test4")
self.NS.add( "http://test5.org/", "test5")
self.assert_( self.NS.getLength() == 5 )
self.NS.remove( "test3")
self.assert_( self.NS.getLength() == 4 )
self.NS.remove( "test1")
self.assert_( self.NS.getLength() == 3 )
self.NS.remove( "test4")
self.assert_( self.NS.getLength() == 2 )
self.NS.remove( "test5")
self.assert_( self.NS.getLength() == 1 )
self.NS.remove( "test2")
self.assert_( self.NS.getLength() == 0 )
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestXMLNamespaces))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
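# Quick interactive sketch of the API exercised above (mirrors the calls used
# in the tests; assumes libsbml is importable):
#     ns = libsbml.XMLNamespaces()
#     ns.add("http://test1.org/", "test1")
#     ns.getURI("test1")     # -> "http://test1.org/"
#     ns.getPrefix(0)        # -> "test1"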
|
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/xml/TestXMLNamespaces.py
|
Python
|
gpl-3.0
| 10,223
|
[
"VisIt"
] |
ae68a654c8dc24e73827f59200fe089aa1f8bcd1055ac078282f0a599c034b79
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = [
"ndtr",
"ndtri",
"log_ndtr",
"log_cdf_laplace",
]
# log_ndtr uses different functions over the ranges
# (-infty, lower](lower, upper](upper, infty)
# Lower bound values were chosen by examining where the support of ndtr
# appears to be zero, relative to scipy's (which is always 64bit). They were
# then made more conservative just to be safe. (Conservative means use the
# expansion more than we probably need to.) See `NdtrTest` in
# special_math_test.py.
LOGNDTR_FLOAT64_LOWER = -20
LOGNDTR_FLOAT32_LOWER = -10
# Upper bound values were chosen by examining for which values of 'x'
# Log[cdf(x)] is 0, after which point we need to use the approximation
# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a slightly
# conservative value, meaning we use the approximation earlier than needed.
LOGNDTR_FLOAT64_UPPER = 8
LOGNDTR_FLOAT32_UPPER = 5
def ndtr(x, name="ndtr"):
"""Normal distribution function.
Returns the area under the Gaussian probability density function, integrated
from minus infinity to x:
```
1 / x
ndtr(x) = ---------- | exp(-0.5 t**2) dt
sqrt(2 pi) /-inf
= 0.5 (1 + erf(x / sqrt(2)))
= 0.5 erfc(x / sqrt(2))
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return _ndtr(x)
def _ndtr(x):
"""Implements ndtr core logic."""
half_sqrt_2 = constant_op.constant(
0.5 * math.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
w = x * half_sqrt_2
z = math_ops.abs(w)
y = array_ops.where(math_ops.less(z, half_sqrt_2),
1. + math_ops.erf(w),
array_ops.where(math_ops.greater(w, 0.),
2. - math_ops.erfc(z),
math_ops.erfc(z)))
return 0.5 * y
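# Spot check of _ndtr (by hand): at x = 0, w = z = 0 and erf(0) = 0, so the
# first branch gives 0.5 * (1 + 0) = 0.5, the median of a standard normal.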
def ndtri(p, name="ndtri"):
"""The inverse of the CDF of the Normal distribution function.
Returns x such that the area under the pdf from minus infinity to x is equal
to p.
A piece-wise rational approximation is done for the function.
This is a port of the implementation in netlib.
Args:
p: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtri").
Returns:
x: `Tensor` with `dtype=p.dtype`.
Raises:
TypeError: if `p` is not floating-type.
"""
with ops.name_scope(name, values=[p]):
p = ops.convert_to_tensor(p, name="p")
if p.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"p.dtype=%s is not handled, see docstring for supported types."
% p.dtype)
return _ndtri(p)
def _ndtri(p):
"""Implements ndtri core logic."""
# Constants used in piece-wise rational approximations. Taken from the cephes
# library:
# https://github.com/scipy/scipy/blob/master/scipy/special/cephes/ndtri.c
p0 = list(reversed([-5.99633501014107895267E1,
9.80010754185999661536E1,
-5.66762857469070293439E1,
1.39312609387279679503E1,
-1.23916583867381258016E0]))
q0 = list(reversed([1.0,
1.95448858338141759834E0,
4.67627912898881538453E0,
8.63602421390890590575E1,
-2.25462687854119370527E2,
2.00260212380060660359E2,
-8.20372256168333339912E1,
1.59056225126211695515E1,
-1.18331621121330003142E0]))
p1 = list(reversed([4.05544892305962419923E0,
3.15251094599893866154E1,
5.71628192246421288162E1,
4.40805073893200834700E1,
1.46849561928858024014E1,
2.18663306850790267539E0,
-1.40256079171354495875E-1,
-3.50424626827848203418E-2,
-8.57456785154685413611E-4]))
q1 = list(reversed([1.0,
1.57799883256466749731E1,
4.53907635128879210584E1,
4.13172038254672030440E1,
1.50425385692907503408E1,
2.50464946208309415979E0,
-1.42182922854787788574E-1,
-3.80806407691578277194E-2,
-9.33259480895457427372E-4]))
p2 = list(reversed([3.23774891776946035970E0,
6.91522889068984211695E0,
3.93881025292474443415E0,
1.33303460815807542389E0,
2.01485389549179081538E-1,
1.23716634817820021358E-2,
3.01581553508235416007E-4,
2.65806974686737550832E-6,
6.23974539184983293730E-9]))
q2 = list(reversed([1.0,
6.02427039364742014255E0,
3.67983563856160859403E0,
1.37702099489081330271E0,
2.16236993594496635890E-1,
1.34204006088543189037E-2,
3.28014464682127739104E-4,
2.89247864745380683936E-6,
6.79019408009981274425E-9]))
def _create_polynomial(var, coeffs):
"""Compute n_th order polynomial via Horner's method."""
if not coeffs:
return 0.
return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var
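  # Sanity check of the Horner recursion above (by hand): for coeffs
  # [c0, c1, c2] it evaluates to c0 + var * (c1 + var * c2).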
maybe_complement_p = array_ops.where(p > 1. - np.exp(-2.), 1. - p, p)
# Write in an arbitrary value in place of 0 for p since 0 will cause NaNs
# later on. The result from the computation when p == 0 is not used so any
# number that doesn't result in NaNs is fine.
sanitized_mcp = array_ops.where(
maybe_complement_p <= 0.,
0.5 * array_ops.ones_like(p),
maybe_complement_p)
# Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).
w = sanitized_mcp - 0.5
ww = w ** 2
x_for_big_p = w + w * ww * (_create_polynomial(ww, p0)
/ _create_polynomial(ww, q0))
x_for_big_p *= -np.sqrt(2. * np.pi)
# Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z),
# where z = sqrt(-2. * log(p)), and P/Q are chosen between two different
  # arrays based on whether p < exp(-32).
z = math_ops.sqrt(-2. * math_ops.log(sanitized_mcp))
first_term = z - math_ops.log(z) / z
second_term_small_p = (_create_polynomial(1. / z, p2)
/ _create_polynomial(1. / z, q2)) / z
second_term_otherwise = (_create_polynomial(1. / z, p1)
/ _create_polynomial(1. / z, q1)) / z
x_for_small_p = first_term - second_term_small_p
x_otherwise = first_term - second_term_otherwise
x = array_ops.where(sanitized_mcp > np.exp(-2.),
x_for_big_p,
array_ops.where(z >= 8.0, x_for_small_p, x_otherwise))
x = array_ops.where(p > 1. - np.exp(-2.), x, -x)
infinity = constant_op.constant(np.inf, dtype=x.dtype) * array_ops.ones_like(x)
x_nan_replaced = array_ops.where(
p <= 0.0, -infinity, array_ops.where(p >= 1.0, infinity, x))
return x_nan_replaced
def log_ndtr(x, series_order=3, name="log_ndtr"):
"""Log Normal distribution function.
For details of the Normal distribution function see `ndtr`.
This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or
using an asymptotic series. Specifically:
- For `x > upper_segment`, use the approximation `-ndtr(-x)` based on
`log(1-x) ~= -x, x << 1`.
- For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique
and take a log.
- For `x <= lower_segment`, we use the series approximation of erf to compute
the log CDF directly.
The `lower_segment` is set based on the precision of the input:
```
lower_segment = { -20, x.dtype=float64
{ -10, x.dtype=float32
upper_segment = { 8, x.dtype=float64
{ 5, x.dtype=float32
```
When `x < lower_segment`, the `ndtr` asymptotic series approximation is:
```
ndtr(x) = scale * (1 + sum) + R_N
scale = exp(-0.5 x**2) / (-x sqrt(2 pi))
sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}
R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})
```
where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a
[double-factorial](https://en.wikipedia.org/wiki/Double_factorial).
Args:
x: `Tensor` of type `float32`, `float64`.
series_order: Positive Python `integer`. Maximum depth to
evaluate the asymptotic expansion. This is the `N` above.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
log_ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
    TypeError: if `series_order` is not a Python `integer`.
ValueError: if `series_order` is not in `[0, 30]`.
"""
if not isinstance(series_order, int):
raise TypeError("series_order must be a Python integer.")
if series_order < 0:
raise ValueError("series_order must be non-negative.")
if series_order > 30:
raise ValueError("series_order must be <= 30.")
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype == np.float64:
lower_segment = LOGNDTR_FLOAT64_LOWER
upper_segment = LOGNDTR_FLOAT64_UPPER
elif x.dtype.as_numpy_dtype == np.float32:
lower_segment = LOGNDTR_FLOAT32_LOWER
upper_segment = LOGNDTR_FLOAT32_UPPER
else:
raise TypeError("x.dtype=%s is not supported." % x.dtype)
# The basic idea here was ported from py/scipy/special/cephes/ndtr.c.
# We copy the main idea, with a few changes
# * For x >> 1, and X ~ Normal(0, 1),
# Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],
# which extends the range of validity of this function.
# * We use one fixed series_order for all of 'x', rather than adaptive.
# * Our docstring properly reflects that this is an asymptotic series, not a
# Taylor series. We also provided a correct bound on the remainder.
# * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when
# x=0. This happens even though the branch is unchosen because when x=0
# the gradient of a select involves the calculation 1*dy+0*(-inf)=nan
# regardless of whether dy is finite. Note that the minimum is a NOP if
# the branch is chosen.
return array_ops.where(
math_ops.greater(x, upper_segment),
-_ndtr(-x), # log(1-x) ~= -x, x << 1
array_ops.where(math_ops.greater(x, lower_segment),
math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))),
_log_ndtr_lower(math_ops.minimum(x, lower_segment),
series_order)))
def _log_ndtr_lower(x, series_order):
"""Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`."""
x_2 = math_ops.square(x)
# Log of the term multiplying (1 + sum)
log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * math.log(2. * math.pi)
return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order))
def _log_ndtr_asymptotic_series(x, series_order):
"""Calculates the asymptotic series used in log_ndtr."""
if series_order <= 0:
return 1.
x_2 = math_ops.square(x)
even_sum = 0.
odd_sum = 0.
x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.
for n in range(1, series_order + 1):
if n % 2:
odd_sum += _double_factorial(2 * n - 1) / x_2n
else:
even_sum += _double_factorial(2 * n - 1) / x_2n
x_2n *= x_2
return 1. + even_sum - odd_sum
def _double_factorial(n):
"""The double factorial function for small Python integer `n`."""
return np.prod(np.arange(n, 1, -2))
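# Spot check (by hand): _double_factorial(5) computes np.prod([5, 3]) == 15,
# matching 5!! = 5 * 3 * 1.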
def log_cdf_laplace(x, name="log_cdf_laplace"):
"""Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
lower_solution = -np.log(2.) + x
# safe_exp_neg_x = exp{-x} for x > 0, but is
# bounded above by 1, which avoids
# log[1 - 1] = -inf for x = log(1/2), AND
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))
    # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)
return array_ops.where(x < 0., lower_solution, upper_solution)
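# Spot check (by hand): at x = 0 both branches agree, since
# -log(2) + 0 == log1p(-0.5 * exp(0)) == log(0.5).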
|
dyoung418/tensorflow
|
tensorflow/python/ops/distributions/special_math.py
|
Python
|
apache-2.0
| 14,730
|
[
"Gaussian"
] |
c8bdc0b19efaf66e448af3e695b8ea02c6d1e30a69993328193648ef72816cc2
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from contextlib import contextmanager
from datetime import datetime
from nose.plugins.attrib import attr
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
PRIVACY_PUBLIC = u'all_users'
PRIVACY_PRIVATE = u'private'
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username']
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"
def log_in_as_unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
profile_page.value_for_dropdown_field('language_proficiencies', 'English')
profile_page.value_for_dropdown_field('country', 'United Arab Emirates')
profile_page.set_value_for_textarea_field('bio', 'Nothing Special')
# Waits here for text to appear/save on bio field
profile_page.wait_for_ajax()
def visit_profile_page(self, username, privacy=None):
"""
Visit a user's profile page and if a privacy is specified and
is different from the displayed value, then set the privacy to that value.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
# Change the privacy setting if it is not the desired one already
profile_page.privacy = privacy
# Verify the current setting is as expected
if privacy == self.PRIVACY_PUBLIC:
self.assertEqual(profile_page.privacy, 'all_users')
else:
self.assertEqual(profile_page.privacy, 'private')
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.start_time = datetime.now() # pylint: disable=attribute-defined-outside-init
# Load the page
profile_page.visit()
return profile_page
def set_birth_year(self, birth_year):
"""
Set birth year for the current user to the specified value.
"""
account_settings_page = AccountSettingsPage(self.browser)
account_settings_page.visit()
account_settings_page.wait_for_page()
self.assertEqual(
account_settings_page.value_for_dropdown_field('year_of_birth', str(birth_year)),
str(birth_year)
)
def verify_profile_page_is_public(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently public.
"""
self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS)
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS)
else:
self.assertEqual(profile_page.editable_fields, [])
def verify_profile_page_is_private(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently private.
"""
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS)
def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None):
"""
Verifies that the correct view event was captured for the profile page.
"""
actual_events = self.wait_for_events(
start_time=self.start_time,
event_filter={'event_type': 'edx.user.settings.viewed', 'username': requesting_username},
number_of_matches=1)
self.assert_events_match(
[
{
'username': requesting_username,
'event': {
'user_id': int(profile_user_id),
'page': 'profile',
'visibility': unicode(visibility)
}
}
],
actual_events
)
@contextmanager
def verify_pref_change_event_during(self, username, user_id, setting, **kwargs):
"""Assert that a single setting changed event is emitted for the user_api_userpreference table."""
expected_event = {
'username': username,
'event': {
'setting': setting,
'user_id': int(user_id),
'table': 'user_api_userpreference',
'truncated': []
}
}
expected_event['event'].update(kwargs)
event_filter = {
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'username': username,
}
with self.assert_events_match_during(event_filter=event_filter, expected_events=[expected_event]):
yield
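    # Typical use of the context manager above (mirrors test_make_profile_public
    # below):
    #     with self.verify_pref_change_event_during(
    #         username, user_id, 'account_privacy',
    #         old=self.PRIVACY_PRIVATE, new=self.PRIVACY_PUBLIC
    #     ):
    #         profile_page.privacy = self.PRIVACY_PUBLIC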
def initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
@attr(shard=4)
class OwnLearnerProfilePageTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Tests that verify a student's own profile page.
"""
def verify_profile_forced_private_message(self, username, birth_year, message=None):
"""
Verify age limit messages for a user.
"""
if birth_year is None:
birth_year = ""
self.set_birth_year(birth_year=birth_year)
profile_page = self.visit_profile_page(username)
self.assertTrue(profile_page.privacy_field_visible)
        if message:
            self.assertTrue(profile_page.age_limit_message_present)
            self.assertIn(message, profile_page.profile_forced_private_message)
        else:
            self.assertFalse(profile_page.age_limit_message_present)
def test_profile_defaults_to_public(self):
"""
Scenario: Verify that a new user's profile defaults to public.
Given that I am a new user.
When I go to my profile page.
Then I see that the profile visibility is set to public.
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
self.verify_profile_page_is_public(profile_page)
def assert_default_image_has_public_access(self, profile_page):
"""
Assert that profile image has public access.
"""
self.assertTrue(profile_page.profile_has_default_image)
self.assertTrue(profile_page.profile_has_image_with_public_access())
def test_make_profile_public(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my private profile page
And I set the profile visibility to public
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as public
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=self.PRIVACY_PRIVATE, new=self.PRIVACY_PUBLIC
):
profile_page.privacy = self.PRIVACY_PUBLIC
# Reload the page and verify that the profile is now public
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_public(profile_page)
def test_make_profile_private(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my public profile page
And I set the profile visibility to private
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as private
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=None, new=self.PRIVACY_PRIVATE
):
profile_page.privacy = self.PRIVACY_PRIVATE
# Reload the page and verify that the profile is now private
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_private(profile_page)
def test_dashboard_learner_profile_link(self):
"""
Scenario: Verify that my profile link is present on dashboard page and we can navigate to correct page.
Given that I am a registered user.
When I go to Dashboard page.
And I click on username dropdown.
Then I see Profile link in the dropdown menu.
When I click on Profile link.
Then I will be navigated to Profile page.
"""
username, __ = self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertIn('Profile', dashboard_page.username_dropdown_link_text)
dashboard_page.click_my_profile_link()
my_profile_page = LearnerProfilePage(self.browser, username)
my_profile_page.wait_for_page()
def test_fields_on_my_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own private profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to private.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_page_is_private(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_fields_on_my_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own public profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see all the profile fields are shown.
And `location`, `language` and `about me` fields are editable.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.verify_profile_page_is_public(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC)
def _test_dropdown_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a dropdown field.
"""
profile_page.value_for_dropdown_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def _test_textarea_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a textarea field.
"""
profile_page.set_value_for_textarea_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def test_country_field(self):
"""
Test behaviour of `Country` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set country value to `Pakistan`.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I reload the page.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I make `country` field editable
Then `country` field mode should be `edit`
And `country` field icon should be visible.
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'country', 'Pakistan', 'Pakistan', 'display')
profile_page.make_field_editable('country')
self.assertEqual(profile_page.mode_for_field('country'), 'edit')
self.assertTrue(profile_page.field_icon_present('country'))
def test_language_field(self):
"""
Test behaviour of `Language` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set language value to `Urdu`.
Then displayed language should be `Urdu` and language field mode should be `display`
And I reload the page.
Then displayed language should be `Urdu` and language field mode should be `display`
Then I set empty value for language.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I reload the page.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I make `language` field editable
Then `language` field mode should be `edit`
And `language` field icon should be visible.
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'language_proficiencies', 'Urdu', 'Urdu', 'display')
self._test_dropdown_field(profile_page, 'language_proficiencies', '', 'Add language', 'placeholder')
profile_page.make_field_editable('language_proficiencies')
self.assertTrue(profile_page.mode_for_field('language_proficiencies'), 'edit')
self.assertTrue(profile_page.field_icon_present('language_proficiencies'))
def test_about_me_field(self):
"""
Test behaviour of `About Me` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set about me value to `ThisIsIt`.
Then displayed about me should be `ThisIsIt` and about me field mode should be `display`
And I reload the page.
Then displayed about me should be `ThisIsIt` and about me field mode should be `display`
Then I set empty value for about me.
Then displayed about me should be `Tell other learners a little about yourself: where you live,
what your interests are, why you're taking courses, or what you hope to learn.` and about me
field mode should be `placeholder`
And I reload the page.
Then displayed about me should be `Tell other learners a little about yourself: where you live,
what your interests are, why you're taking courses, or what you hope to learn.` and about me
field mode should be `placeholder`
And I make `about me` field editable
Then `about me` field mode should be `edit`
"""
placeholder_value = (
"Tell other learners a little about yourself: where you live, what your interests are, "
"why you're taking courses, or what you hope to learn."
)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_textarea_field(profile_page, 'bio', 'ThisIsIt', 'ThisIsIt', 'display')
self._test_textarea_field(profile_page, 'bio', '', placeholder_value, 'placeholder')
profile_page.make_field_editable('bio')
self.assertTrue(profile_page.mode_for_field('bio'), 'edit')
def test_birth_year_not_set(self):
"""
Verify message if birth year is not set.
Given that I am a registered user.
And birth year is not set for the user.
And I visit my profile page.
Then I should see a message that the profile is private until the year of birth is set.
"""
username, user_id = self.log_in_as_unique_user()
message = "You must specify your birth year before you can share your full profile."
self.verify_profile_forced_private_message(username, birth_year=None, message=message)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_is_under_age(self):
"""
Verify message if user is under age.
Given that I am a registered user.
And birth year is set so that age is less than 13.
And I visit my profile page.
Then I should see a message that the profile is private as I am under thirteen.
"""
username, user_id = self.log_in_as_unique_user()
under_age_birth_year = datetime.now().year - 10
self.verify_profile_forced_private_message(
username,
birth_year=under_age_birth_year,
message='You must be over 13 to share a full profile.'
)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_can_only_see_default_image_for_private_profile(self):
"""
Scenario: Default profile image behaves correctly for under age user.
Given that I am on my profile page with private access
And I can see default image
When I move my cursor to the image
        Then I cannot see the upload/remove image text
        And I cannot upload/remove the image.
"""
year_of_birth = datetime.now().year - 5
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_forced_private_message(
username,
year_of_birth,
message='You must be over 13 to share a full profile.'
)
self.assertTrue(profile_page.profile_has_default_image)
self.assertFalse(profile_page.profile_has_image_with_private_access())
def test_user_can_see_default_image_for_public_profile(self):
"""
Scenario: Default profile image behaves correctly for public profile.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        And I am able to upload a new image
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
def test_user_can_upload_the_profile_image_with_success(self):
"""
Scenario: Upload profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new image via the file uploader
        Then I can see the changed image
        And I can also see the latest image after reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
profile_page.visit()
self.assertTrue(profile_page.image_upload_success)
def test_user_can_see_error_for_exceeding_max_file_size_limit(self):
"""
Scenario: Upload profile image does not work for > 1MB image file.
        Given that I am on my profile page with public access
        And I can see the default image
        When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new > 1MB image via the file uploader
        Then I can see the error message for the file size limit
        And I can still see the default image after a page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='larger_image.jpg')
self.assertEqual(profile_page.profile_image_message, "The file must be smaller than 1 MB in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_file_size_below_the_min_limit(self):
"""
        Scenario: Upload profile image does not work for a < 100 bytes image file.
        Given that I am on my profile page with public access
        And I can see the default image
        When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new < 100 bytes image via the file uploader
        Then I can see the error message for the minimum file size limit
        And I can still see the default image after a page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='list-icon-visited.png')
self.assertEqual(profile_page.profile_image_message, "The file must be at least 100 bytes in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_wrong_file_type(self):
"""
Scenario: Upload profile image does not work for wrong file types.
        Given that I am on my profile page with public access
        And I can see the default image
        When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new csv file via the file uploader
        Then I can see the error message for a wrong/unsupported file type
        And I can still see the default image after a page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='generic_csv.csv')
self.assertEqual(
profile_page.profile_image_message,
"The file must be one of the following types: .gif, .png, .jpeg, .jpg."
)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_remove_profile_image(self):
"""
Scenario: Remove profile image works correctly.
        Given that I am on my profile page with public access
        And I can see the default image
        When I move my cursor to the image
        Then I can see the upload/remove image text
        When I click on the remove image link
        Then I can see the default image
        And I can still see the default image after a page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
self.assertTrue(profile_page.remove_profile_image())
self.assertTrue(profile_page.profile_has_default_image)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
def test_user_cannot_remove_default_image(self):
"""
        Scenario: Remove profile image does not work for default images.
        Given that I am on my profile page with public access
        And I can see the default image
        When I move my cursor to the image
        Then I can see only the upload image text
        And I cannot see the remove image text
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
self.assertFalse(profile_page.remove_link_present)
def test_eventing_after_multiple_uploads(self):
"""
Scenario: An event is fired when a user with a profile image uploads another image
Given that I am on my profile page with public access
And I upload a new image via file uploader
When I upload another image via the file uploader
Then two upload events have been emitted
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg', wait_for_upload_button=False)
@attr(shard=4)
class DifferentUserLearnerProfilePageTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Tests that verify viewing the profile page of a different user.
"""
def test_different_user_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's private profile.
Given that I am a registered user.
And I visit a different user's private profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PRIVATE)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_under_age(self):
"""
Scenario: Verify that an under age user's profile is private to others.
Given that I am a registered user.
And I visit an under age user's profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see that only the private fields are shown.
"""
under_age_birth_year = datetime.now().year - 10
different_username, different_user_id = self.initialize_different_user(
privacy=self.PRIVACY_PUBLIC,
birth_year=under_age_birth_year
)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's public profile.
Given that I am a registered user.
And I visit a different user's public profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then all the profile fields are shown.
Then I shouldn't see the profile visibility selector dropdown.
Also `location`, `language` and `about me` fields are not editable.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.wait_for_public_fields()
self.verify_profile_page_is_public(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PUBLIC)
def test_badge_share_modal(self):
username = 'testcert'
AutoAuthPage(self.browser, username=username).visit()
profile_page = self.visit_profile_page(username)
profile_page.display_accomplishments()
badge = profile_page.badges[0]
badge.display_modal()
badge.close_modal()
@attr('a11y')
class LearnerProfileA11yTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Class to test learner profile accessibility.
"""
def test_editable_learner_profile_a11y(self):
"""
Test the accessibility of the editable version of the profile page
(user viewing her own public profile).
"""
username, _ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('language_proficiencies')
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('bio')
profile_page.a11y_audit.check_for_accessibility_errors()
def test_read_only_learner_profile_a11y(self):
"""
Test the accessibility of the read-only version of a public profile page
(user viewing someone else's profile page).
"""
# initialize_different_user should cause country, language, and bio to be filled out (since
# privacy is public). It doesn't appear that this is happening, although the method
# works in regular bokchoy tests. Perhaps a problem with phantomjs? So this test is currently
# only looking at a read-only profile page with a username.
different_username, _ = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.a11y_audit.check_for_accessibility_errors()
def test_badges_accessibility(self):
"""
Test the accessibility of the badge listings and sharing modal.
"""
username = 'testcert'
AutoAuthPage(self.browser, username=username).visit()
profile_page = self.visit_profile_page(username)
profile_page.display_accomplishments()
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.badges[0].display_modal()
profile_page.a11y_audit.check_for_accessibility_errors()
|
caesar2164/edx-platform
|
common/test/acceptance/tests/lms/test_learner_profile.py
|
Python
|
agpl-3.0
| 34,815
|
[
"VisIt"
] |
2ac3ece5e93926ad2717de188aee5e5cb8c3ca79dfabb85ba294ef9749a32810
|
import os
import collections
import mdtraj as md
import copy
# pylint: disable=wildcard-import, missing-docstring, protected-access
# pylint: disable=attribute-defined-outside-init, invalid-name, no-self-use
# pylint: disable=wrong-import-order, unused-wildcard-import
# includes pytest
from .utils import *
# stuff to be testing in this file
from contact_map.contact_map import *
from contact_map.contact_count import HAS_MATPLOTLIB, ContactCount
traj = md.load(find_testfile("trajectory.pdb"))
traj_atom_contact_count = {
frozenset([0, 8]): 1,
frozenset([0, 9]): 1,
frozenset([1, 4]): 4,
frozenset([1, 5]): 1,
frozenset([1, 8]): 1,
frozenset([1, 9]): 1,
frozenset([4, 6]): 5,
frozenset([4, 7]): 2,
frozenset([4, 8]): 1,
frozenset([5, 6]): 5,
frozenset([5, 7]): 2,
frozenset([5, 8]): 1
}
traj_residue_contact_count = {
frozenset([0, 2]): 5,
frozenset([0, 4]): 1,
frozenset([2, 3]): 5,
frozenset([2, 4]): 1
}
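# Hedged cross-check of the two expected-count tables above (illustrative
# reasoning, assuming the test topology assigns two atoms per residue:
# residue 0 = atoms {0, 1}, residue 2 = atoms {4, 5}, ...): the residue pair
# {0, 2} being in contact in 5 frames is consistent with atom pair (1, 4)
# hitting in 4 frames and (1, 5) in the remaining frame.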
test_file = "test_file.p"
def pdb_topology_dict():
serial = {str(i): i+1 for i in range(10)}
name = {str(i): "C" + str(i % 2 + 1) for i in range(10)}
element = {str(i): "C" for i in range(10)}
    res_seq = {str(i): str(i // 2 + 1) for i in range(10)}
res_name = {str(i): "XXX" for i in range(10)}
chain_id = {str(i): 0 for i in range(10)}
seg_id = {str(i): "" for i in range(10)}
dct = {'serial': serial,
'name': name,
'element': element,
'resSeq': res_seq,
'resName': res_name,
'chainID': chain_id,
'segmentID': seg_id}
return dct
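# Hedged note: the column -> {row: value} layout above mirrors the
# orientation of mdtraj's Topology.to_dataframe() output, so a topology of
# this shape could in principle be rebuilt with something like
#     md.Topology.from_dataframe(pandas.DataFrame(pdb_topology_dict()))
# (pandas import and exact dtype handling assumed, untested here).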
def counter_of_inner_list(ll):
return collections.Counter([frozenset(i) for i in ll])
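def _example_counter_of_inner_list():
    # Illustrative sketch (not exercised by the suite): pairs are stored
    # unordered, so [0, 1] and [1, 0] collapse into the same frozenset key.
    expected = collections.Counter({frozenset([0, 1]): 2, frozenset([2, 3]): 1})
    assert counter_of_inner_list([[0, 1], [1, 0], [2, 3]]) == expected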
def check_most_common_order(most_common):
for i in range(len(most_common) - 1):
assert most_common[i][1] >= most_common[i+1][1]
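def _example_check_most_common_order():
    # Illustrative sketch (not exercised by the suite): the helper accepts
    # any (item, count) list sorted by non-increasing count, mirroring the
    # contract of collections.Counter.most_common().
    check_most_common_order([('a', 3), ('b', 2), ('c', 2), ('d', 1)])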
def check_use_atom_slice(m, use_atom_slice, expected):
if use_atom_slice is not None:
assert m._use_atom_slice == use_atom_slice
else:
assert m._use_atom_slice == expected[m]
def _contact_object_compare(m, m2):
"""Compare two contact objects (with asserts).
May later become pytest fanciness (pytest_assertrepr_compare)
"""
assert m.cutoff == m2.cutoff
assert m.query == m2.query
assert m.haystack == m2.haystack
assert m.n_neighbors_ignored == m2.n_neighbors_ignored
assert m.topology == m2.topology
if hasattr(m, '_atom_contacts') or hasattr(m2, '_atom_contacts'):
assert m._atom_contacts == m2._atom_contacts
if hasattr(m, '_residue_contacts') or hasattr(m2, '_residue_contacts'):
assert m._residue_contacts == m2._residue_contacts
def _check_contacts_dict_names(contact_object):
aliases = {
contact_object.residue_contacts: ['residue', 'residues', 'res'],
contact_object.atom_contacts: ['atom', 'atoms']
}
for (contacts, names) in aliases.items():
for name in names:
assert contacts.counter == contact_object.contacts[name].counter
def test_residue_neighborhood():
top = traj.topology
residues = list(top.residues)
for res in residues:
assert residue_neighborhood(res, n=0) == [res.index]
for n in range(5):
from_bottom = -min(0, res.index - n)
from_top = max(0, res.index + n - (len(residues) - 1))
len_n = 2*n + 1 - from_top - from_bottom
assert len(residue_neighborhood(res, n=n)) == len_n
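def _example_residue_neighborhood_window():
    # Worked instance of the clipping arithmetic tested above (illustrative;
    # assumes the 5-residue test topology and a contiguous window around the
    # residue): residue 0 with n=2 loses the two indices below zero, so
    # 2*2 + 1 - 2 == 3 residues remain.
    res0 = list(traj.topology.residues)[0]
    assert sorted(residue_neighborhood(res0, n=2)) == [0, 1, 2]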
@pytest.mark.parametrize("idx", [0, 4])
class TestContactObject(object):
# note: these used to be the tests for the separate single-frame
# ContactMap class; however, it includes a lot of good unit tests for
# ContactObject
def setup(self):
self.topology = traj.topology
self.map0 = ContactFrequency(traj[0], cutoff=0.075,
n_neighbors_ignored=0)
self.map4 = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
self.maps = {0: self.map0, 4: self.map4}
self.expected_atom_contacts = {
self.map0: [[1, 4], [4, 6], [5, 6]],
self.map4: [[0, 9], [0, 8], [1, 8], [1, 9], [1, 4], [8, 4],
[8, 5], [4, 6], [4, 7], [5, 6], [5, 7]]
}
self.expected_residue_contacts = {
self.map0: [[0, 2], [2, 3]],
self.map4: [[0, 2], [2, 3], [0, 4], [2, 4]]
}
def test_initialization(self, idx):
m = self.maps[idx]
assert set(m.query) == set(range(10))
assert set(m.haystack) == set(range(10))
assert set(m.all_atoms) == set(range(10))
assert set(r.index for r in m.query_residues) == set(range(5))
assert set(r.index for r in m.haystack_residues) == set(range(5))
assert m.haystack_residue_range == (0, 5)
assert m.query_residue_range == (0, 5)
assert m.n_neighbors_ignored == 0
assert m.topology == self.topology
for res in m.topology.residues:
ignored_atoms = m._residue_ignore_atom_idxs[res.index]
assert ignored_atoms == set([a.index for a in res.atoms])
def test_counters(self, idx):
# tests the counters generated in the contact_map method
m = self.maps[idx]
expected = counter_of_inner_list(self.expected_atom_contacts[m])
assert m._atom_contacts == expected
assert m.atom_contacts.counter == expected
expected = counter_of_inner_list(self.expected_residue_contacts[m])
assert m._residue_contacts == expected
assert m.residue_contacts.counter == expected
def test_to_dict(self, idx):
m = self.maps[idx]
dct = m.to_dict()
# NOTE: topology only tested in a cycle; JSON order not guaranteed
assert dct['cutoff'] == 0.075
assert dct['query'] == list(range(10))
assert dct['haystack'] == list(range(10))
assert dct['all_atoms'] == tuple(range(10))
assert dct['n_neighbors_ignored'] == 0
def test_topology_serialization_cycle(self, idx):
top = self.maps[idx].topology
serialized_topology = ContactFrequency._serialize_topology(top)
new_top = ContactFrequency._deserialize_topology(serialized_topology)
assert top == new_top
def test_counter_serialization_cycle(self, idx):
m = self.maps[idx]
serialize = ContactFrequency._serialize_contact_counter
deserialize = ContactFrequency._deserialize_contact_counter
serialized_atom_counter = serialize(m._atom_contacts)
serialized_residue_counter = serialize(m._residue_contacts)
new_atom_counter = deserialize(serialized_atom_counter)
new_residue_counter = deserialize(serialized_residue_counter)
assert new_atom_counter == m._atom_contacts
assert new_residue_counter == m._residue_contacts
def test_dict_serialization_cycle(self, idx):
m = self.maps[idx]
dct = m.to_dict()
m2 = ContactFrequency.from_dict(dct)
_contact_object_compare(m, m2)
assert m == m2
def test_json_serialization_cycle(self, idx):
m = self.maps[idx]
json_str = m.to_json()
m2 = ContactFrequency.from_json(json_str)
_contact_object_compare(m, m2)
assert m == m2
def test_with_ignores(self, idx):
m = ContactFrequency(traj[idx], cutoff=0.075, n_neighbors_ignored=1)
expected_atom_contacts = {
0: [[1, 4]],
4: [[0, 9], [0, 8], [1, 8], [1, 9], [1, 4], [8, 4], [8, 5]]
}
expected_residue_contacts = {
0: [[0, 2]],
4: [[0, 2], [0, 4], [2, 4]]
}
expected = counter_of_inner_list(expected_atom_contacts[idx])
assert m._atom_contacts == expected
assert m.atom_contacts.counter == expected
expected = counter_of_inner_list(expected_residue_contacts[idx])
assert m._residue_contacts == expected
assert m.residue_contacts.counter == expected
def test_most_common_atoms_for_residue(self, idx):
m = self.maps[idx]
top = self.topology
expected_atom_indices_for_res_2 = { # atoms 4, 5
self.map0: {4: [1, 6], 5: [6]},
self.map4: {4: [1, 8, 6, 7], 5: [6, 7, 8]}
}
most_common_atoms = m.most_common_atoms_for_residue(top.residue(2))
most_common_idx = m.most_common_atoms_for_residue(2)
beauty = {frozenset(ll[0]): ll[1] for ll in most_common_atoms}
beauty_idx = {frozenset(ll[0]): ll[1] for ll in most_common_idx}
truth = {frozenset([top.atom(k), top.atom(a)]): 1
for (k, v) in expected_atom_indices_for_res_2[m].items()
for a in v}
assert beauty == truth
assert beauty_idx == truth
def test_most_common_atoms_for_contact(self, idx):
m = self.maps[idx]
top = self.topology
expected_atom_indices_contact_0_2 = {
self.map0: [[1, 4]],
self.map4: [[1, 4]]
}
contact_pair = [top.residue(0), top.residue(2)]
most_common_atoms = m.most_common_atoms_for_contact(contact_pair)
most_common_idx = m.most_common_atoms_for_contact([0, 2])
beauty = {frozenset(ll[0]): ll[1] for ll in most_common_atoms}
beauty_idx = {frozenset(ll[0]): ll[1] for ll in most_common_idx}
truth = {frozenset([top.atom(a) for a in ll]): 1
for ll in expected_atom_indices_contact_0_2[m]}
assert beauty == truth
assert beauty_idx == truth
def test_saving(self, idx):
m = self.maps[idx]
m.save_to_file(test_file)
m2 = ContactFrequency.from_file(test_file)
assert m.atom_contacts.counter == m2.atom_contacts.counter
os.remove(test_file)
@pytest.mark.parametrize("use_atom_slice", [True, False, None])
def test_atom_slice(self, idx, use_atom_slice):
# Set class variable before init
class_default = ContactFrequency._class_use_atom_slice
ContactFrequency._class_use_atom_slice = use_atom_slice
map0q = ContactFrequency(traj[0], query=[1, 4, 5, 6], cutoff=0.075,
n_neighbors_ignored=0)
map0h = ContactFrequency(traj[0], haystack=[1, 4, 5, 6],
cutoff=0.075, n_neighbors_ignored=0)
map0b = ContactFrequency(traj[0], query=[1, 4, 5, 6],
haystack=[1, 4, 5, 6], cutoff=0.075,
n_neighbors_ignored=0)
maps = [map0q, map0h, map0b]
atoms = {map0q: list(range(10)),
map0h: list(range(10)),
map0b: [1, 4, 5, 6]}
expected_atom_slice = {map0q: False,
map0h: False,
map0b: True}
# Only test for map 0 for now
m0 = self.maps[0]
# Test init
for m in maps:
assert m.all_atoms == atoms[m]
check_use_atom_slice(m, use_atom_slice, expected_atom_slice)
# Test results compared to m0
expected = counter_of_inner_list(self.expected_atom_contacts[m0])
assert m._atom_contacts == expected
assert m.atom_contacts.counter == expected
expected_residue_contacts = self.expected_residue_contacts[m0]
expected = counter_of_inner_list(expected_residue_contacts)
assert m._residue_contacts == expected
assert m.residue_contacts.counter == expected
# Test sliced indices
sliced_idx = [0, 1, 2, 3]
real_idx = [map0b.indexer.real_idx[i] for i in sliced_idx]
if map0b._use_atom_slice:
assert real_idx == [1, 4, 5, 6]
else:
assert real_idx == sliced_idx
# Reset class variable (as imports are not redone between function
# calls)
ContactFrequency._class_use_atom_slice = class_default
def test_contacts_dict(self, idx):
_check_contacts_dict_names(self.maps[idx])
def test_no_unitcell(self, idx):
temptraj = copy.deepcopy(traj)
# Strip unitcell
temptraj.unitcell_vectors = None
# Activate atom_slice
atoms = [1, 4, 5, 6]
mapi = ContactFrequency(temptraj[idx], cutoff=0.075,
n_neighbors_ignored=0, query=atoms,
haystack=atoms)
expected_atom_contacts = {0: [[1, 4], [4, 6], [5, 6]],
4: [[1, 4], [4, 6], [5, 6]]}
expected = counter_of_inner_list(expected_atom_contacts[idx])
assert mapi._atom_contacts == expected
# TODO: add tests for ContactObject._check_consistency
class TestContactFrequency(object):
def setup(self):
self.atoms = [0, 1, 4, 5, 6, 7, 8, 9]
self.map = ContactFrequency(trajectory=traj,
cutoff=0.075,
n_neighbors_ignored=0)
self.expected_atom_contact_count = traj_atom_contact_count
self.expected_residue_contact_count = traj_residue_contact_count
self.expected_n_frames = 5
def test_initialization(self):
assert self.map.n_frames == len(traj)
assert self.map.topology == traj.topology
assert set(self.map.query) == set(range(10))
assert set(self.map.haystack) == set(range(10))
assert set(self.map.all_atoms) == set(range(10))
assert self.map.n_neighbors_ignored == 0
for res in self.map.topology.residues:
ignored_atoms = self.map._residue_ignore_atom_idxs[res.index]
assert ignored_atoms == set([a.index for a in res.atoms])
def test_counters(self):
assert self.map.n_frames == self.expected_n_frames
atom_contacts = self.map.atom_contacts
expected_atom_contacts = {
k: float(v) / self.expected_n_frames
for (k, v) in self.expected_atom_contact_count.items()
}
assert atom_contacts.counter == expected_atom_contacts
residue_contacts = self.map.residue_contacts
expected_residue_contacts = {
k: float(v) / self.expected_n_frames
for (k, v) in self.expected_residue_contact_count.items()
}
assert residue_contacts.counter == expected_residue_contacts
def test_contacts_dict(self):
_check_contacts_dict_names(self.map)
@pytest.mark.parametrize('contactcount', [True, False])
def test_from_contacts(self, contactcount):
atom_contacts = self.expected_atom_contact_count
residue_contacts = self.expected_residue_contact_count
top = traj.topology
if contactcount:
atom_contacts = ContactCount(atom_contacts, top.atom,
10, 10)
residue_contacts = ContactCount(residue_contacts, top.residue,
5, 5)
cmap = ContactFrequency.from_contacts(atom_contacts,
residue_contacts,
n_frames=5,
topology=top,
cutoff=0.075,
n_neighbors_ignored=0)
_contact_object_compare(cmap, self.map)
def test_check_compatibility_true(self):
map2 = ContactFrequency(trajectory=traj[0:2],
cutoff=0.075,
n_neighbors_ignored=0)
assert self.map._check_compatibility(map2) == {}
@pytest.mark.parametrize("diff", [
{'trajectory': traj.atom_slice([0, 1, 2, 3])},
{'cutoff': 0.45},
{'n_neighbors_ignored': 2},
{'query': [1, 2, 3, 4]},
{'haystack': [1, 2, 3, 4]}
])
def test_check_compatibility_assertion_error(self, diff):
params = {'trajectory': traj[0:2],
'cutoff': 0.075,
'n_neighbors_ignored': 0}
params.update(diff)
map2 = ContactFrequency(**params)
with pytest.raises(AssertionError):
self.map._check_compatibility(map2)
def test_check_compatibility_runtime_error(self):
map2 = ContactFrequency(trajectory=traj,
cutoff=0.45,
n_neighbors_ignored=2)
with pytest.raises(RuntimeError):
self.map._check_compatibility(map2, err=RuntimeError)
@pytest.mark.parametrize("intermediate", ["dict", "json"])
def test_serialization_cycle(self, intermediate):
serializer, deserializer = {
'json': (self.map.to_json, ContactFrequency.from_json),
'dict': (self.map.to_dict, ContactFrequency.from_dict)
}[intermediate]
serialized = serializer()
reloaded = deserializer(serialized)
_contact_object_compare(self.map, reloaded)
assert self.map == reloaded
def test_hash(self):
map2 = ContactFrequency(trajectory=traj,
cutoff=0.075,
n_neighbors_ignored=0)
map3 = ContactFrequency(trajectory=traj[:2],
cutoff=0.075,
n_neighbors_ignored=0)
assert hash(self.map) == hash(map2)
assert hash(self.map) != hash(map3)
def test_saving(self):
m = self.map
m.save_to_file(test_file)
m2 = ContactFrequency.from_file(test_file)
assert m.atom_contacts.counter == m2.atom_contacts.counter
os.remove(test_file)
@pytest.mark.parametrize('select_by', ['res', 'idx'])
def test_most_common_atoms_for_residue(self, select_by):
if select_by == 'res':
res_2 = self.map.topology.residue(2)
elif select_by == 'idx':
res_2 = 2
else:
raise RuntimeError("This should not happen")
# call both by residue and residue number
most_common_2 = self.map.most_common_atoms_for_residue(res_2)
check_most_common_order(most_common_2)
most_common_numbers_2 = {frozenset([k[0].index, k[1].index]): v
for (k, v) in most_common_2}
# check contents are correct; residue 2 is atoms [4, 5]
expected_numbers_2 = {
frozenset([1, 4]): 0.8,
frozenset([1, 5]): 0.2,
frozenset([4, 6]): 1.0,
frozenset([4, 7]): 0.4,
frozenset([4, 8]): 0.2,
frozenset([5, 6]): 1.0,
frozenset([5, 7]): 0.4,
frozenset([5, 8]): 0.2
}
assert most_common_numbers_2 == expected_numbers_2
@pytest.mark.parametrize('select_by', ['res', 'idx'])
def test_most_common_atoms_for_contact(self, select_by):
top = self.map.topology
if select_by == 'res':
pair = [top.residue(2), top.residue(3)]
elif select_by == 'idx':
pair = [2, 3]
else:
raise RuntimeError("This should not happen")
most_common_2_3 = self.map.most_common_atoms_for_contact(pair)
check_most_common_order(most_common_2_3)
most_common_2_3_frozenset = [(frozenset(ll[0]), ll[1])
for ll in most_common_2_3]
# residue 2: atoms 4, 5; residue 3: atoms 6, 7
expected_2_3 = [
(frozenset([top.atom(4), top.atom(6)]), 1.0),
(frozenset([top.atom(5), top.atom(6)]), 1.0),
(frozenset([top.atom(4), top.atom(7)]), 0.4),
(frozenset([top.atom(5), top.atom(7)]), 0.4)
]
assert set(most_common_2_3_frozenset) == set(expected_2_3)
def test_add_contact_frequency(self):
# self.map has all 5 frames
# we can figure out what the [0:4] would look like
start = ContactFrequency(trajectory=traj[:4],
cutoff=0.075,
n_neighbors_ignored=0)
add_in = ContactFrequency(trajectory=traj[4:],
cutoff=0.075,
n_neighbors_ignored=0)
start.add_contact_frequency(add_in)
assert start.atom_contacts.counter == \
self.map.atom_contacts.counter
assert start.residue_contacts.counter == \
self.map.residue_contacts.counter
def test_subtract_contact_frequency(self):
first_four = ContactFrequency(trajectory=traj[:4],
cutoff=0.075,
n_neighbors_ignored=0)
last_frame = ContactFrequency(trajectory=traj[4:],
cutoff=0.075,
n_neighbors_ignored=0)
test_subject = ContactFrequency(trajectory=traj,
cutoff=0.075,
n_neighbors_ignored=0)
test_subject.subtract_contact_frequency(first_four)
assert test_subject.atom_contacts.counter == \
last_frame.atom_contacts.counter
assert test_subject.residue_contacts.counter == \
last_frame.residue_contacts.counter
@pytest.mark.parametrize("use_atom_slice", [True, False, None])
def test_use_atom_slice(self, use_atom_slice):
# Set class default before init
class_default = ContactFrequency._class_use_atom_slice
ContactFrequency._class_use_atom_slice = use_atom_slice
mapq = ContactFrequency(trajectory=traj, cutoff=0.075,
n_neighbors_ignored=0, query=self.atoms)
maph = ContactFrequency(trajectory=traj, cutoff=0.075,
n_neighbors_ignored=0, haystack=self.atoms)
mapb = ContactFrequency(trajectory=traj, cutoff=0.075,
n_neighbors_ignored=0, query=self.atoms,
haystack=self.atoms)
maps = [mapq, maph, mapb]
atoms = {mapq: list(range(10)),
maph: list(range(10)),
mapb: self.atoms}
expected_atom_slice = {mapq: False,
maph: False,
mapb: True}
# Test init
for m in maps:
self.map = m
assert m.all_atoms == atoms[m]
atom_list = [traj.topology.atom(i) for i in m.all_atoms]
check_use_atom_slice(m, use_atom_slice, expected_atom_slice)
sliced_traj = m.indexer.slice_trajectory(traj)
if m.use_atom_slice:
assert sliced_traj.topology.n_atoms == len(m.all_atoms)
else:
assert sliced_traj is traj
# Test counters
self.test_counters()
# Reset class default as pytest does not re-import
ContactFrequency._class_use_atom_slice = class_default
class TestContactDifference(object):
def test_diff_traj_frame(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
expected_atom_count = {
frozenset([0, 8]): -1.0,
frozenset([0, 9]): -1.0,
frozenset([1, 4]): 0.75 - 1.0,
frozenset([1, 5]): 0.25,
frozenset([1, 8]): -1.0,
frozenset([1, 9]): -1.0,
frozenset([4, 6]): 0.0,
frozenset([4, 7]): 0.25 - 1.0,
frozenset([4, 8]): -1.0,
frozenset([5, 6]): 0.0,
frozenset([5, 7]): 0.25 - 1.0,
frozenset([5, 8]): -1.0
}
expected_residue_count = {
frozenset([0, 2]): 0.0,
frozenset([0, 4]): -1.0,
frozenset([2, 3]): 0.0,
frozenset([2, 4]): -1.0
}
diff_1 = ttraj - frame
diff_2 = frame - ttraj
assert diff_1.atom_contacts.counter == expected_atom_count
assert diff_2.atom_contacts.counter == \
{k: -v for (k, v) in expected_atom_count.items()}
assert diff_1.residue_contacts.counter == expected_residue_count
assert diff_2.residue_contacts.counter == \
{k: -v for (k, v) in expected_residue_count.items()}
@pytest.mark.parametrize("intermediate", ["dict", "json"])
def test_serialization_cycle(self, intermediate):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
diff = ttraj - frame
serializer, deserializer = {
'json': (diff.to_json, ContactDifference.from_json),
'dict': (diff.to_dict, ContactDifference.from_dict)
}[intermediate]
serialized = serializer()
reloaded = deserializer(serialized)
_contact_object_compare(diff, reloaded)
assert diff == reloaded
def test_diff_frame_frame(self):
m0 = ContactFrequency(traj[0], cutoff=0.075, n_neighbors_ignored=0)
m4 = ContactFrequency(traj[4], cutoff=0.075, n_neighbors_ignored=0)
# one of these simply has more contacts than the other, so to test
# both positive diff and negative diff we flip the sign
diff_1 = m4 - m0
diff_2 = m0 - m4
# expected diffs are present in m4, not in m0
expected_atom_diff = [[0, 9], [0, 8], [1, 8], [1, 9], [4, 8],
[5, 8], [4, 7], [5, 7]]
expected_atom_common = [[1, 4], [4, 6], [5, 6]]
expected_residue_diff = [[0, 4], [2, 4]]
expected_residue_common = [[0, 2], [2, 3]]
expected_atoms_1 = counter_of_inner_list(expected_atom_diff)
# add the zeros
expected_atoms_1.update({frozenset(pair): 0
for pair in expected_atom_common})
assert diff_1.atom_contacts.counter == expected_atoms_1
expected_atoms_2 = {k: -v for (k, v) in expected_atoms_1.items()}
assert diff_2.atom_contacts.counter == expected_atoms_2
expected_residues_1 = counter_of_inner_list(expected_residue_diff)
expected_residues_1.update({frozenset(pair): 0
for pair in expected_residue_common})
assert diff_1.residue_contacts.counter == expected_residues_1
expected_residues_2 = {k: -v
for (k, v) in expected_residues_1.items()}
assert diff_2.residue_contacts.counter == expected_residues_2
def test_contacts_dict(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
_check_contacts_dict_names(ttraj - frame)
def test_diff_traj_traj(self):
traj_1 = ContactFrequency(trajectory=traj[0:2],
cutoff=0.075,
n_neighbors_ignored=0)
traj_2 = ContactFrequency(trajectory=traj[3:5],
cutoff=0.075,
n_neighbors_ignored=0)
expected_atom_count = {
frozenset([0, 8]): -0.5,
frozenset([0, 9]): -0.5,
frozenset([1, 4]): 0.5 - 1.0,
frozenset([1, 5]): 0.5,
frozenset([1, 8]): -0.5,
frozenset([1, 9]): -0.5,
frozenset([4, 6]): 0.0,
frozenset([4, 7]): -1.0,
frozenset([4, 8]): -0.5,
frozenset([5, 6]): 0.0,
frozenset([5, 7]): -1.0,
frozenset([5, 8]): -0.5
}
expected_residue_count = {
frozenset([0, 2]): 0,
frozenset([0, 4]): -0.5,
frozenset([2, 3]): 0,
frozenset([2, 4]): -0.5
}
diff_1 = traj_1 - traj_2
diff_2 = traj_2 - traj_1
assert diff_1.atom_contacts.counter == expected_atom_count
assert diff_2.atom_contacts.counter == \
{k: -v for (k, v) in expected_atom_count.items()}
assert diff_1.residue_contacts.counter == expected_residue_count
assert diff_2.residue_contacts.counter == \
{k: -v for (k, v) in expected_residue_count.items()}
def test_saving(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
diff = ttraj - frame
diff.save_to_file(test_file)
reloaded = ContactDifference.from_file(test_file)
assert diff.atom_contacts.counter == reloaded.atom_contacts.counter
os.remove(test_file)
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="Missing matplotlib")
def test_plot(self):
# smoke test; checks that we cover negative counts in plotting
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
diff = ttraj - frame
diff.residue_contacts.plot()
@pytest.mark.parametrize("attr", [('query',[0]), ('haystack',[1]),
('cutoff', 0.07),
('n_neighbors_ignored',1)])
def test_non_important_attributes(self, attr):
kwargs = {'cutoff':0.075,
'n_neighbors_ignored':0}
kwargs[attr[0]] = attr[1]
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], **kwargs)
diff = ttraj - frame
# Make sure the attributes are dead
        if attr[0] in {'query', 'haystack'}:
assert getattr(diff, attr[0]) == attr[1]
else:
assert getattr(diff, attr[0]) is None
# Make sure we can still do the maps
assert diff.atom_contacts is not None
assert diff.residue_contacts is not None
def test_non_important_atoms(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Now we are going to add an atom, which should not be part of the maps
res = list(frame.topology.residues)[-1]
elem = md.element.get_by_symbol("H")
_ = frame.topology.add_atom(name='test',
element=elem,
residue=res,
serial=42)
diff = ttraj - frame
# Make sure the topology does not fail here
assert diff.atom_contacts is not None
assert diff.residue_contacts is not None
def test_non_important_residues(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Now we are going to add a residue,
# which should not be part of the maps
chain = list(frame.topology.chains)[-1]
_ = frame.topology.add_residue(name='test',
chain=chain)
diff = ttraj - frame
# Make sure the topology does not fail here
assert diff.atom_contacts is not None
assert diff.residue_contacts is not None
def test_truncated_diffs_residues(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
traj_trunc = traj.atom_slice(range(6))
ttraj_trunc = ContactFrequency(traj_trunc[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame_trunc = ContactFrequency(traj_trunc[4], cutoff=0.075,
n_neighbors_ignored=0)
# The diff should only be made for atoms and residues in both maps
diff1 = ttraj - frame_trunc
diff2 = ttraj_trunc - frame_trunc
# Make sure the topology does not fail here
assert diff1.atom_contacts is not None
assert diff1.residue_contacts is not None
# Make sure the diffs are equal
assert diff1.atom_contacts.counter == diff2.atom_contacts.counter
assert diff1.residue_contacts.counter == diff2.residue_contacts.counter
def test_residue_rename_gives_different_atoms(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Now we are going to rename a residue,
frame.topology.residue(0).name = 'test'
# Make sure the topology does not fail here
diff = ttraj - frame
assert diff.atom_contacts is not None
assert diff.residue_contacts is not None
assert diff.topology.residue(0).name == "XXX/test"
def test_broken_atoms_altered(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Now we are going to change an element,
elem = md.element.get_by_symbol("H")
frame.topology.atom(0).element = elem
# Make sure the topology does not fail here
with pytest.raises(RuntimeError) as e:
diff = ttraj - frame
assert "AtomMismatch" in str(e.value)
def test_broken_atoms_missing(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Now we are going to delete an atom,
frame.topology.delete_atom_by_index(3)
with pytest.raises(RuntimeError) as e:
diff = ttraj - frame
assert "AtomMismatch" in str(e.value)
def test_broken_residues(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Somehow only altering residues per atom is not an unequal
# topology in mdtraj, so we just add one for mdtraj to trigger
chain = frame.topology.chain(0)
_ = frame.topology.add_residue(name='test',
chain=chain)
frame.topology.residue(0).resSeq = "test"
with pytest.raises(RuntimeError) as e:
diff = ttraj - frame
assert "ResidueMismatch" in str(e.value)
def test_broken_residues_missing(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Grab atom info to add 'equal' atoms at the end
element8 = frame.topology.atom(8).element
element9 = frame.topology.atom(9).element
name8 = frame.topology.atom(8).name
name9 = frame.topology.atom(9).name
serial8 = frame.topology.atom(8).serial
serial9 = frame.topology.atom(9).serial
# Remove residue from the internals of the topology
frame._topology = frame.topology.subset(range(8))
assert frame.topology.n_residues == 4
# Add the original atoms again
res = frame.topology.atom(7).residue
frame.topology.add_atom(name8, element8, res, serial8)
frame.topology.add_atom(name9, element9, res, serial9)
with pytest.raises(RuntimeError) as e:
diff = ttraj - frame
assert "ResidueMismatch" in str(e.value)
def test_broken_everything_missing(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# subset the topology
frame._topology = frame.topology.subset(range(8))
assert frame.topology.n_residues == 4
with pytest.raises(RuntimeError) as e:
diff = ttraj - frame
assert "OverrideTopology" in str(e.value)
class TestAtomMismatchedContactDifference(object):
def test_disabled_functions(self):
disabled_funcs = ['atom_contacts',
'most_common_atoms_for_contact',
'most_common_atoms_for_residue',
'haystack_residues',
'query_residues']
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
diff = AtomMismatchedContactDifference(ttraj, frame)
for f in disabled_funcs:
with pytest.raises(RuntimeError) as e:
func = getattr(diff, f)
# This should only trigger if f is not a property
# (otherwise the previous line should raise)
func()
            # test that 'atom' is in the error
assert 'atom' in str(e.value)
# Make sure residue contacts are still accessible
_ = diff.residue_contacts
def test_broken_atoms_altered(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Now we are going to change an element,
elem = md.element.get_by_symbol("H")
frame.topology.atom(0).element = elem
# Make sure this now works
diff = AtomMismatchedContactDifference(ttraj, frame)
assert diff.residue_contacts is not None
def test_broken_atoms_missing(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Now we are going to delete an atom; this needs to be the last atom
# otherwise we trigger on a short-circuit
frame.topology.delete_atom_by_index(9)
# Also need to make sure the name of the residue is different to not
# trigger a missing residue
frame.topology.residue(4).name = 'test'
# Make sure this now works
diff = AtomMismatchedContactDifference(ttraj, frame)
assert diff.residue_contacts is not None
class TestResidueMismatchedContactDifference(object):
def test_disabled_functions(self):
disabled_funcs = ['residue_contacts',
'_residue_ignore_atom_idxs',
'most_common_atoms_for_contact',
'most_common_atoms_for_residue',
'haystack_residues',
'query_residues']
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
diff = ResidueMismatchedContactDifference(ttraj, frame)
for f in disabled_funcs:
with pytest.raises(RuntimeError) as e:
func = getattr(diff, f)
# This should only trigger if f is not a property
# (otherwise the previous line should raise)
func()
            # test that 'residue' is in the error
            assert 'residue' in str(e.value)
        # Make sure atom contacts are still accessible
        _ = diff.atom_contacts
    def test_broken_residues_altered(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Somehow only altering residues per atom is not an unequal
# topology in mdtraj, so we just add one for mdtraj to trigger
chain = frame.topology.chain(0)
_ = frame.topology.add_residue(name='test',
chain=chain)
res = frame.topology.residue(0)
frame.topology.atom(9).residue = res
frame.topology.atom(8).residue = res
assert frame.topology.atom(9).residue.index == 0
assert frame.topology.atom(8).residue.index == 0
assert ttraj.topology.atom(9).residue.index == 4
assert ttraj.topology.atom(8).residue.index == 4
diff = ResidueMismatchedContactDifference(ttraj, frame)
assert diff.atom_contacts is not None
def test_broken_residues_missing(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Grab atom info to add 'equal' atoms at the end
element8 = frame.topology.atom(8).element
element9 = frame.topology.atom(9).element
name8 = frame.topology.atom(8).name
name9 = frame.topology.atom(9).name
serial8 = frame.topology.atom(8).serial
serial9 = frame.topology.atom(9).serial
# subset the topology
frame._topology = frame.topology.subset(range(8))
assert frame.topology.n_residues == 4
# Add the original atoms again
res = frame.topology.atom(7).residue
frame.topology.add_atom(name8, element8, res, serial8)
frame.topology.add_atom(name9, element9, res, serial9)
diff = ResidueMismatchedContactDifference(ttraj, frame)
assert diff.atom_contacts is not None
class TestOverrideTopologyContactDifference(object):
def test_broken_atoms_and_residues_missing(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
# Now we are going to delete an atom; this needs to be the last atom
# otherwise we trigger on a short-circuit
frame.topology.delete_atom_by_index(9)
# Make sure this now works
diff = OverrideTopologyContactDifference(ttraj, frame, ttraj.topology)
assert diff.residue_contacts is not None
assert diff.atom_contacts is not None
assert diff.topology == ttraj.topology
assert diff.topology != frame.topology
def test_still_broken_atoms_and_residues_missing(self):
ttraj = ContactFrequency(traj[0:4], cutoff=0.075,
n_neighbors_ignored=0)
frame = ContactFrequency(traj[4], cutoff=0.075,
n_neighbors_ignored=0)
frame.topology.delete_atom_by_index(9)
# Make sure still break
with pytest.raises(RuntimeError) as e:
diff = OverrideTopologyContactDifference(ttraj, frame,
frame.topology)
assert 'AtomMismatched' in str(e.value)
|
dwhswenson/contact_map
|
contact_map/tests/test_contact_map.py
|
Python
|
lgpl-2.1
| 43,947
|
[
"MDTraj"
] |
58d1c616e01f54ddd5083bda1b769ccbc6bcfffe7985c899700eb2481fb38a38
|
try:
import scipy
except ImportError:
from ase.test import NotAvailable
raise NotAvailable('This test needs scipy module.')
import numpy as np
from ase.io import read, PickleTrajectory
from ase.lattice import bulk
from ase.calculators.emt import EMT
a0 = 3.52 / np.sqrt(2)
c0 = np.sqrt(8 / 3.0) * a0
print('%.4f %.3f' % (a0, c0 / a0))
for i in range(3):
traj = PickleTrajectory('Ni.traj', 'w')
eps = 0.01
for a in a0 * np.linspace(1 - eps, 1 + eps, 4):
for c in c0 * np.linspace(1 - eps, 1 + eps, 4):
ni = bulk('Ni', 'hcp', a=a, covera=c / a)
ni.set_calculator(EMT())
ni.get_potential_energy()
traj.write(ni)
configs = read('Ni.traj@:')
energies = [config.get_potential_energy() for config in configs]
ac = [(config.cell[0, 0], config.cell[2, 2]) for config in configs]
from ase.optimize import polyfit
p = polyfit(ac, energies)
from scipy.optimize import fmin_bfgs
a0, c0 = fmin_bfgs(p, (a0, c0))
    print('%.4f %.3f' % (a0, c0 / a0))
assert abs(a0 - 2.466) < 0.001
assert abs(c0 / a0 - 1.632) < 0.005
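# Hedged reading of the loop above: each of the three passes fits a
# polynomial to E(a, c) over the 4x4 grid, minimizes it with BFGS, and
# re-centres the grid on the new (a0, c0), so the printed values converge
# toward the asserted hcp lattice constants for EMT nickel.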
|
JConwayAWT/PGSS14CC
|
lib/python/multimetallics/ase/test/hcp.py
|
Python
|
gpl-2.0
| 1,115
|
[
"ASE"
] |
ec70ad6a80ad367571da37d04e2dcc91c9a220ffcd302a275e34e62d6baccf7a
|
from click.testing import CliRunner
from perses.app.cli import cli
test_yaml = """
protein_pdb: Tyk2_protein.pdb
ligand_file: Tyk2_ligands_shifted.sdf
old_ligand_index: 0
new_ligand_index: 3
forcefield_files:
- amber/ff14SB.xml
- amber/tip3p_standard.xml
- amber/tip3p_HFE_multivalent.xml
- amber/phosaa10.xml
small_molecule_forcefield: openff-1.0.0
pressure: 1
temperature: 300
solvent_padding: 9
atom_expression:
- IntType
bond_expression:
- DefaultBonds
n_steps_per_move_application: 1
fe_type: repex
checkpoint_interval: 50
n_cycles: 1
n_states: 3
n_equilibration_iterations: 0
trajectory_directory: temp/offlig10to24
trajectory_prefix: out
atom_selection: not water
phases:
- complex
- solvent
- vacuum
timestep: 4
h_constraints: true
"""
def test_dummy_cli(in_tmpdir):
runner = CliRunner()
with runner.isolated_filesystem():
with open("test.yaml", "w") as f:
f.write(test_yaml)
result = runner.invoke(cli, ["--yaml", "test.yaml"])
print(result)
print(result.output)
assert result.exit_code == 0
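# Hedged sketch of an equivalent shell invocation (the console-script name
# "perses-cli" is an assumption, not taken from this file; the test above
# drives the same Click entry point through CliRunner instead):
#
#     perses-cli --yaml test.yaml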
|
choderalab/perses
|
perses/tests/test_cli.py
|
Python
|
mit
| 1,082
|
[
"Amber"
] |
0957e6ee9e7fb92f0a03cd7f53abc7362562efb695e62e64a5b47fc002a5061b
|
##############################################################################
# Copyright 2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from typing import Dict, List, Optional, cast
from requests.exceptions import MissingSchema
from pyquil.api._base_connection import get_json, get_session, ForestConnection
from pyquil.api._config import PyquilConfig
from pyquil.device._main import Device
def list_devices(connection: Optional[ForestConnection] = None) -> List[str]:
"""
Query the Forest 2.0 server for a list of underlying QPU devices.
NOTE: These can't directly be used to manufacture pyQuil Device objects, but this gives a list
of legal values that can be supplied to list_lattices to filter its (potentially very
noisy) output.
:return: A list of device names.
"""
# For the record, the dictionary stored in "devices" that we're getting back is keyed on device
# names and has this structure in its values:
#
# {
# "is_online": a boolean indicating the availability of the device,
# "is_retuning": a boolean indicating whether the device is busy retuning,
# "specs": a Specs object describing the entire device, serialized as a dictionary,
# "isa": an ISA object describing the entire device, serialized as a dictionary,
# "noise_model": a NoiseModel object describing the entire device, serialized as a dictionary
# }
if connection is None:
connection = ForestConnection()
session = connection.session
assert connection.forest_cloud_endpoint is not None
url = connection.forest_cloud_endpoint + "/devices"
return sorted(get_json(session, url)["devices"].keys())
def list_lattices(
device_name: Optional[str] = None,
num_qubits: Optional[int] = None,
connection: Optional[ForestConnection] = None,
) -> Dict[str, str]:
"""
Query the Forest 2.0 server for its knowledge of lattices. Optionally filters by underlying
device name and lattice qubit count.
:return: A dictionary keyed on lattice names and valued in dictionaries of the
form::
{
"device_name": device_name,
"qubits": num_qubits
}
"""
if connection is None:
connection = ForestConnection()
session = connection.session
assert connection.forest_cloud_endpoint is not None
url = connection.forest_cloud_endpoint + "/lattices"
try:
response = get_json(
session, url, params={"device_name": device_name, "num_qubits": num_qubits}
)
return cast(Dict[str, str], response["lattices"])
except Exception as e:
raise ValueError(
"""
list_lattices encountered an error when querying the Forest 2.0 endpoint.
Some common causes for this error include:
* You don't have valid user authentication information. Very likely this is because you
haven't yet been invited to try QCS. We plan on making our device information publicly
accessible soon, but in the meanwhile, you'll have to use default QVM configurations and
to use `list_quantum_computers` with `qpus = False`.
* You do have user authentication credentials, but they are invalid. You can visit
https://qcs.rigetti.com/auth/token and save to ~/.qcs/user_auth_token to update your
authentication credentials. Alternatively, you may provide the path to your credentials in
your config file or with the USER_AUTH_TOKEN_PATH environment variable::
[Rigetti Forest]
user_auth_token_path = ~/.qcs/my_auth_credentials
* You're missing an address for the Forest 2.0 server endpoint, or the address is invalid.
This too can be set through the environment variable FOREST_URL or by changing the
following lines in the QCS config file::
[Rigetti Forest]
url = https://forest-server.qcs.rigetti.com
For the record, here's the original exception: {}
""".format(
repr(e)
)
)
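# Hedged usage sketch (the device name and qubit count are illustrative
# filter values, not a promise about what the live endpoint returns):
#
#     lattices = list_lattices(device_name="Aspen-4", num_qubits=2)
#     for name, info in lattices.items():
#         print(name, info["device_name"], info["qubits"])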
def get_lattice(lattice_name: Optional[str] = None) -> Device:
"""
Construct a Device object to match the Forest 2.0 server's understanding of the named lattice.
:param lattice_name: Name of the desired lattice.
:return: A Device object.
"""
raw_lattice = _get_raw_lattice_data(lattice_name)
return Device(raw_lattice["name"], raw_lattice)
def _get_raw_lattice_data(lattice_name: Optional[str] = None) -> Dict[str, str]:
"""
Produces a dictionary of raw data for a lattice as queried from the Forest 2.0 server.
Returns a dictionary of the form::
{
"name": the name of the lattice as a string,
"device_name": the name of the device, given as a string, that the lattice lies on,
"specs": a Specs object, serialized as a dictionary,
"isa": an ISA object, serialized as a dictionary,
"noise_model": a NoiseModel object, serialized as a dictionary
}
"""
config = PyquilConfig()
session = get_session(config=config)
try:
res = get_json(session, f"{config.forest_url}/lattices/{lattice_name}")
except MissingSchema:
raise ValueError(
f"Error finding lattice `{lattice_name}` at Forest 2.0 server "
f"""endpoint `{config.forest_url}`.
Most likely, you're missing an address for the Forest 2.0 server endpoint, or the
address is invalid. This can be set through the environment variable FOREST_URL or
by changing the following lines in the QCS config file (by default, at ~/.qcs_config)::
[Rigetti Forest]
url = https://rigetti.com/valid/forest/url"""
)
return cast(Dict[str, str], res["lattice"])
|
rigetticomputing/pyquil
|
pyquil/api/_devices.py
|
Python
|
apache-2.0
| 6,508
|
[
"VisIt"
] |
514781aaae72c6b4115fda429cb221b2d230e61214396e28f8ed2a6889872221
|
#!/usr/bin/env python
"""
C.7 Mathematical Formulas (p187)
"""
from Arrays import Array
from plasTeX import Command, Environment, sourceChildren
from plasTeX import DimenCommand, GlueCommand
from plasTeX.Logging import getLogger
#
# C.7.1
#
# These space commands are only documented as being available in math mode,
# but it was requested to have them be in the global namespace.
class ThinSpace(Command):
macroName = '.'
unicode = u'\u2009'
class NegativeThinSpace(Command):
macroName = '!'
class MediumSpace(Command):
    macroName = ':'
    unicode = u'\u2004'
class ThickSpace(Command):
    macroName = ';'
    unicode = u'\u2002'
class ThinSpace_(Command):
macroName = '/'
unicode = u'\u2009'
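# Hedged, comment-only sketch of the markup these classes intercept, as
# implied by their macroName attributes: "\!" -> NegativeThinSpace,
# "\:" -> MediumSpace, "\;" -> ThickSpace, and "\." / "\/" -> the two
# thin-space variants rendered as U+2009.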
class MathEnvironment(Environment):
mathMode = True
# Need \newcommand\({\begin{math}} and \newcommand\){\end{math}}
class math(MathEnvironment):
@property
def source(self):
if self.hasChildNodes():
return '$%s$' % sourceChildren(self)
return '$'
class displaymath(MathEnvironment):
blockType = True
@property
def source(self):
if self.hasChildNodes():
return r'\[ %s \]' % sourceChildren(self)
if self.macroMode == Command.MODE_END:
return r'\]'
return r'\['
class BeginDisplayMath(Command):
macroName = '['
def invoke(self, tex):
o = self.ownerDocument.createElement('displaymath')
o.macroMode = Command.MODE_BEGIN
self.ownerDocument.context.push(o)
return [o]
class EndDisplayMath(Command):
macroName = ']'
def invoke(self, tex):
o = self.ownerDocument.createElement('displaymath')
o.macroMode = Command.MODE_END
self.ownerDocument.context.pop(o)
return [o]
class BeginMath(Command):
macroName = '('
def invoke(self, tex):
o = self.ownerDocument.createElement('math')
o.macroMode = Command.MODE_BEGIN
self.ownerDocument.context.push(o)
return [o]
class EndMath(Command):
macroName = ')'
def invoke(self, tex):
o = self.ownerDocument.createElement('math')
o.macroMode = Command.MODE_END
self.ownerDocument.context.pop(o)
return [o]
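# Hedged, comment-only sketch: the four commands above make "\[ x \]" expand
# into a displaymath environment and "\( x \)" into an inline math
# environment by pushing/popping the matching element on the document
# context stack.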
class ensuremath(Command):
args = 'self'
class equation(MathEnvironment):
blockType = True
counter = 'equation'
class EqnarrayStar(Array):
macroName = 'eqnarray*'
blockType = True
mathMode = True
class lefteqn(Command):
args = 'self'
def digest(self, tokens):
res = Command.digest(self, tokens)
obj = self.parentNode
while obj is not None and not isinstance(obj, Array.ArrayCell):
obj = obj.parentNode
if obj is not None:
obj.attributes['colspan'] = 3
obj.style['text-align'] = 'left'
return res
class ArrayCell(Array.ArrayCell):
@property
def source(self):
return '$\\displaystyle %s $' % sourceChildren(self, par=False)
class eqnarray(EqnarrayStar):
macroName = None
counter = 'equation'
class EndRow(Array.EndRow):
""" End of a row """
counter = 'equation'
def invoke(self, tex):
res = Array.EndRow.invoke(self, tex)
res[1].ref = self.ref
self.ownerDocument.context.currentlabel = res[1]
return res
def invoke(self, tex):
res = EqnarrayStar.invoke(self, tex)
if self.macroMode == self.MODE_END:
return res
res[1].ref = self.ref
return res
class nonumber(Command):
def invoke(self, tex):
self.ownerDocument.context.counters['equation'].addtocounter(-1)
def digest(self, tokens):
try:
row = self.parentNode
while not isinstance(row, Array.ArrayRow):
row = row.parentNode
row.ref = None
except AttributeError as e:
            print('problem encountered %s' % e)
class notag(nonumber):
pass
#
# Style Parameters
#
class jot(DimenCommand):
value = DimenCommand.new(0)
class mathindent(DimenCommand):
value = DimenCommand.new(0)
class abovedisplayskip(GlueCommand):
value = GlueCommand.new(0)
class belowdisplayskip(GlueCommand):
value = GlueCommand.new(0)
class abovedisplayshortskip(GlueCommand):
value = GlueCommand.new(0)
class belowdisplayshortskip(GlueCommand):
value = GlueCommand.new(0)
#
# C.7.2 Common Structures
#
# _
# ^
# '
class frac(Command):
args = 'numer denom'
class sqrt(Command):
args = '[ n ] self'
class ldots(Command):
unicode = u'\u2026'
class cdots(Command):
pass
class vdots(Command):
pass
class ddots(Command):
pass
#
# C.7.3 Mathematical Symbols
#
#
# Table 3.3: Greek Letters
#
class MathSymbol(Command):
pass
# Lowercase
class alpha(MathSymbol): unicode = unichr(945)
class beta(MathSymbol): unicode = unichr(946)
class gamma(MathSymbol): unicode = unichr(947)
class delta(MathSymbol): unicode = unichr(948)
class epsilon(MathSymbol): unicode = unichr(949)
class varepsilon(MathSymbol): unicode = unichr(949)
class zeta(MathSymbol): unicode = unichr(950)
class eta(MathSymbol): unicode = unichr(951)
class theta(MathSymbol): unicode = unichr(952)
class vartheta(MathSymbol): unicode = unichr(977)
class iota(MathSymbol): unicode = unichr(953)
class kappa(MathSymbol): unicode = unichr(954)
class GreekLamda(MathSymbol):
macroName = 'lambda'
unicode = unichr(955)
class mu(MathSymbol): unicode = unichr(956)
class nu(MathSymbol): unicode = unichr(957)
class xi(MathSymbol): unicode = unichr(958)
class pi(MathSymbol): unicode = unichr(960)
class varpi(MathSymbol): unicode = unichr(982)
class rho(MathSymbol): unicode = unichr(961)
class varrho(MathSymbol): unicode = unichr(1009)
class sigma(MathSymbol): unicode = unichr(963)
class varsigma(MathSymbol): unicode = unichr(962)
class tau(MathSymbol): unicode = unichr(964)
class upsilon(MathSymbol): unicode = unichr(965)
class phi(MathSymbol): unicode = unichr(966)
class varphi(MathSymbol): unicode = unichr(981)
class chi(MathSymbol): unicode = unichr(967)
class psi(MathSymbol): unicode = unichr(968)
class omega(MathSymbol): unicode = unichr(969)
# Uppercase
class Gamma(MathSymbol): unicode = unichr(915)
class Delta(MathSymbol): unicode = unichr(916)
class Theta(MathSymbol): unicode = unichr(920)
class Lambda(MathSymbol): unicode = unichr(923)
class Xi(MathSymbol): unicode = unichr(926)
class Pi(MathSymbol): unicode = unichr(928)
class Sigma(MathSymbol): unicode = unichr(931)
class Upsilon(MathSymbol): unicode = unichr(978)
class Phi(MathSymbol): unicode = unichr(934)
class Psi(MathSymbol): unicode = unichr(936)
class Omega(MathSymbol): unicode = unichr(8486)
#
# Table 3.4: Binary Operation Symbols
#
class pm(MathSymbol): unicode = unichr(177)
class mp(MathSymbol): unicode = unichr(8723)
class times(MathSymbol): unicode = unichr(215)
class div(MathSymbol): unicode = unichr(247)
class ast(MathSymbol): unicode = unichr(42)
class star(MathSymbol): unicode = unichr(8902)
class circ(MathSymbol): unicode = unichr(9675)
class bullet(MathSymbol): unicode = unichr(8226)
class cdot(MathSymbol): unicode = unichr(183)
class cap(MathSymbol): unicode = unichr(8745)
class cup(MathSymbol): unicode = unichr(8746)
class uplus(MathSymbol): unicode = unichr(8846)
class sqcap(MathSymbol): unicode = unichr(8851)
class sqcup(MathSymbol): unicode = unichr(8852)
class vee(MathSymbol): unicode = unichr(8744)
class wedge(MathSymbol): unicode = unichr(8743)
class setminus(MathSymbol): unicode = unichr(8726)
class wr(MathSymbol): unicode = unichr(8768)
class diamond(MathSymbol): unicode = unichr(8900)
class bigtriangleup(MathSymbol): unicode = unichr(9651)
class bigtriangledown(MathSymbol): unicode = unichr(9661)
class triangleleft(MathSymbol): unicode = unichr(9667)
class triangleright(MathSymbol): unicode = unichr(9657)
class lhd(MathSymbol): pass
class rhd(MathSymbol): pass
class unlhd(MathSymbol): pass
class unrhd(MathSymbol): pass
class oplus(MathSymbol): unicode = unichr(8853)
class ominus(MathSymbol): unicode = unichr(8854)
class otimes(MathSymbol): unicode = unichr(8855)
class oslash(MathSymbol): unicode = unichr(8856)
class odot(MathSymbol): unicode = unichr(8857)
class bigcirc(MathSymbol): unicode = unichr(9711)
class dagger(MathSymbol): unicode = unichr(8224)
class ddagger(MathSymbol): unicode = unichr(8225)
class amalg(MathSymbol): unicode = unichr(8720)
#
# Table 3.5: Relation Symbols
#
class Not(MathSymbol):
macroName = 'not'
args = 'symbol'
class leq(MathSymbol): unicode = unichr(8804)
class le(MathSymbol): unicode = unichr(8804)
class prec(MathSymbol): unicode = unichr(8826)
class preceq(MathSymbol): unicode = unichr(8828)
class ll(MathSymbol): unicode = unichr(8810)
class subset(MathSymbol): unicode = unichr(8834)
class subseteq(MathSymbol): unicode = unichr(8838)
class sqsubseteq(MathSymbol): unicode = unichr(8849)
class In(MathSymbol):
macroName = 'in'
class vdash(MathSymbol): unicode = unichr(8866)
class geq(MathSymbol): unicode = unichr(8805)
class ge(MathSymbol): unicode = unichr(8805)
class succ(MathSymbol): unicode = unichr(8827)
class succeq(MathSymbol): unicode = unichr(8829)
class gg(MathSymbol): unicode = unichr(8811)
class supset(MathSymbol): unicode = unichr(8835)
class supseteq(MathSymbol): unicode = unichr(8839)
class sqsupset(MathSymbol): unicode = unichr(8848)
class sqsupseteq(MathSymbol): unicode = unichr(8850)
class ni(MathSymbol): unicode = unichr(8715)
class dashv(MathSymbol): unicode = unichr(8867)
class equiv(MathSymbol): unicode = unichr(8801)
class sim(MathSymbol): unicode = unichr(8764)
class simeq(MathSymbol): unicode = unichr(8771)
class asymp(MathSymbol): unicode = unichr(8781)
class approx(MathSymbol): unicode = unichr(8776)
class cong(MathSymbol): unicode = unichr(8773)
class neq(MathSymbol): unicode = unichr(8800)
class ne(MathSymbol): unicode = unichr(8800)
class doteq(MathSymbol): unicode = unichr(8784)
class notin(MathSymbol): pass
class models(MathSymbol): unicode = unichr(8871)
class perp(MathSymbol): unicode = unichr(8869)
class mid(MathSymbol): unicode = unichr(8739)
class parallel(MathSymbol): unicode = unichr(8741)
class bowtie(MathSymbol): unicode = unichr(8904)
class Join(MathSymbol): pass
class smile(MathSymbol): unicode = unichr(8995)
class frown(MathSymbol): unicode = unichr(8994)
class propto(MathSymbol): unicode = unichr(8733)
#
# Table 3.6: Arrow Symbols
#
class leftarrow(MathSymbol): unicode = unichr(8592)
class Leftarrow(MathSymbol): unicode = unichr(8656)
class rightarrow(MathSymbol): unicode = unichr(8594)
class Rightarrow(MathSymbol): unicode = unichr(8658)
class leftrightarrow(MathSymbol): unicode = unichr(8596)
class Leftrightarrow(MathSymbol): unicode = unichr(8660)
class mapsto(MathSymbol): unicode = unichr(8614)
class hookleftarrow(MathSymbol): unicode = unichr(8617)
class leftharpoonup(MathSymbol): unicode = unichr(8636)
class leftharpoondown(MathSymbol): unicode = unichr(8637)
class rightleftharpoons(MathSymbol): unicode = unichr(8652)
class longleftarrow(MathSymbol): pass
class Longleftarrow(MathSymbol): pass
class longrightarrow(MathSymbol): pass
class Longrightarrow(MathSymbol): pass
class longleftrightarrow(MathSymbol): pass
class Longleftrightarrow(MathSymbol): pass
class longmapsto(MathSymbol): pass
class hookrightarrow(MathSymbol): unicode = unichr(8618)
class rightharpoonup(MathSymbol): unicode = unichr(8640)
class rightharpoondown(MathSymbol): unicode = unichr(8641)
class leadsto(MathSymbol): pass
class uparrow(MathSymbol): unicode = unichr(8593)
class Uparrow(MathSymbol): unicode = unichr(8657)
class downarrow(MathSymbol): unicode = unichr(8595)
class Downarrow(MathSymbol): unicode = unichr(8659)
class updownarrow(MathSymbol): unicode = unichr(8597)
class Updownarrow(MathSymbol): unicode = unichr(8661)
class nearrow(MathSymbol): unicode = unichr(8599)
class searrow(MathSymbol): unicode = unichr(8600)
class swarrow(MathSymbol): unicode = unichr(8601)
class nwarrow(MathSymbol): unicode = unichr(8598)
#
# Table 3.7: Miscellaneous Symbols
#
class aleph(MathSymbol): unicode = unichr(8501)
class hbar(MathSymbol): unicode = unichr(8463)
class imath(MathSymbol): pass
class jmath(MathSymbol): pass
class ell(MathSymbol): unicode = unichr(8467)
class wp(MathSymbol): unicode = unichr(8472)
class Re(MathSymbol): unicode = unichr(8476)
class Im(MathSymbol): unicode = unichr(8465)
class mho(MathSymbol): unicode = unichr(8487)
class prime(MathSymbol): unicode = unichr(8242)
class emptyset(MathSymbol): unicode = unichr(8709)
class nabla(MathSymbol): unicode = unichr(8711)
class surd(MathSymbol): unicode = unichr(8730)
class top(MathSymbol): unicode = unichr(8868)
class bot(MathSymbol): unicode = unichr(8869)
class VerticalBar(MathSymbol):
macroName = '|'
class forall(MathSymbol): unicode = unichr(8704)
class exists(MathSymbol): unicode = unichr(8707)
class neg(MathSymbol): pass
class flat(MathSymbol): unicode = unichr(9837)
class natural(MathSymbol): unicode = unichr(9838)
class sharp(MathSymbol): unicode = unichr(9839)
class backslash(MathSymbol): unicode = unichr(92)
class partial(MathSymbol): unicode = unichr(8706)
class infty(MathSymbol): unicode = unichr(8734)
class Box(MathSymbol): pass
class Diamond(MathSymbol): pass
class triangle(MathSymbol): unicode = unichr(9653)
class clubsuit(MathSymbol): unicode = unichr(9827)
class diamondsuit(MathSymbol): unicode = unichr(9830)
class heartsuit(MathSymbol): unicode = unichr(9829)
class spadesuit(MathSymbol): unicode = unichr(9824)
#
# Table 3.8: Variable-sized Symbols
#
class sum(MathSymbol): unicode = unichr(8721)
class prod(MathSymbol): unicode = unichr(8719)
class coprod(MathSymbol): unicode = unichr(8720)
class int(MathSymbol): unicode = unichr(8747)
class oint(MathSymbol): unicode = unichr(8750)
class bigcap(MathSymbol): pass
class bigcup(MathSymbol): pass
class bigsqcup(MathSymbol): pass
class bigvee(MathSymbol): pass
class bigwedge(MathSymbol): pass
class bigodot(MathSymbol): pass
class bigotimes(MathSymbol): pass
class bigoplus(MathSymbol): pass
class biguplus(MathSymbol): pass
#
# Table 3.9: Log-like Functions
#
class Logarithm(MathSymbol):
macroName = 'log'
class bmod(MathSymbol): pass
class pmod(MathSymbol):
args = 'self'
class arccos(MathSymbol): pass
class arcsin(MathSymbol): pass
class arctan(MathSymbol): pass
class arg(MathSymbol): pass
class cos(MathSymbol): pass
class cosh(MathSymbol): pass
class cot(MathSymbol): pass
class coth(MathSymbol): pass
class csc(MathSymbol): pass
class deg(MathSymbol): pass
class det(MathSymbol): pass
class dim(MathSymbol): pass
class exp(MathSymbol): pass
class gcd(MathSymbol): pass
class hom(MathSymbol): pass
class inf(MathSymbol): pass
class ker(MathSymbol): pass
class lg(MathSymbol): pass
class lim(MathSymbol): pass
class liminf(MathSymbol): pass
class limsup(MathSymbol): pass
class ln(MathSymbol): pass
class log(MathSymbol): pass
class max(MathSymbol): pass
class min(MathSymbol): pass
class Pr(MathSymbol): pass
class sec(MathSymbol): pass
class sin(MathSymbol): pass
class sinh(MathSymbol): pass
class sup(MathSymbol): pass
class tan(MathSymbol): pass
class tanh(MathSymbol): pass
#
# C.7.4 Arrays (see Arrays.py)
#
#
# C.7.5 Delimiters
#
class left(Command):
args = 'delim'
class right(Command):
args = 'delim'
# Table 3.10: Delimiters and TeXbook (p359)
class Delimiter(Command):
pass
class langle(Delimiter): pass
class rangle(Delimiter): pass
class lbrace(Delimiter): pass
class rbrace(Delimiter): pass
class lceil(Delimiter): pass
class rceil(Delimiter): pass
class lfloor(Delimiter): pass
class rfloor(Delimiter): pass
class lgroup(Delimiter): pass
class rgroup(Delimiter): pass
class lmoustache(Delimiter): pass
class rmoustache(Delimiter): pass
class uparrow(Delimiter): pass
class Uparrow(Delimiter): pass
class downarrow(Delimiter): pass
class Downarrow(Delimiter): pass
class updownarrow(Delimiter): pass
class Updownarrow(Delimiter): pass
class arrowvert(Delimiter): pass
class Arrowvert(Delimiter): pass
class vert(Delimiter): pass
class Vert(Delimiter): pass
class backslash(Delimiter): pass
class bracevert(Delimiter): pass
class bigl(Delimiter): pass
class bigm(Delimiter): pass
class bigr(Delimiter): pass
class Bigl(Delimiter): pass
class Bigm(Delimiter): pass
class Bigr(Delimiter): pass
class biggl(Delimiter): pass
class biggr(Delimiter): pass
class Biggl(Delimiter): pass
class Biggr(Delimiter): pass
class biggm(Delimiter): pass
class Biggm(Delimiter): pass
class Big(Delimiter):
args = 'char'
class bigg(Delimiter):
args = 'char'
class Bigg(Delimiter):
args = 'char'
class choose(Command):
pass
class brack(Command):
pass
class brace(Command):
pass
#class sqrt(Command):
# pass
#
# C.7.6 Putting One Thing Above Another
#
class overline(Command):
args = 'self'
class underline(Command):
args = 'self'
class overbrace(Command):
args = 'self'
class underbrace(Command):
args = 'self'
# Accents
class MathAccent(Command):
args = 'self'
class hat(MathAccent): pass
class check(MathAccent): pass
class breve(MathAccent): pass
class acute(MathAccent): pass
class grave(MathAccent): pass
class tilde(MathAccent): pass
class bar(MathAccent): pass
class vec(MathAccent): pass
class dot(MathAccent): pass
class ddot(MathAccent): pass
class widehat(MathAccent): pass
class widetilde(MathAccent): pass
class imath(MathAccent): pass
class jmath(MathAccent): pass
class stackrel(MathAccent):
args = 'top bottom'
#
# C.7.7 Spacing
#
# These are nested inside the MathEnvironment
#
# C.7.8 Changing Style
#
# Type Style
class mathrm(Command):
args = 'self'
class mathit(Command):
args = 'self'
class mathbf(Command):
args = 'self'
class mathsf(Command):
args = 'self'
class mathtt(Command):
args = 'self'
class mathcal(Command):
args = 'self'
class boldmath(Command):
pass
class unboldmath(Command):
pass
# Math Style
class displaystyle(Command):
pass
class textstyle(Command):
pass
class scriptstyle(Command):
pass
class scriptscriptstyle(Command):
pass
|
themutt/plastex
|
plasTeX/Base/LaTeX/Math.py
|
Python
|
mit
| 18,213
|
[
"Bowtie"
] |
efadb11fc38c8caddcf2da754d54a8ae800145f5ce8f98f822516c7cd418e323
|
#!/usr/bin/env python
# Computes and plots exact solution for "test V", in preparation for
# implementing C version for PISM verification.
from pylab import *
import subprocess
try:
from netCDF4 import Dataset as NC
except:
from netCDF3 import Dataset as NC
def permute(variable, output_order = ('time', 'z', 'zb', 'y', 'x')):
"""Permute dimensions of a NetCDF variable to match the output storage order."""
input_dimensions = variable.dimensions
# filter out irrelevant dimensions
dimensions = filter(lambda(x): x in input_dimensions,
output_order)
# create the mapping
mapping = map(lambda(x): dimensions.index(x),
input_dimensions)
if mapping:
return np.transpose(variable[:], mapping)
else:
return variable[:] # so that it does not break processing "mapping"
### Setup
secpera = 3.15569259747e7 # seconds per year
rho_sw = 1028.0 # sea water density
rho_ice = 910.0 # ice density
standard_gravity = 9.81 # g
B0 = 1.9e8 # ice hardness
# "typical constant ice parameter" as defined in the paper and in Van der
# Veen's "Fundamentals of Glacier Dynamics", 1999
C = (rho_ice * standard_gravity * (1.0 - rho_ice/rho_sw) / (4 * B0))**3
# upstream ice thickness
H0 = 600.0 # meters
# upstream ice velocity
v0 = 300.0/secpera # 300 meters/year
# upstream ice flux
Q0 = H0 * v0
Mx = 201
x = linspace(0, 400e3, Mx)
def H(x):
"""Ice thickness."""
return (4 * C / Q0 * x + 1 / H0**4)**(-0.25)
def v(x):
"""Ice velocity."""
return Q0 / H(x)
def x_c(t):
"""Location of the calving front."""
return Q0 / (4*C) * ((3*C*t + 1/H0**3)**(4.0/3.0) - 1/H0**4)
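# Sanity check (illustrative): flux conservation H(x) * v(x) == Q0 holds by
# construction, since v(x) is defined as Q0 / H(x).
assert abs(H(200e3) * v(200e3) - Q0) < 1e-12 * Q0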
def plot_xc(t_years):
"""Plot the location of the calving front."""
x = x_c(t_years * secpera)/1000.0 # convert to km
a = axis()
y_min = a[2]
y_max = a[3]
hold(True)
plot([x, x], [y_min, y_max], '--g')
def run_pismv(Mx, run_length, options, output):
command = "pismv -test V -y %f -Mx %d %s -o %s" % (run_length, Mx, options, output)
print "Running %s" % command
subprocess.call(command, shell=True)
def plot_pism_results(figure_number, filename, figure_title, color):
nc = NC(filename)
time = nc.variables['time'][0]/secpera # convert to years
thk = permute(nc.variables['thk'])[0,1,2:]
ubar_ssa = permute(nc.variables['cbar'])[0,1,2:]
x = nc.variables['x'][:]
dx = x[1] - x[0]
Lx = (x[-1] - x[0]) / 2.0
x_nc = (x[2:] + Lx - 2*dx) / 1000.0
hold(True)
figure(figure_number)
subplot(211)
title(figure_title)
plot(x_nc, H(x_nc*1000.0), color='black', linestyle='dashed')
plot(x_nc, thk, color=color, linewidth=2)
plot_xc(time)
ylabel("Ice thickness, m")
axis(xmin=0, xmax=400, ymax=600)
grid(True)
subplot(212)
plot(x_nc, v(x_nc*1000.0) * secpera, color='black', linestyle='dashed')
plot(x_nc, ubar_ssa, color=color, linewidth=2)
plot_xc(time)
axis(xmin=0, xmax=400, ymax=1000)
xlabel("km")
ylabel("ice velocity, m/year")
grid(True)
nc.close()
options = "-ssa_method fd -cfbc -part_grid -Lx 250"
run_pismv(101, 300, options, "out.nc")
plot_pism_results(1, "out.nc", "Figure 6 (b) (-part_grid)", 'blue')
run_pismv(101, 300, options + " -max_dt 1", "out.nc")
plot_pism_results(1, "out.nc", "Figure 6 (b) (-part_grid)", 'green')
run_pismv(101, 300, options + " -part_redist", "out.nc")
plot_pism_results(2, "out.nc", "Figure 6 (c) (-part_grid -part_redist)", 'blue')
run_pismv(101, 300, options + " -part_redist -max_dt 1", "out.nc")
plot_pism_results(2, "out.nc", "Figure 6 (c) (-part_grid -part_redist)", 'green')
show()
|
JohannesFeldmann/pism
|
src/verif/tests/exactV.py
|
Python
|
gpl-2.0
| 3,843
|
[
"NetCDF"
] |
3c3ac55825c9750ad2ec07fdd3553669b36b0b881361e753333ec921085aeaaf
|
import functools
from wadl2rst.nodes.responses import ResponsesNode
def wrap_response_elements(tree):
""" Find all the <response> nodes and make sure they are wrapped by <responses>
nodes. """
# grab all the response nodes
response_nodes = []
response_vistor = functools.partial(get_response_nodes, response_nodes)
tree.visit(response_vistor)
for node in response_nodes:
parent = node.parent
responses_node = None
if node.parent.name == "responses":
continue
# try to find a responses node on the parent
for sibling in parent.children:
if sibling.name == "responses":
responses_node = sibling
break
# no responses node, so add one
if responses_node is None:
responses_node = ResponsesNode(parent, "responses", {})
parent.add_child(responses_node)
# now move the node under the responses node
node.parent = responses_node
responses_node.add_child(node)
parent.remove_child(node)
def get_response_nodes(memory, node):
if node.name == "response" and node.parent.name != "responses":
memory.append(node)
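# Illustrative effect (schematic):
#
#   method                       method
#     +- response       ==>        +- responses
#     +- response                      +- response
#                                      +- response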
|
annegentle/wadl2rst
|
wadl2rst/transformations/wrap_response_elements.py
|
Python
|
apache-2.0
| 1,220
|
[
"VisIt"
] |
ab1bbc9b78e313fa5464308fe07e304c6debcd9f1a8b84acca07e0422b68a705
|
# S.D. Peckham
# May 2010
import os
import sys
import time
import numpy
import file_utils
# import Nio # (a module in the PyNIO package)
#---------------------------------------------------------------------
# This class is for I/O of time-indexed 1D profiles to netCDF files.
#---------------------------------------------------------------------
#
# unit_test()
# unit_test2()
# save_as_text() # (not ready yet)
#
# class ncps_file():
#
# import_nio()
# open_file()
# get_nio_type_map()
# open_new_file()
# update_time_index()
#-------------------------------
# add_profile()
# get_profile()
#-------------------------------
# profiles_at_IDs()
# add_profiles_at_IDs()
#-------------------------------
# close_file()
# close()
#
#-------------------------------------------------------------------
def unit_test(n_times=5, nz=10, VERBOSE=False,
file_name="NCPS_Profile_Test.nc"):
#--------------------------------------------------------
# Notes: This test uses add_profile() and get_profile()
# to add and retrieve a time-indexed set of 1D
# profiles to/from a file. An example would be
# a set of soil-moisture profiles that vary with
# both depth, z, and time.
#--------------------------------------------------------
print ' '
print 'Running unit_test()...'
#-------------------------------------
# Make instance of ncps_file() class
#-------------------------------------
ncps = ncps_file()
var_names = ['theta']
z_values = numpy.arange( nz, dtype='Float64' )
z_units = 'm'
OK = ncps.open_new_file( file_name,
z_values=z_values,
z_units=z_units,
var_names=var_names,
long_names=['soil_water_content'],
units_names=['none'],
dtypes=['float64'],
time_units='minutes',
comment="Created by TopoFlow 3.0.")
###############################################
# WHAT ABOUT LONG_NAME for the TIME VALUES ??
###############################################
if not(OK):
print 'ERROR during open_new_file().'
return
profile = numpy.exp(-0.1 * z_values)
times = numpy.arange( n_times, dtype='Float64') * 0.1
#-----------------------------------
# Add a series of profiles to file
#-----------------------------------
print 'Writing profiles to ncps file...'
for time_index in xrange(n_times):
time = times[ time_index ]
ncps.add_profile( profile, var_names[0], time )
#----------------------------------------------
ncps.update_time_index()
profile += 1 ## (make profile change in time)
if (VERBOSE):
        print ncps.ncps_unit  # (print a summary)
ncps.close_file()
print 'Finished writing ncps file: ' + file_name
print ' '
#-----------------------------------------
# Re-open the file and read the profiles
#-----------------------------------------
OK = ncps.open_file( ncps.file_name )
if not(OK): return
print 'Reading values from ncps file: '
for time_index in xrange(n_times):
profile, time = ncps.get_profile(var_names[0], time_index)
ti_str = str(time_index)
print 'time[' + ti_str + '] =', time
print 'profile[' + ti_str + '] =', profile
print '-----------------------------------------------'
#-----------------
# Close the file
#-----------------
ncps.close_file()
print 'Finished reading ncps file: ' + file_name
print ' '
# unit_test()
#-------------------------------------------------------------------
def unit_test2(n_times=5, nz=10, VERBOSE=False,
file_name="NCPS_Profile_Test2.nc"):
#--------------------------------------------------------
# Notes: This test uses add_profile() and get_profile()
# to add and retrieve a time-indexed set of 1D
# profiles to/from a file. An example would be
# a set of soil-moisture profiles that vary with
# both depth, z, and time.
#--------------------------------------------------------
print ' '
print 'Running unit_test2()...'
#-------------------------------------
# Make instance of ncps_file() class
#-------------------------------------
ncps = ncps_file()
var_name = 'theta'
z_values = numpy.arange( nz, dtype='Float64' )
z_units = 'm'
IDs = ([1,2,3], [1,2,3])
var_names = ['theta_1_1', 'theta_2_2', 'theta_3_3']
long_names = ['soil_water_content_profile_at_1_1',
'soil_water_content_profile_at_2_2',
'soil_water_content_profile_at_3_3']
units_names = ['none', 'none', 'none']
dtypes = ['float64']
# dtypes = ['float64', 'float64', 'float64']
OK = ncps.open_new_file( file_name,
z_values=z_values,
z_units=z_units,
var_names=var_names,
long_names=long_names,
units_names=units_names,
dtypes=dtypes,
time_units='minutes',
comment="Created by TopoFlow 3.0.")
###############################################
# WHAT ABOUT LONG_NAME for the TIME VALUES ??
###############################################
if not(OK):
print 'ERROR during open_new_file().'
return
profile = numpy.exp(-0.1 * z_values)
times = numpy.arange( n_times, dtype='Float64') * 0.1
print 'z_values =', z_values
print 'profile =', profile
print 'times =', times
print ' '
ny = 5
nx = 5
var = numpy.zeros([nz,ny,nx], dtype='float64')
for k in xrange(nz):
var[k,:,:] = profile[k]
#-----------------------------------
# Add a series of profiles to file
#-----------------------------------
print 'Writing profiles to ncps file...'
for time_index in xrange(n_times):
time = times[ time_index ]
ncps.add_profiles_at_IDs(var, var_name, IDs, time )
#-------------------------------------------------
# Don't need to update_time_index, done already.
#-------------------------------------------------
#### ncps.update_time_index()
var += 1 ## (make profiles change in time)
if (VERBOSE):
        print ncps.ncps_unit  # (print a summary)
ncps.close_file()
print 'Finished writing ncps file: ' + file_name
print ' '
#-----------------------------------------
# Re-open the file and read the profiles
#-----------------------------------------
OK = ncps.open_file( ncps.file_name )
if not(OK): return
print 'Reading values from ncps file: '
for time_index in xrange(n_times):
profile, time = ncps.get_profile(var_names[0], time_index)
ti_str = str(time_index)
print 'time[' + ti_str + '] =', time
print 'profile[' + ti_str + '] =', profile
print '-----------------------------------------------'
#-----------------
# Close the file
#-----------------
ncps.close_file()
print 'Finished reading ncps file: ' + file_name
print ' '
# unit_test2()
#-------------------------------------------------------------------
def save_as_text(ncps_file_name=None, text_file_name=None):
ncps = ncps_file()
OK = ncps.open_file( ncps_file_name )
if not(OK): return
var_name = 'theta'
data = ncps.get_profile( var_name )
ncps.close()
data = numpy.array( data )
print 'min(data), max(data) =', data.min(), data.max()
text_unit = open( text_file_name, 'w' )
    data.tofile( text_unit )
text_unit.close()
# save_as_text()
#-------------------------------------------------------------------
class ncps_file():
#----------------------------------------------------------
# Note: ncps = NetCDF Time Series (used by CSDMS)
#----------------------------------------------------------
def import_nio(self):
try:
import Nio # (a module in the PyNIO package)
# print 'Imported Nio version: ' + Nio.__version__
return Nio
except:
## python_version = sys.version[:3]
## print ' '
## print 'SORRY, Cannot write netCDF files because'
## print 'the "Nio" package cannot be imported.'
## print ' '
## if (python_version != '2.6'):
## print 'Note that "PyNIO" is only installed for'
## print 'Python version 2.6 on "beach".'
## print 'The current Python version is:', python_version
## print ' '
return False
# import_nio()
#----------------------------------------------------------
def open_file(self, file_name):
#--------------------------------------------------
# Try to import the Nio module from PyNIO package
#--------------------------------------------------
Nio = self.import_nio()
if not(Nio): return
#-------------------------
# Open file to read only
#-------------------------
try:
ncps_unit = Nio.open_file(file_name, mode="r")
self.ncps_unit = ncps_unit
### return ncps_unit
return True
except:
return False
# open_file()
#----------------------------------------------------------
def get_nio_type_map(self):
#----------------------------------------
# Possible settings for "nio_type_code"
#-------------------------------------------
# nio_type_code = "d" # (double, Float64)
# nio_type_code = "f" # (float, Float32)
# nio_type_code = "l" # (long, Int64)
# nio_type_code = "i" # (int, Int32)
# nio_type_code = "h" # (short, Int16)
# nio_type_code = "b" # (byte, Int8)
# nio_type_code = "S1" # (char)
#-------------------------------------------
nio_type_map = {'float64':'d', 'float32':'f',
'int64':'l', 'int32':'i',
                        'int16':'h', 'int8':'b',
'S|100':'S1'} # (check last entry)
return nio_type_map
# get_nio_type_map()
#----------------------------------------------------------
def open_new_file(self, file_name,
z_values=numpy.arange(10),
z_units='m',
var_names=['X'],
long_names=[None],
units_names=['None'],
dtypes=['float64'],
time_units='minutes',
comment=''):
#----------------------------------------------------
# Notes: It might be okay to have "nz" be an
# unlimited dimension, like "time". This
# would mean replacing "int(profile_length)"
# with "None".
#----------------------------------------------------
#--------------------------------------------------
# Try to import the Nio module from PyNIO package
#--------------------------------------------------
Nio = self.import_nio()
if not(Nio): return False
#----------------------------
# Does file already exist ?
#----------------------------
file_name = file_utils.check_overwrite( file_name )
self.file_name = file_name
#---------------------------------------
# Check and store the time series info
#---------------------------------------
self.format = 'ncps'
self.file_name = file_name
self.time_index = 0
if (long_names[0] is None):
long_names = var_names
#-------------------------------------------
self.z_values = z_values
self.z_units = z_units
nz = numpy.size(z_values)
#-------------------------------------------
# We may not need to save these in self.
# I don't think they're used anywhere yet.
#-------------------------------------------
self.var_names = var_names
self.long_names = long_names
self.units_names = units_names
self.dtypes = dtypes
#---------------------------------------------
# Create array of Nio type codes from dtypes
#---------------------------------------------
nio_type_map = self.get_nio_type_map()
nio_type_codes = []
if (len(dtypes) == len(var_names)):
for dtype in dtypes:
nio_type_code = nio_type_map[ dtype.lower() ]
nio_type_codes.append( nio_type_code )
else:
dtype = dtypes[0]
nio_type_code = nio_type_map[ dtype.lower() ]
for k in xrange(len(var_names)):
nio_type_codes.append( nio_type_code )
self.nio_type_codes = nio_type_codes
#-------------------------------------
# Open a new netCDF file for writing
#-------------------------------------
# Sample output from time.asctime():
# "Thu Oct 8 17:10:18 2009"
#-------------------------------------
opt = Nio.options()
opt.PreFill = False # (for efficiency)
opt.HeaderReserveSpace = 4000 # (4000 bytes, for efficiency)
history = "Created using PyNIO " + Nio.__version__ + " on "
history = history + time.asctime() + ". "
history = history + comment
try:
ncps_unit = Nio.open_file(file_name, mode="w",
options=opt, history=history )
OK = True
except:
OK = False
return OK
#------------------------------------------------
# Create an unlimited time dimension (via None)
#------------------------------------------------
# Without using "int()" here, we get this:
# TypeError: size must be None or integer
#------------------------------------------------
ncps_unit.create_dimension("nz", int(nz))
ncps_unit.create_dimension("time", None)
#-------------------------
# Create a time variable
#---------------------------------------------------
        #('d' = float64; must match the dtype used in add_profiles_at_IDs())
#---------------------------------------------------
# NB! Can't use "time" vs. "tvar" here unless we
# add "import time" inside this function.
#---------------------------------------------------
tvar = ncps_unit.create_variable('time', 'd', ("time",))
ncps_unit.variables['time'].units = time_units
#--------------------------------------
# Create a distance/depth variable, z
#--------------------------------------
zvar = ncps_unit.create_variable('z', 'd', ("nz",))
zvar[ : ] = z_values # (store the z-values)
ncps_unit.variables['z'].units = z_units
#-----------------------------------
# Create variables using var_names
#-----------------------------------
# Returns "var" as a PyNIO object
#---------------------------------------------------
# NB! The 3rd argument here (dimension), must be a
# tuple. If there is only one dimension, then
# we need to add a comma, as shown.
#---------------------------------------------------
for k in xrange(len(var_names)):
var_name = var_names[k]
var = ncps_unit.create_variable(var_name, nio_type_codes[k],
("time", "nz"))
#------------------------------------
# Create attributes of the variable
#------------------------------------
ncps_unit.variables[var_name].long_name = long_names[k]
ncps_unit.variables[var_name].units = units_names[k]
#----------------------------------
# Specify a "nodata" fill value ?
#----------------------------------
var._FillValue = -9999.0 ## Does this jive with Prefill above ??
self.ncps_unit = ncps_unit
return OK
# open_new_file()
#----------------------------------------------------------
def update_time_index(self, step=1):
#-----------------------------------------------------
# We shouldn't update clock in every add_profile()
# call because different profiles (for same time)
# may be written with different add_profile() calls.
#-----------------------------------------------------
#------------------------------------
# Increment the internal time index
#------------------------------------
self.time_index += step
# update_time_index()
#----------------------------------------------------------
def add_profile(self, profile, var_name, time=None,
time_index=-1):
#-----------------------------------------------------
# Note: "time_index" allows insertion/overwrite
# of a profile at a particular time index.
#-----------------------------------------------------
# This syntax works for scalars and grids
# nc_unit.variables[var_name].assign_value( values )
#-----------------------------------------------------
#-------------------------------------
# Can use time_index to overwrite an
# existing grid vs. simple append.
#-------------------------------------
if (time_index == -1):
time_index = self.time_index
if (time is None):
time = numpy.float64( time_index )
#---------------------------------------------
# Write a data value to existing netCDF file
#---------------------------------------------
profiles = self.ncps_unit.variables[ var_name ]
profiles[ time_index ] = profile
#------------------------------------------------
times = self.ncps_unit.variables[ 'time' ]
times[ time_index ] = time
######################################################
# We shouldn't update clock in every add_profile()
# call because different profiles (for same time)
# may be written with different add_profile() calls.
######################################################
#------------------------------------
# Increment the internal time index
#------------------------------------
# self.time_index += numpy.size(values)
# add_profile()
#----------------------------------------------------------
def get_profile(self, var_name, time_index):
profiles = self.ncps_unit.variables[ var_name ]
times = self.ncps_unit.variables[ 'time' ]
return (profiles[ time_index ], times[ time_index ])
# get_profile()
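    # Illustrative usage (assuming a file written earlier by this class, as
    # in unit_test() above):
    #   ncps = ncps_file()
    #   if ncps.open_file('NCPS_Profile_Test.nc'):
    #       profile, t = ncps.get_profile('theta', 0)
    #       ncps.close_file()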
#-------------------------------------------------------------------
def profiles_at_IDs(self, var, IDs):
#---------------------------
# Get the dimensions, etc.
#---------------------------
ndims = numpy.ndim(var)
dtype = self.dtypes[0]
nz = var.shape[0]
# nz = numpy.size(var, 0) # (also works)
n_IDs = numpy.size(IDs[0])
profiles = numpy.zeros([n_IDs, nz], dtype=dtype)
if (ndims == 1):
#------------------------------
# Variable is a 1D profile,
# and is the same for all IDs
#------------------------------
for k in xrange(n_IDs):
profiles[k, :] = var.astype(dtype)
else:
#---------------------------------
# Variable is a 3D array; return
# a profile for each ID
#---------------------------------
for k in xrange(nz):
layer = var[k,:,:].astype(dtype)
profiles[:, k] = layer[ IDs ]
return profiles
# profiles_at_IDs()
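    # Example (illustrative): for IDs = ([1, 2], [3, 4]) and a 3D var of
    # shape (nz, ny, nx), row 0 of the result is the profile var[:, 1, 3]
    # and row 1 is var[:, 2, 4].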
#-------------------------------------------------------------------
def add_profiles_at_IDs(self, var, var_name, IDs, time=None,
time_index=-1):
#-------------------------------------
# Can use time_index to overwrite an
# existing grid vs. simple append.
#-------------------------------------
if (time_index == -1):
time_index = self.time_index
if (time is None):
time = numpy.float64( time_index )
#---------------------------------------------
# Write current time to existing netCDF file
#---------------------------------------------
times = self.ncps_unit.variables[ 'time' ]
times[ time_index ] = time
#--------------------------------------------
# Write data values to existing netCDF file
#--------------------------------------------
profiles = self.profiles_at_IDs( var, IDs )
rows = IDs[0]
cols = IDs[1]
n_IDs = numpy.size(rows)
for k in xrange(n_IDs):
#----------------------------------------
# Construct var_name of form: Q[24,32]
# or, if necessary, Q_24_32
#----------------------------------------
row_str = '_' + str(rows[k])
col_str = '_' + str(cols[k])
#--------------------------------------------------
# Must match with model_output.open_new_ps_file()
#--------------------------------------------------
## row_str = '[' + str(rows[k]) + ','
## col_str = str(cols[k]) + ']'
vname = var_name + row_str + col_str
profile_series = self.ncps_unit.variables[ vname ]
profile_series[ time_index ] = profiles[k,:]
## print 'added profile =', profiles[k,:] ###########
#---------------------------
# Increment the time index
#---------------------------
self.time_index += 1
# add_profiles_at_IDs()
#-------------------------------------------------------------------
def close_file(self):
self.ncps_unit.close()
# close_file()
#-------------------------------------------------------------------
def close(self):
self.ncps_unit.close()
# close()
#-------------------------------------------------------------------
|
mdpiper/topoflow
|
topoflow/utils/ncps_files_Nio.py
|
Python
|
mit
| 23,399
|
[
"NetCDF"
] |
0826095298a952ae7a7f06c6f84058190a1e646309b1d0a16baa53be366fc74e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: gallery 30
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Just a very fake galaxy.
Astronomers and cosmologists will kill me !
"""
import numpy as np
from vispy import gloo
from vispy import app
from vispy.util.transforms import perspective, translate, rotate
# Manual galaxy creation
# (did you really expect a simulation in less than 250 python lines ?)
def make_arm(n, angle):
R = np.linspace(10, 450 + 50 * np.random.uniform(.5, 1.), n)
R += 40 * np.random.normal(0, 2., n) * np.linspace(1, .1, n)
T = angle + np.linspace(0, 2.5 * np.pi, n) + \
np.pi / 6 * np.random.normal(0, .5, n)
S = 8 + 2 * np.abs(np.random.normal(0, 1, n))
S *= np.linspace(1, .85, n)
P = np.zeros((n, 3), dtype=np.float32)
X, Y, Z = P[:, 0], P[:, 1], P[:, 2]
X[...] = R * np.cos(T)
Y[...] = R * np.sin(T) * 1.1
D = np.sqrt(X * X + Y * Y)
Z[...] = 8 * np.random.normal(0, 2 - D / 512., n)
X += (D * np.random.uniform(0, 1, n) > 250) * \
(.05 * D * np.random.uniform(-1, 1, n))
Y += (D * np.random.uniform(0, 1, n) > 250) * \
(.05 * D * np.random.uniform(-1, 1, n))
Z += (D * np.random.uniform(0, 1, n) > 250) * \
(.05 * D * np.random.uniform(-1, 1, n))
D = (D - D.min()) / (D.max() - D.min())
return P / 256, S / 2, D
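# (Illustrative note: each call returns n points for one spiral arm:
# positions P scaled to roughly [-2, 2], per-point sizes S, and a normalized
# centre distance D in [0, 1] that indexes the colormap defined below.)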
p = 50000
n = 3 * p
# Very simple colormap
cmap = np.array([[255, 124, 0], [255, 163, 76],
[255, 192, 130], [255, 214, 173],
[255, 232, 212], [246, 238, 237],
[237, 240, 253], [217, 228, 255],
[202, 219, 255], [191, 212, 255],
[182, 206, 255], [174, 202, 255],
[168, 198, 255], [162, 195, 255],
[158, 192, 255], [155, 189, 255],
[151, 187, 255], [148, 185, 255],
[145, 183, 255], [143, 182, 255],
[141, 181, 255], [140, 179, 255],
[139, 179, 255],
[137, 177, 255]], dtype=np.uint8).reshape(1, 24, 3)
VERT_SHADER = """
#version 120
// Uniforms
// ------------------------------------
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform float u_size;
// Attributes
// ------------------------------------
attribute vec3 a_position;
attribute float a_size;
attribute float a_dist;
// Varyings
// ------------------------------------
varying float v_size;
varying float v_dist;
void main (void) {
v_size = a_size*u_size*.75;
v_dist = a_dist;
gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);
gl_PointSize = v_size;
}
"""
FRAG_SHADER = """
#version 120
// Uniforms
// ------------------------------------
uniform sampler2D u_colormap;
// Varyings
// ------------------------------------
varying float v_size;
varying float v_dist;
// Main
// ------------------------------------
void main()
{
float a = 2*(length(gl_PointCoord.xy - vec2(0.5,0.5)) / sqrt(2.0));
vec3 color = texture2D(u_colormap, vec2(v_dist,.5)).rgb;
gl_FragColor = vec4(color,(1-a)*.25);
}
"""
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, keys='interactive', size=(800, 600))
ps = self.pixel_scale
self.title = "A very fake galaxy [mouse scroll to zoom]"
data = np.zeros(n, [('a_position', np.float32, 3),
('a_size', np.float32),
('a_dist', np.float32)])
for i in range(3):
P, S, D = make_arm(p, i * 2 * np.pi / 3)
data['a_dist'][(i + 0) * p:(i + 1) * p] = D
data['a_position'][(i + 0) * p:(i + 1) * p] = P
data['a_size'][(i + 0) * p:(i + 1) * p] = S*ps
self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
self.model = np.eye(4, dtype=np.float32)
self.projection = np.eye(4, dtype=np.float32)
self.theta, self.phi = 0, 0
self.translate = 5
self.view = translate((0, 0, -self.translate))
self.program.bind(gloo.VertexBuffer(data))
self.program['u_colormap'] = gloo.Texture2D(cmap)
self.program['u_size'] = 5. / self.translate
self.program['u_model'] = self.model
self.program['u_view'] = self.view
self.apply_zoom()
gloo.set_state(depth_test=False, blend=True,
blend_func=('src_alpha', 'one'), clear_color='black')
# Start the timer upon initialization.
self.timer = app.Timer('auto', connect=self.on_timer)
self.timer.start()
self.show()
def on_key_press(self, event):
if event.text == ' ':
if self.timer.running:
self.timer.stop()
else:
self.timer.start()
def on_timer(self, event):
self.theta += .11
self.phi += .13
self.model = np.dot(rotate(self.theta, (0, 0, 1)),
rotate(self.phi, (0, 1, 0)))
self.program['u_model'] = self.model
self.update()
def on_resize(self, event):
self.apply_zoom()
def on_mouse_wheel(self, event):
self.translate -= event.delta[1]
self.translate = max(2, self.translate)
self.view = translate((0, 0, -self.translate))
self.program['u_view'] = self.view
self.program['u_size'] = 5 / self.translate
self.update()
def on_draw(self, event):
gloo.clear()
self.program.draw('points')
def apply_zoom(self):
gloo.set_viewport(0, 0, self.physical_size[0], self.physical_size[1])
self.projection = perspective(45.0, self.size[0] /
float(self.size[1]), 1.0, 1000.0)
self.program['u_projection'] = self.projection
if __name__ == '__main__':
c = Canvas()
app.run()
|
Eric89GXL/vispy
|
examples/demo/gloo/galaxy.py
|
Python
|
bsd-3-clause
| 5,930
|
[
"Galaxy"
] |
b1a518f1b0310cd84c8a5943f5237a24a8e87b5c454adcabed12b76c09987266
|
from __future__ import print_function, division
inp = """
#File names and Format
#Directory to save output files
direc 'easy1000'
stem 'easy1000'
make_image 0
output '.gve' '.flt' '.par' '.ubi'
# Structural parameters
structure_phase_0 'Al.cif'
# Crystal/grains parameters
no_grains 1000
# Total number of grains summed over all phases to be simulated.
# Need to match the number of U_Grains_X key word
gen_size 1 -0.05 0.01 0.2
#Grain phase : If you want to let the PolyXSim appoint which grain
# belongs to which phase the following keyword can be used.
gen_phase 1 1 0
# Grain orientation : random (1) or specify specific orientation matrices (0)
gen_U 0
# 1 random, box or cylinder
gen_pos 0 0
# pos_grains_0 0.1 -0.1 0.05
#sample_xyz 1.0 1.0 1.0
gen_eps 1 0 0 0 0
# Instrumentation
# Detector pixel size [mm]
y_size 0.05
z_size 0.05
#Detector size [pixels]
dety_size 2048
detz_size 2048
#Distance from sample to detector [mm]
distance 300
#Detector tilt
tilt_x 0.005
tilt_y 0.01
tilt_z 0.008
#Detector orientation
o11 1
o12 0
o21 0
o22 -1
#Noise
noise 0
#Reflection
intensity_const 1
lorentz_apply 1
beampol_apply 1
peakshape 0
#Instrumental
#Beam specs, Pt edge
wavelength 0.158154
beamflux 1e-12
beampol_factor 1
beampol_direct 0
#Beam center on detector [pixels]
dety_center 1022.1
detz_center 1028.3
#Omega scan range [degrees]
omega_start 0
omega_end 180
#Omega step size [degrees]
omega_step 0.25
#Omega rotation direction [degrees]
omega_sign 1
#Wedge angle of omega axis
wedge 0.0
"""
"U_grains_0 7.712806e-01 -6.337184e-01 5.939117e-02 6.130920e-01 7.146102e-01 -3.368241e-01 1.710101e-01 2.961981e-01 9.396926e-01"
import numpy as np, xfab.tools
# generate a 10x10x10 grid of grains.
# add something to this
x,y,z = np.mgrid[0:10,0:10,0:10]-5
dx = np.sin(np.arange(1000))/3 # +/- 1/3
dy = np.cos(np.arange(1000))/5
dz = np.sin(np.arange(1000))/7
t = np.array( (x.ravel()+dx, y.ravel()+dy, z.ravel()+dz ) )/10
#print t.shape
np.savetxt("t",t.T)
# orientations....
# t is in range [-0.5 -> 0.5] - make it rod also.
u = [xfab.tools.rod_to_u( v.astype(float)) for v in t.T]
f=open("easy1000.inp","w")
f.write(inp)
for i,v in enumerate(t.T):
f.write("pos_grains_%d %f %f %f\n"%(i,v[0]/10,v[1]/10,v[2]/10))
for i,v in enumerate(u):
f.write( ("U_grains_%d"+ " %.12f"*9+'\n')%( i,
        v[0,0],v[0,1],v[0,2],v[1,0],v[1,1],v[1,2],v[2,0],v[2,1],v[2,2] ))
f.close()
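# Sanity sketch (illustrative, not executed): each U returned by rod_to_u
# should be a proper rotation matrix, e.g.
#   for U in u[:3]:
#       assert np.allclose(np.dot(U.T, U), np.eye(3))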
|
jonwright/ImageD11
|
test/simul_1000_grains/make_u_t_easy.py
|
Python
|
gpl-2.0
| 2,398
|
[
"CRYSTAL"
] |
41ddb6ec28dc6f3b6982f46cc7d746734ff7c1f25af3f9bccfdb5856a2941eec
|
"""
NetCDF reader/writer module.
This module is used to read and create NetCDF files. NetCDF files are
accessed through the `netcdf_file` object. Data written to and from NetCDF
files are contained in `netcdf_variable` objects. Attributes are given
as member variables of the `netcdf_file` and `netcdf_variable` objects.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files.
Only NetCDF3 is supported here; for NetCDF4 see
`netCDF4-python <http://unidata.github.io/netcdf4-python/>`__,
which has a similar API.
"""
from __future__ import division, print_function, absolute_import
# TODO:
# * properly implement ``_FillValue``.
# * fix character variables.
# * implement PAGESIZE for Python 2.6?
# The Scientific.IO.NetCDF API allows attributes to be added directly to
# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
# between user-set attributes and instance attributes, user-set attributes
# are automatically stored in the ``_attributes`` attribute by overloading
# ``__setattr__``. This is the reason why the code sometimes uses
# ``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
# otherwise the key would be inserted into userspace attributes.
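# For example (illustrative), the implementation below writes
#   self.__dict__['_recs'] = len(var.data)
# rather than ``self._recs = ...`` so that the internal record count never
# ends up in the user-visible ``_attributes`` dict.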
__all__ = ['netcdf_file']
import warnings
import weakref
from operator import mul
import mmap as mm
import numpy as np
from numpy.compat import asbytes, asstr
from numpy import fromstring, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce
from scipy._lib.six import integer_types, text_type, binary_type
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
TYPEMAP = {NC_BYTE: ('b', 1),
NC_CHAR: ('c', 1),
NC_SHORT: ('h', 2),
NC_INT: ('i', 4),
NC_FLOAT: ('f', 4),
NC_DOUBLE: ('d', 8)}
REVERSE = {('b', 1): NC_BYTE,
('B', 1): NC_CHAR,
('c', 1): NC_CHAR,
('h', 2): NC_SHORT,
('i', 4): NC_INT,
('f', 4): NC_FLOAT,
('d', 8): NC_DOUBLE,
# these come from asarray(1).dtype.char and asarray('foo').dtype.char,
# used when getting the types from generic attributes.
('l', 4): NC_INT,
('S', 1): NC_CHAR}
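# Example (illustrative): asarray(1.0).dtype.char is 'd' with itemsize 8, so
# a Python float attribute maps to NC_DOUBLE via REVERSE[('d', 8)].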
class netcdf_file(object):
"""
A file object for NetCDF data.
A `netcdf_file` object has two standard attributes: `dimensions` and
`variables`. The values of both are dictionaries, mapping dimension
names to their associated lengths and variable names to variables,
respectively. Application programs should never modify these
dictionaries.
All other attributes correspond to global attributes defined in the
NetCDF file. Global file attributes are created by assigning to an
attribute of the `netcdf_file` object.
Parameters
----------
filename : string or file-like
string -> filename
mode : {'r', 'w', 'a'}, optional
read-write-append mode, default is 'r'
mmap : None or bool, optional
Whether to mmap `filename` when reading. Default is True
when `filename` is a file name, False when `filename` is a
file-like object. Note that when mmap is in use, data arrays
returned refer directly to the mmapped data on disk, and the
file cannot be closed as long as references to it exist.
version : {1, 2}, optional
version of netcdf to read / write, where 1 means *Classic
format* and 2 means *64-bit offset format*. Default is 1. See
`here <http://www.unidata.ucar.edu/software/netcdf/docs/netcdf/Which-Format.html>`__
for more info.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Notes
-----
The major advantage of this module over other modules is that it doesn't
require the code to be linked to the NetCDF libraries. This module is
derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.
NetCDF files are a self-describing binary data format. The file contains
metadata that describes the dimensions and variables in the file. More
details about NetCDF files can be found `here
<http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html>`__. There
are three main sections to a NetCDF data structure:
1. Dimensions
2. Variables
3. Attributes
The dimensions section records the name and length of each dimension used
by the variables. The variables would then indicate which dimensions it
uses and any attributes such as data units, along with containing the data
    values for the variable. It is good practice to include a
    variable with the same name as a dimension, to provide the values for
    that axis. Lastly, the attributes section would contain additional
information such as the name of the file creator or the instrument used to
collect the data.
When writing data to a NetCDF file, there is often the need to indicate the
'record dimension'. A record dimension is the unbounded dimension for a
variable. For example, a temperature variable may have dimensions of
latitude, longitude and time. If one wants to add more temperature data to
the NetCDF file as time progresses, then the temperature variable should
have the time dimension flagged as the record dimension.
In addition, the NetCDF file header contains the position of the data in
the file, so access can be done in an efficient manner without loading
unnecessary data into memory. It uses the ``mmap`` module to create
Numpy arrays mapped to the data on disk, for the same purpose.
Note that when `netcdf_file` is used to open a file with mmap=True
(default for read-only), arrays returned by it refer to data
directly on the disk. The file should not be closed, and cannot be cleanly
closed when asked, if such arrays are alive. You may want to copy data arrays
obtained from mmapped Netcdf file if they are to be processed after the file
is closed, see the example below.
Examples
--------
To create a NetCDF file:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'w')
>>> f.history = 'Created for a test'
>>> f.createDimension('time', 10)
>>> time = f.createVariable('time', 'i', ('time',))
>>> time[:] = np.arange(10)
>>> time.units = 'days since 2008-01-01'
>>> f.close()
    Note the assignment of ``np.arange(10)`` to ``time[:]``. Exposing the slice
    of the time variable allows for the data to be set in the object, rather
    than letting ``np.arange(10)`` overwrite the ``time`` variable.
To read the NetCDF file we just created:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'r')
>>> print(f.history)
Created for a test
>>> time = f.variables['time']
>>> print(time.units)
days since 2008-01-01
>>> print(time.shape)
(10,)
>>> print(time[-1])
9
NetCDF files, when opened read-only, return arrays that refer
directly to memory-mapped data on disk:
>>> data = time[:]
>>> data.base.base
<mmap.mmap object at 0x7fe753763180>
If the data is to be processed after the file is closed, it needs
to be copied to main memory:
>>> data = time[:].copy()
>>> f.close()
>>> data.mean()
4.5
A NetCDF file can also be used as context manager:
>>> from scipy.io import netcdf
>>> with netcdf.netcdf_file('simple.nc', 'r') as f:
... print(f.history)
Created for a test
"""
def __init__(self, filename, mode='r', mmap=None, version=1,
maskandscale=False):
"""Initialize netcdf_file from fileobj (str or file-like)."""
if mode not in 'rwa':
raise ValueError("Mode must be either 'r', 'w' or 'a'.")
if hasattr(filename, 'seek'): # file-like
self.fp = filename
self.filename = 'None'
if mmap is None:
mmap = False
elif mmap and not hasattr(filename, 'fileno'):
raise ValueError('Cannot use file object for mmap')
else: # maybe it's a string
self.filename = filename
omode = 'r+' if mode == 'a' else mode
self.fp = open(self.filename, '%sb' % omode)
if mmap is None:
mmap = True
if mode != 'r':
# Cannot read write-only files
mmap = False
self.use_mmap = mmap
self.mode = mode
self.version_byte = version
self.maskandscale = maskandscale
self.dimensions = {}
self.variables = {}
self._dims = []
self._recs = 0
self._recsize = 0
self._mm = None
self._mm_buf = None
if self.use_mmap:
self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)
self._attributes = {}
if mode in 'ra':
self._read()
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def close(self):
"""Closes the NetCDF file."""
if not self.fp.closed:
try:
self.flush()
finally:
self.variables = {}
if self._mm_buf is not None:
ref = weakref.ref(self._mm_buf)
self._mm_buf = None
if ref() is None:
# self._mm_buf is gc'd, and we can close the mmap
self._mm.close()
else:
# we cannot close self._mm, since self._mm_buf is
# alive and there may still be arrays referring to it
warnings.warn((
"Cannot close a netcdf_file opened with mmap=True, when "
"netcdf_variables or arrays referring to its data still exist. "
"All data arrays obtained from such files refer directly to "
"data on disk, and must be copied before the file can be cleanly "
"closed. (See netcdf_file docstring for more information on mmap.)"
), category=RuntimeWarning)
self._mm = None
self.fp.close()
__del__ = close
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def createDimension(self, name, length):
"""
Adds a dimension to the Dimension section of the NetCDF data structure.
Note that this function merely adds a new dimension that the variables can
reference. The values for the dimension, if desired, should be added as
a variable using `createVariable`, referring to this dimension.
Parameters
----------
name : str
Name of the dimension (Eg, 'lat' or 'time').
length : int
Length of the dimension.
See Also
--------
createVariable
"""
if length is None and self._dims:
raise ValueError("Only first dimension may be unlimited!")
self.dimensions[name] = length
self._dims.append(name)
def createVariable(self, name, type, dimensions):
"""
Create an empty variable for the `netcdf_file` object, specifying its data
type and the dimensions it uses.
Parameters
----------
name : str
Name of the new variable.
type : dtype or str
Data type of the variable.
dimensions : sequence of str
List of the dimension names used by the variable, in the desired order.
Returns
-------
variable : netcdf_variable
The newly created ``netcdf_variable`` object.
This object has also been added to the `netcdf_file` object as well.
See Also
--------
createDimension
Notes
-----
Any dimensions to be used by the variable should already exist in the
NetCDF data structure or should be created by `createDimension` prior to
creating the NetCDF variable.
"""
shape = tuple([self.dimensions[dim] for dim in dimensions])
shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy
type = dtype(type)
typecode, size = type.char, type.itemsize
if (typecode, size) not in REVERSE:
raise ValueError("NetCDF 3 does not support type %s" % type)
data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions,
maskandscale=self.maskandscale)
return self.variables[name]
def flush(self):
"""
Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.
See Also
--------
sync : Identical function
"""
if hasattr(self, 'mode') and self.mode in 'wa':
self._write()
sync = flush
def _write(self):
self.fp.seek(0)
self.fp.write(b'CDF')
self.fp.write(array(self.version_byte, '>b').tostring())
# Write headers and data.
self._write_numrecs()
self._write_dim_array()
self._write_gatt_array()
self._write_var_array()
def _write_numrecs(self):
# Get highest record count from all record variables.
for var in self.variables.values():
if var.isrec and len(var.data) > self._recs:
self.__dict__['_recs'] = len(var.data)
self._pack_int(self._recs)
def _write_dim_array(self):
if self.dimensions:
self.fp.write(NC_DIMENSION)
self._pack_int(len(self.dimensions))
for name in self._dims:
self._pack_string(name)
length = self.dimensions[name]
self._pack_int(length or 0) # replace None with 0 for record dimension
else:
self.fp.write(ABSENT)
def _write_gatt_array(self):
self._write_att_array(self._attributes)
def _write_att_array(self, attributes):
if attributes:
self.fp.write(NC_ATTRIBUTE)
self._pack_int(len(attributes))
for name, values in attributes.items():
self._pack_string(name)
self._write_values(values)
else:
self.fp.write(ABSENT)
def _write_var_array(self):
if self.variables:
self.fp.write(NC_VARIABLE)
self._pack_int(len(self.variables))
# Sort variable names non-recs first, then recs.
def sortkey(n):
v = self.variables[n]
if v.isrec:
return (-1,)
return v._shape
variables = sorted(self.variables, key=sortkey, reverse=True)
# Set the metadata for all variables.
for name in variables:
self._write_var_metadata(name)
# Now that we have the metadata, we know the vsize of
# each record variable, so we can calculate recsize.
self.__dict__['_recsize'] = sum([
var._vsize for var in self.variables.values()
if var.isrec])
# Set the data for all variables.
for name in variables:
self._write_var_data(name)
else:
self.fp.write(ABSENT)
def _write_var_metadata(self, name):
var = self.variables[name]
self._pack_string(name)
self._pack_int(len(var.dimensions))
for dimname in var.dimensions:
dimid = self._dims.index(dimname)
self._pack_int(dimid)
self._write_att_array(var._attributes)
nc_type = REVERSE[var.typecode(), var.itemsize()]
self.fp.write(asbytes(nc_type))
if not var.isrec:
vsize = var.data.size * var.data.itemsize
vsize += -vsize % 4
else: # record variable
try:
vsize = var.data[0].size * var.data.itemsize
except IndexError:
vsize = 0
rec_vars = len([v for v in self.variables.values()
if v.isrec])
if rec_vars > 1:
vsize += -vsize % 4
self.variables[name].__dict__['_vsize'] = vsize
self._pack_int(vsize)
# Pack a bogus begin, and set the real value later.
self.variables[name].__dict__['_begin'] = self.fp.tell()
self._pack_begin(0)
def _write_var_data(self, name):
var = self.variables[name]
# Set begin in file header.
the_beguine = self.fp.tell()
self.fp.seek(var._begin)
self._pack_begin(the_beguine)
self.fp.seek(the_beguine)
# Write data.
if not var.isrec:
self.fp.write(var.data.tostring())
count = var.data.size * var.data.itemsize
self.fp.write(b'0' * (var._vsize - count))
else: # record variable
# Handle rec vars with shape[0] < nrecs.
if self._recs > len(var.data):
shape = (self._recs,) + var.data.shape[1:]
var.data.resize(shape)
pos0 = pos = self.fp.tell()
for rec in var.data:
# Apparently scalars cannot be converted to big endian. If we
# try to convert a ``=i4`` scalar to, say, '>i4' the dtype
# will remain as ``=i4``.
if not rec.shape and (rec.dtype.byteorder == '<' or
(rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
rec = rec.byteswap()
self.fp.write(rec.tostring())
# Padding
count = rec.size * rec.itemsize
self.fp.write(b'0' * (var._vsize - count))
pos += self._recsize
self.fp.seek(pos)
self.fp.seek(pos0 + var._vsize)
def _write_values(self, values):
if hasattr(values, 'dtype'):
nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
else:
types = [(t, NC_INT) for t in integer_types]
types += [
(float, NC_FLOAT),
(str, NC_CHAR)
]
# bytes index into scalars in py3k. Check for "string" types
if isinstance(values, text_type) or isinstance(values, binary_type):
sample = values
else:
try:
sample = values[0] # subscriptable?
except TypeError:
sample = values # scalar
for class_, nc_type in types:
if isinstance(sample, class_):
break
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
# asarray() dies with bytes and '>c' in py3k. Change to 'S'
dtype_ = 'S' if dtype_ == '>c' else dtype_
values = asarray(values, dtype=dtype_)
self.fp.write(asbytes(nc_type))
if values.dtype.char == 'S':
nelems = values.itemsize
else:
nelems = values.size
self._pack_int(nelems)
if not values.shape and (values.dtype.byteorder == '<' or
(values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
values = values.byteswap()
self.fp.write(values.tostring())
count = values.size * values.itemsize
self.fp.write(b'0' * (-count % 4)) # pad
def _read(self):
# Check magic bytes and version
magic = self.fp.read(3)
        if magic != b'CDF':
raise TypeError("Error: %s is not a valid NetCDF 3 file" %
self.filename)
self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]
# Read file headers and set data.
self._read_numrecs()
self._read_dim_array()
self._read_gatt_array()
self._read_var_array()
def _read_numrecs(self):
self.__dict__['_recs'] = self._unpack_int()
def _read_dim_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_DIMENSION]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
for dim in range(count):
name = asstr(self._unpack_string())
length = self._unpack_int() or None # None for record dimension
self.dimensions[name] = length
self._dims.append(name) # preserve order
def _read_gatt_array(self):
for k, v in self._read_att_array().items():
self.__setattr__(k, v)
def _read_att_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_ATTRIBUTE]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
attributes = {}
for attr in range(count):
name = asstr(self._unpack_string())
attributes[name] = self._read_values()
return attributes
def _read_var_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_VARIABLE]:
raise ValueError("Unexpected header.")
begin = 0
dtypes = {'names': [], 'formats': []}
rec_vars = []
count = self._unpack_int()
for var in range(count):
(name, dimensions, shape, attributes,
typecode, size, dtype_, begin_, vsize) = self._read_var()
# http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
# Note that vsize is the product of the dimension lengths
# (omitting the record dimension) and the number of bytes
# per value (determined from the type), increased to the
# next multiple of 4, for each variable. If a record
# variable, this is the amount of space per record. The
# netCDF "record size" is calculated as the sum of the
# vsize's of all the record variables.
#
# The vsize field is actually redundant, because its value
# may be computed from other information in the header. The
# 32-bit vsize field is not large enough to contain the size
# of variables that require more than 2^32 - 4 bytes, so
# 2^32 - 1 is used in the vsize field for such variables.
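            # Worked example (editor's addition): a non-record int32 variable
            # of shape (3, 5) has vsize = 3*5*4 = 60 bytes (already a multiple
            # of 4), while an int8 variable of shape (3,) occupies 3 data
            # bytes padded up to vsize = 4.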
if shape and shape[0] is None: # record variable
rec_vars.append(name)
# The netCDF "record size" is calculated as the sum of
# the vsize's of all the record variables.
self.__dict__['_recsize'] += vsize
if begin == 0:
begin = begin_
dtypes['names'].append(name)
dtypes['formats'].append(str(shape[1:]) + dtype_)
# Handle padding with a virtual variable.
if typecode in 'bch':
actual_size = reduce(mul, (1,) + shape[1:]) * size
padding = -actual_size % 4
if padding:
dtypes['names'].append('_padding_%d' % var)
dtypes['formats'].append('(%d,)>b' % padding)
# Data will be set later.
data = None
else: # not a record variable
# Calculate size to avoid problems with vsize (above)
a_size = reduce(mul, shape, 1) * size
if self.use_mmap:
data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
data.shape = shape
else:
pos = self.fp.tell()
self.fp.seek(begin_)
data = fromstring(self.fp.read(a_size), dtype=dtype_)
data.shape = shape
self.fp.seek(pos)
# Add variable.
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions, attributes,
maskandscale=self.maskandscale)
if rec_vars:
# Remove padding when only one record variable.
if len(rec_vars) == 1:
dtypes['names'] = dtypes['names'][:1]
dtypes['formats'] = dtypes['formats'][:1]
# Build rec array.
if self.use_mmap:
rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes)
rec_array.shape = (self._recs,)
else:
pos = self.fp.tell()
self.fp.seek(begin)
rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
rec_array.shape = (self._recs,)
self.fp.seek(pos)
for var in rec_vars:
self.variables[var].__dict__['data'] = rec_array[var]
def _read_var(self):
name = asstr(self._unpack_string())
dimensions = []
shape = []
dims = self._unpack_int()
for i in range(dims):
dimid = self._unpack_int()
dimname = self._dims[dimid]
dimensions.append(dimname)
dim = self.dimensions[dimname]
shape.append(dim)
dimensions = tuple(dimensions)
shape = tuple(shape)
attributes = self._read_att_array()
nc_type = self.fp.read(4)
vsize = self._unpack_int()
begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
def _read_values(self):
nc_type = self.fp.read(4)
n = self._unpack_int()
typecode, size = TYPEMAP[nc_type]
count = n*size
values = self.fp.read(int(count))
self.fp.read(-count % 4) # read padding
        if typecode != 'c':
values = fromstring(values, dtype='>%s' % typecode)
if values.shape == (1,):
values = values[0]
else:
values = values.rstrip(b'\x00')
return values
def _pack_begin(self, begin):
if self.version_byte == 1:
self._pack_int(begin)
elif self.version_byte == 2:
self._pack_int64(begin)
def _pack_int(self, value):
self.fp.write(array(value, '>i').tostring())
_pack_int32 = _pack_int
def _unpack_int(self):
return int(fromstring(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int
def _pack_int64(self, value):
self.fp.write(array(value, '>q').tostring())
def _unpack_int64(self):
return fromstring(self.fp.read(8), '>q')[0]
def _pack_string(self, s):
count = len(s)
self._pack_int(count)
self.fp.write(asbytes(s))
self.fp.write(b'0' * (-count % 4)) # pad
def _unpack_string(self):
count = self._unpack_int()
s = self.fp.read(count).rstrip(b'\x00')
self.fp.read(-count % 4) # read padding
return s
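    # Editor's note: the `-count % 4` idiom above computes the padding needed
    # to round `count` up to a multiple of 4 (e.g. count=5 -> 3 pad bytes,
    # count=8 -> 0), matching the 4-byte alignment NetCDF 3 requires.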
class netcdf_variable(object):
"""
A data object for the `netcdf` module.
`netcdf_variable` objects are constructed by calling the method
`netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
objects behave much like array objects defined in numpy, except that their
data resides in a file. Data is read by indexing and written by assigning
to an indexed subset; the entire array can be accessed by the index ``[:]``
or (for scalars) by using the methods `getValue` and `assignValue`.
`netcdf_variable` objects also have attribute `shape` with the same meaning
as for arrays, but the shape cannot be modified. There is another read-only
attribute `dimensions`, whose value is the tuple of dimension names.
All other attributes correspond to variable attributes defined in
the NetCDF file. Variable attributes are created by assigning to an
attribute of the `netcdf_variable` object.
Parameters
----------
data : array_like
The data array that holds the values for the variable.
Typically, this is initialized as empty, but with the proper shape.
typecode : dtype character code
Desired data-type for the data array.
size : int
Desired element size for the data array.
shape : sequence of ints
The shape of the array. This should match the lengths of the
variable's dimensions.
dimensions : sequence of strings
        The names of the dimensions used by the variable. Must be in the
        same order as the dimension lengths given by `shape`.
attributes : dict, optional
Attribute values (any type) keyed by string names. These attributes
become attributes for the netcdf_variable object.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Attributes
----------
dimensions : list of str
List of names of dimensions used by the variable object.
isrec, shape
Properties
See also
--------
isrec, shape
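    Examples
    --------
    A hedged usage sketch (editor's addition; the file name is hypothetical):
    >>> f = netcdf_file('example.nc', 'w')
    >>> f.createDimension('n', 2)
    >>> v = f.createVariable('v', 'i', ('n',))
    >>> v[:] = [10, 20]
    >>> v.units = 'meters'  # stored as a variable attribute in the file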
"""
def __init__(self, data, typecode, size, shape, dimensions,
attributes=None,
maskandscale=False):
self.data = data
self._typecode = typecode
self._size = size
self._shape = shape
self.dimensions = dimensions
self.maskandscale = maskandscale
self._attributes = attributes or {}
for k, v in self._attributes.items():
self.__dict__[k] = v
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def isrec(self):
"""Returns whether the variable has a record dimension or not.
A record dimension is a dimension along which additional data could be
easily appended in the netcdf data structure without much rewriting of
the data file. This attribute is a read-only property of the
`netcdf_variable`.
"""
return bool(self.data.shape) and not self._shape[0]
isrec = property(isrec)
def shape(self):
"""Returns the shape tuple of the data variable.
        This is a read-only attribute and cannot be modified in the
        same manner as other numpy arrays.
"""
return self.data.shape
shape = property(shape)
def getValue(self):
"""
Retrieve a scalar value from a `netcdf_variable` of length one.
Raises
------
ValueError
If the netcdf variable is an array of length greater than one,
this exception will be raised.
"""
return self.data.item()
def assignValue(self, value):
"""
Assign a scalar value to a `netcdf_variable` of length one.
Parameters
----------
value : scalar
Scalar value (of compatible type) to assign to a length-one netcdf
variable. This value will be written to file.
Raises
------
ValueError
If the input is not a scalar, or if the destination is not a length-one
netcdf variable.
"""
if not self.data.flags.writeable:
# Work-around for a bug in NumPy. Calling itemset() on a read-only
# memory-mapped array causes a seg. fault.
# See NumPy ticket #1622, and SciPy ticket #1202.
# This check for `writeable` can be removed when the oldest version
# of numpy still supported by scipy contains the fix for #1622.
raise RuntimeError("variable is not writeable")
self.data.itemset(value)
def typecode(self):
"""
Return the typecode of the variable.
Returns
-------
typecode : char
            The character typecode of the variable (e.g., 'i' for int).
"""
return self._typecode
def itemsize(self):
"""
Return the itemsize of the variable.
Returns
-------
itemsize : int
            The element size of the variable (e.g., 8 for float64).
"""
return self._size
def __getitem__(self, index):
if not self.maskandscale:
return self.data[index]
data = self.data[index].copy()
missing_value = self._get_missing_value()
data = self._apply_missing_value(data, missing_value)
scale_factor = self._attributes.get('scale_factor')
add_offset = self._attributes.get('add_offset')
if add_offset is not None or scale_factor is not None:
data = data.astype(np.float64)
if scale_factor is not None:
data = data * scale_factor
if add_offset is not None:
data += add_offset
return data
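    # Editor's note: the unpacking above follows the common NetCDF/CF
    # convention  unpacked = packed * scale_factor + add_offset;
    # __setitem__ below applies the inverse transform when writing.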
def __setitem__(self, index, data):
if self.maskandscale:
missing_value = (
self._get_missing_value() or
getattr(data, 'fill_value', 999999))
self._attributes.setdefault('missing_value', missing_value)
self._attributes.setdefault('_FillValue', missing_value)
data = ((data - self._attributes.get('add_offset', 0.0)) /
self._attributes.get('scale_factor', 1.0))
data = np.ma.asarray(data).filled(missing_value)
if self._typecode not in 'fd' and data.dtype.kind == 'f':
data = np.round(data)
# Expand data for record vars?
if self.isrec:
if isinstance(index, tuple):
rec_index = index[0]
else:
rec_index = index
if isinstance(rec_index, slice):
recs = (rec_index.start or 0) + len(data)
else:
recs = rec_index + 1
if recs > len(self.data):
shape = (recs,) + self._shape[1:]
self.data.resize(shape)
self.data[index] = data
def _get_missing_value(self):
"""
Returns the value denoting "no data" for this variable.
If this variable does not have a missing/fill value, returns None.
If both _FillValue and missing_value are given, give precedence to
_FillValue. The netCDF standard gives special meaning to _FillValue;
missing_value is just used for compatibility with old datasets.
"""
if '_FillValue' in self._attributes:
missing_value = self._attributes['_FillValue']
elif 'missing_value' in self._attributes:
missing_value = self._attributes['missing_value']
else:
missing_value = None
return missing_value
@staticmethod
def _apply_missing_value(data, missing_value):
"""
Applies the given missing value to the data array.
Returns a numpy.ma array, with any value equal to missing_value masked
out (unless missing_value is None, in which case the original array is
returned).
"""
if missing_value is None:
newdata = data
else:
try:
missing_value_isnan = np.isnan(missing_value)
except (TypeError, NotImplementedError):
# some data types (e.g., characters) cannot be tested for NaN
missing_value_isnan = False
if missing_value_isnan:
mymask = np.isnan(data)
else:
mymask = (data == missing_value)
newdata = np.ma.masked_where(mymask, data)
return newdata
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
|
yuanagain/seniorthesis
|
venv/lib/python2.7/site-packages/scipy/io/netcdf.py
|
Python
|
mit
| 36,978
|
[
"NetCDF"
] |
9f83bdad9a508a5a73299074ee285bf1a290a52cc2c42f661b5dab96487b35a0
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
Packet sending and receiving with libdnet and libpcap/WinPcap.
"""
import os
import time
import struct
import sys
import platform
import socket
from scapy.data import *
from scapy.compat import *
from scapy.config import conf
from scapy.consts import WINDOWS
from scapy.utils import mac2str
from scapy.supersocket import SuperSocket
from scapy.error import Scapy_Exception, log_loading, warning
from scapy.pton_ntop import inet_ntop
from scapy.automaton import SelectableObject
import scapy.arch
import scapy.consts
import scapy.utils6
if not scapy.consts.WINDOWS:
from fcntl import ioctl
############
# COMMON #
############
# From BSD net/bpf.h
# BIOCIMMEDIATE = 0x80044270
BIOCIMMEDIATE = -2147204496
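# Editor's note: 0x80044270 exceeds the signed 32-bit range, so its
# two's-complement value is used: 0x80044270 - 2**32 == -2147204496.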
class PcapTimeoutElapsed(Scapy_Exception):
pass
class _L2pcapdnetSocket(SuperSocket, SelectableObject):
def check_recv(self):
return True
def recv_raw(self, x=MTU):
"""Receives a packet, then returns a tuple containing (cls, pkt_data, time)""" # noqa: E501
ll = self.ins.datalink()
if ll in conf.l2types:
cls = conf.l2types[ll]
else:
cls = conf.default_l2
warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s", # noqa: E501
self.iface, ll, cls.name)
pkt = None
while pkt is None:
pkt = self.ins.next()
if pkt is not None:
ts, pkt = pkt
if pkt is None and scapy.consts.WINDOWS:
raise PcapTimeoutElapsed # To understand this behavior, have a look at L2pcapListenSocket's note # noqa: E501
if pkt is None:
return None, None, None
return cls, pkt, ts
def nonblock_recv(self):
"""Receives and dissect a packet in non-blocking mode.
Note: on Windows, this won't do anything."""
self.ins.setnonblock(1)
p = self.recv(MTU)
self.ins.setnonblock(0)
return p
###################
# WINPCAP/NPCAP #
###################
if conf.use_winpcapy:
NPCAP_PATH = os.environ["WINDIR"] + "\\System32\\Npcap"
# Part of the code from https://github.com/phaethon/scapy translated to python2.X # noqa: E501
try:
from scapy.modules.winpcapy import *
def load_winpcapy():
err = create_string_buffer(PCAP_ERRBUF_SIZE)
devs = POINTER(pcap_if_t)()
if_list = []
ip_addresses = {}
ip6_addresses = []
if pcap_findalldevs(byref(devs), err) < 0:
                return
try:
p = devs
# Iterate through the different interfaces
while p:
if_list.append(plain_str(p.contents.name))
a = p.contents.addresses
while a:
# IPv4 address
if a.contents.addr.contents.sa_family == socket.AF_INET: # noqa: E501
ap = a.contents.addr
val = cast(ap, POINTER(sockaddr_in))
if_raw_addr = b"".join(chb(x) for x in val.contents.sin_addr[:4]) # noqa: E501
if if_raw_addr != b'\x00\x00\x00\x00':
ip_addresses[plain_str(p.contents.name)] = if_raw_addr # noqa: E501
# IPv6 address
if a.contents.addr.contents.sa_family == socket.AF_INET6: # noqa: E501
ap = a.contents.addr
val = cast(ap, POINTER(sockaddr_in6))
addr = inet_ntop(socket.AF_INET6, b"".join(chb(x) for x in val.contents.sin6_addr[:])) # noqa: E501
scope = scapy.utils6.in6_getscope(addr)
ip6_addresses.append((addr, scope, plain_str(p.contents.name))) # noqa: E501
a = a.contents.next
p = p.contents.next
conf.cache_iflist = if_list
conf.cache_ipaddrs = ip_addresses
conf.cache_in6_getifaddr = ip6_addresses
except:
raise
finally:
pcap_freealldevs(devs)
# Detect Pcap version
version = pcap_lib_version()
if b"winpcap" in version.lower():
if os.path.exists(NPCAP_PATH + "\\wpcap.dll"):
warning("Winpcap is installed over Npcap. Will use Winpcap (see 'Winpcap/Npcap conflicts' in scapy's docs)") # noqa: E501
elif platform.release() != "XP":
warning("WinPcap is now deprecated (not maintened). Please use Npcap instead") # noqa: E501
elif b"npcap" in version.lower():
conf.use_npcap = True
LOOPBACK_NAME = scapy.consts.LOOPBACK_NAME = "Npcap Loopback Adapter" # noqa: E501
except OSError as e:
conf.use_winpcapy = False
if conf.interactive:
log_loading.warning("wpcap.dll is not installed. You won't be able to send/receive packets. Visit the scapy's doc to install it") # noqa: E501
if conf.use_winpcapy:
def get_if_raw_addr(iff): # noqa: F811
"""Returns the raw ip address corresponding to the NetworkInterface.""" # noqa: E501
if not conf.cache_ipaddrs:
load_winpcapy()
return conf.cache_ipaddrs.get(iff.pcap_name, None)
def get_if_list():
"""Returns all pcap names"""
if not conf.cache_iflist:
load_winpcapy()
return conf.cache_iflist
def in6_getifaddr_raw():
"""Returns all available IPv6 on the computer, read from winpcap.""" # noqa: E501
if not conf.cache_in6_getifaddr:
load_winpcapy()
return conf.cache_in6_getifaddr
else:
get_if_raw_addr = lambda x: None
get_if_list = lambda: []
in6_getifaddr_raw = lambda: []
from ctypes import POINTER, byref, create_string_buffer
class _PcapWrapper_pypcap: # noqa: F811
"""Wrapper for the WinPcap calls"""
def __init__(self, device, snaplen, promisc, to_ms, monitor=None):
self.errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
self.iface = create_string_buffer(device.encode("utf8"))
if monitor:
self.pcap = pcap_create(self.iface, self.errbuf)
pcap_set_snaplen(self.pcap, snaplen)
pcap_set_promisc(self.pcap, promisc)
pcap_set_timeout(self.pcap, to_ms)
if pcap_set_rfmon(self.pcap, 1) != 0:
warning("Could not set monitor mode")
if pcap_activate(self.pcap) != 0:
raise OSError("Could not activate the pcap handler")
else:
self.pcap = pcap_open_live(self.iface, snaplen, promisc, to_ms, self.errbuf) # noqa: E501
# Winpcap exclusive: make every packet to be instantly
# returned, and not buffered within Winpcap
pcap_setmintocopy(self.pcap, 0)
self.header = POINTER(pcap_pkthdr)()
self.pkt_data = POINTER(c_ubyte)()
self.bpf_program = bpf_program()
def next(self):
c = pcap_next_ex(self.pcap, byref(self.header), byref(self.pkt_data)) # noqa: E501
            if c <= 0:
return
ts = self.header.contents.ts.tv_sec + float(self.header.contents.ts.tv_usec) / 1000000 # noqa: E501
pkt = b"".join(chb(i) for i in self.pkt_data[:self.header.contents.len]) # noqa: E501
return ts, pkt
__next__ = next
def datalink(self):
return pcap_datalink(self.pcap)
def fileno(self):
if WINDOWS:
log_loading.error("Cannot get selectable PCAP fd on Windows")
return 0
return pcap_get_selectable_fd(self.pcap)
def setfilter(self, f):
filter_exp = create_string_buffer(f.encode("utf8"))
if pcap_compile(self.pcap, byref(self.bpf_program), filter_exp, 0, -1) == -1: # noqa: E501
log_loading.error("Could not compile filter expression %s", f)
return False
else:
if pcap_setfilter(self.pcap, byref(self.bpf_program)) == -1:
log_loading.error("Could not install filter %s", f)
return False
return True
def setnonblock(self, i):
pcap_setnonblock(self.pcap, i, self.errbuf)
def send(self, x):
pcap_sendpacket(self.pcap, x, len(x))
def close(self):
pcap_close(self.pcap)
open_pcap = lambda *args, **kargs: _PcapWrapper_pypcap(*args, **kargs)
################
# PCAP/PCAPY #
################
if conf.use_pcap:
try:
import pcap # python-pypcap
_PCAP_MODE = "pypcap"
except ImportError as e:
try:
import libpcap as pcap # python-libpcap
_PCAP_MODE = "libpcap"
except ImportError as e2:
try:
import pcapy as pcap # python-pcapy
_PCAP_MODE = "pcapy"
except ImportError as e3:
if conf.interactive:
log_loading.error("Unable to import pcap module: %s/%s", e, e2) # noqa: E501
conf.use_pcap = False
else:
raise
if conf.use_pcap:
if _PCAP_MODE == "pypcap": # python-pypcap
class _PcapWrapper_pypcap: # noqa: F811
def __init__(self, device, snaplen, promisc, to_ms, monitor=False): # noqa: E501
try:
self.pcap = pcap.pcap(device, snaplen, promisc, immediate=1, timeout_ms=to_ms, rfmon=monitor) # noqa: E501
except TypeError:
try:
if monitor:
warning("Your pypcap version is too old to support monitor mode, Please use pypcap 1.2.1+ !") # noqa: E501
self.pcap = pcap.pcap(device, snaplen, promisc, immediate=1, timeout_ms=to_ms) # noqa: E501
except TypeError:
# Even older pypcap versions do not support the timeout_ms argument # noqa: E501
self.pcap = pcap.pcap(device, snaplen, promisc, immediate=1) # noqa: E501
def __getattr__(self, attr):
return getattr(self.pcap, attr)
def setnonblock(self, i):
self.pcap.setnonblock(i)
def __del__(self):
try:
self.pcap.close()
except AttributeError:
warning("__del__: don't know how to close the file "
"descriptor. Bugs ahead! Please use python-pypcap 1.2.1+") # noqa: E501
def send(self, x):
self.pcap.sendpacket(x)
def next(self):
c = self.pcap.next()
if c is None:
return
ts, pkt = c
return ts, raw(pkt)
__next__ = next
open_pcap = lambda *args, **kargs: _PcapWrapper_pypcap(*args, **kargs) # noqa: E501
elif _PCAP_MODE == "libpcap": # python-libpcap
class _PcapWrapper_libpcap:
def __init__(self, device, snaplen, promisc, to_ms, monitor=False): # noqa: E501
self.errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
if monitor:
self.pcap = pcap.pcap_create(device, self.errbuf)
pcap.pcap_set_snaplen(self.pcap, snaplen)
pcap.pcap_set_promisc(self.pcap, promisc)
pcap.pcap_set_timeout(self.pcap, to_ms)
if pcap.pcap_set_rfmon(self.pcap, 1) != 0:
warning("Could not set monitor mode")
if pcap.pcap_activate(self.pcap) != 0:
raise OSError("Could not activate the pcap handler") # noqa: E501
else:
self.pcap = pcap.open_live(device, snaplen, promisc, to_ms) # noqa: E501
def setfilter(self, filter):
self.pcap.setfilter(filter, 0, 0)
def next(self):
c = self.pcap.next()
if c is None:
return
l, pkt, ts = c
return ts, pkt
__next__ = next
def setnonblock(self, i):
pcap.pcap_setnonblock(self.pcap, i, self.errbuf)
def __getattr__(self, attr):
return getattr(self.pcap, attr)
def send(self, x):
pcap.pcap_sendpacket(self.pcap, x, len(x))
def __del__(self):
pcap.close(self.pcap)
open_pcap = lambda *args, **kargs: _PcapWrapper_libpcap(*args, **kargs) # noqa: E501
elif _PCAP_MODE == "pcapy": # python-pcapy
class _PcapWrapper_pcapy:
def __init__(self, device, snaplen, promisc, to_ms, monitor=False): # noqa: E501
if monitor:
try:
self.pcap = pcap.create(device)
self.pcap.set_snaplen(snaplen)
self.pcap.set_promisc(promisc)
self.pcap.set_timeout(to_ms)
if self.pcap.set_rfmon(1) != 0:
warning("Could not set monitor mode")
if self.pcap.activate() != 0:
raise OSError("Could not activate the pcap handler") # noqa: E501
except AttributeError:
raise OSError("Your pcapy version does not support"
"monitor mode ! Use pcapy 0.11.4+")
else:
self.pcap = pcap.open_live(device, snaplen, promisc, to_ms) # noqa: E501
def next(self):
try:
c = self.pcap.next()
except pcap.PcapError:
return None
else:
h, p = c
if h is None:
return
s, us = h.getts()
return (s + 0.000001 * us), p
__next__ = next
def fileno(self):
try:
return self.pcap.getfd()
except AttributeError:
warning("fileno: getfd() does not exist. Please use "
"pcapy 0.11.3+ !")
def setnonblock(self, i):
self.pcap.setnonblock(i)
def __getattr__(self, attr):
return getattr(self.pcap, attr)
def send(self, x):
self.pcap.sendpacket(x)
def __del__(self):
try:
self.pcap.close()
except AttributeError:
warning("__del__: don't know how to close the file "
"descriptor. Bugs ahead! Please update pcapy!")
open_pcap = lambda *args, **kargs: _PcapWrapper_pcapy(*args, **kargs) # noqa: E501
#################
# PCAP/WINPCAPY #
#################
if conf.use_pcap or conf.use_winpcapy:
class L2pcapListenSocket(_L2pcapdnetSocket):
desc = "read packets at layer 2 using libpcap"
def __init__(self, iface=None, type=ETH_P_ALL, promisc=None, filter=None, monitor=None): # noqa: E501
self.type = type
self.outs = None
self.iface = iface
if iface is None:
iface = conf.iface
if promisc is None:
promisc = conf.sniff_promisc
self.promisc = promisc
            # Note: Timeout with Winpcap/Npcap
            # The 4th argument of open_pcap corresponds to the timeout. In an ideal world, we would # noqa: E501
            # set it to 0 ==> blocking pcap_next_ex.
            # However, the way it is handled is very poor, and results in a jerky packet stream. # noqa: E501
            # To work around this, we set it to 100 ms; the implementation under Windows differs slightly, as # noqa: E501
            # everything is always received as non-blocking
self.ins = open_pcap(iface, MTU, self.promisc, 100, monitor=monitor) # noqa: E501
try:
ioctl(self.ins.fileno(), BIOCIMMEDIATE, struct.pack("I", 1))
except:
pass
if type == ETH_P_ALL: # Do not apply any filter if Ethernet type is given # noqa: E501
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter) # noqa: E501
else:
filter = "not (%s)" % conf.except_filter
if filter:
self.ins.setfilter(filter)
def close(self):
self.ins.close()
def send(self, x):
raise Scapy_Exception("Can't send anything with L2pcapListenSocket") # noqa: E501
conf.L2listen = L2pcapListenSocket
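    # Hedged usage sketch (editor's addition; the interface name is
    # hypothetical):
    #     s = conf.L2listen(iface="eth0", filter="icmp")
    #     pkt = s.recv()   # blocks until a matching frame is captured
    #     s.close()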
class L2pcapSocket(_L2pcapdnetSocket):
desc = "read/write packets at layer 2 using only libpcap"
def __init__(self, iface=None, type=ETH_P_ALL, promisc=None, filter=None, nofilter=0, # noqa: E501
monitor=None):
if iface is None:
iface = conf.iface
self.iface = iface
if promisc is None:
promisc = 0
self.promisc = promisc
# See L2pcapListenSocket for infos about this line
self.ins = open_pcap(iface, MTU, self.promisc, 100, monitor=monitor) # noqa: E501
# We need to have a different interface open because of an
# access violation in Npcap that occurs in multi-threading
# (see https://github.com/nmap/nmap/issues/982)
self.outs = open_pcap(iface, MTU, self.promisc, 100)
try:
ioctl(self.ins.fileno(), BIOCIMMEDIATE, struct.pack("I", 1))
except:
pass
if nofilter:
if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap # noqa: E501
filter = "ether proto %i" % type
else:
filter = None
else:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter) # noqa: E501
else:
filter = "not (%s)" % conf.except_filter
if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap # noqa: E501
if filter:
filter = "(ether proto %i) and (%s)" % (type, filter)
else:
filter = "ether proto %i" % type
if filter:
self.ins.setfilter(filter)
def send(self, x):
sx = raw(x)
if hasattr(x, "sent_time"):
x.sent_time = time.time()
return self.outs.send(sx)
def close(self):
if not self.closed:
if hasattr(self, "ins"):
self.ins.close()
if hasattr(self, "outs"):
self.outs.close()
self.closed = True
class L3pcapSocket(L2pcapSocket):
desc = "read/write packets at layer 3 using only libpcap"
# def __init__(self, iface = None, type = ETH_P_ALL, filter=None, nofilter=0): # noqa: E501
# L2pcapSocket.__init__(self, iface, type, filter, nofilter)
def recv(self, x=MTU):
r = L2pcapSocket.recv(self, x)
if r:
return r.payload
else:
return
def send(self, x):
            # Make send detect when it should add Loopback(), Dot11... instead of Ether() # noqa: E501
ll = self.ins.datalink()
if ll in conf.l2types:
cls = conf.l2types[ll]
else:
cls = conf.default_l2
warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s", self.iface, ll, cls.name) # noqa: E501
sx = raw(cls() / x)
if hasattr(x, "sent_time"):
x.sent_time = time.time()
return self.ins.send(sx)
conf.L2socket = L2pcapSocket
conf.L3socket = L3pcapSocket
##########
# DNET #
##########
# DEPRECATED
if conf.use_dnet:
warning("dnet usage with scapy is deprecated, and will be removed in a future version.") # noqa: E501
try:
try:
# First try to import dnet
import dnet
except ImportError:
# Then, try to import dumbnet as dnet
import dumbnet as dnet
except ImportError as e:
if conf.interactive:
log_loading.error("Unable to import dnet module: %s", e)
conf.use_dnet = False
def get_if_raw_hwaddr(iff):
"dummy"
return (0, b"\0\0\0\0\0\0")
def get_if_raw_addr(iff): # noqa: F811
"dummy"
return b"\0\0\0\0"
def get_if_list():
"dummy"
return []
else:
raise
else:
def get_if_raw_hwaddr(iff):
"""Return a tuple containing the link type and the raw hardware
address corresponding to the interface 'iff'"""
if iff == scapy.arch.LOOPBACK_NAME:
return (ARPHDR_LOOPBACK, b'\x00' * 6)
# Retrieve interface information
try:
l = dnet.intf().get(iff)
link_addr = l["link_addr"]
except:
raise Scapy_Exception("Error in attempting to get hw address"
" for interface [%s]" % iff)
if hasattr(link_addr, "type"):
# Legacy dnet module
return link_addr.type, link_addr.data
else:
# dumbnet module
mac = mac2str(str(link_addr))
# Adjust the link type
if l["type"] == 6: # INTF_TYPE_ETH from dnet
return (ARPHDR_ETHER, mac)
return (l["type"], mac)
def get_if_raw_addr(ifname): # noqa: F811
i = dnet.intf()
try:
return i.get(ifname)["addr"].data
except (OSError, KeyError):
warning("No MAC address found on %s !" % ifname)
return b"\0\0\0\0"
def get_if_list():
return [i.get("name", None) for i in dnet.intf()]
def get_working_if():
"""Returns the first interface than can be used with dnet"""
if_iter = iter(dnet.intf())
try:
intf = next(if_iter)
except StopIteration:
return scapy.consts.LOOPBACK_NAME
return intf.get("name", scapy.consts.LOOPBACK_NAME)
|
smainand/scapy
|
scapy/arch/pcapdnet.py
|
Python
|
gpl-2.0
| 23,872
|
[
"VisIt"
] |
3a5d343c78157de947f1ddaac3cb7f8673f87e65a02719fd04984b4d961715d1
|
# -*- coding: utf-8 -*-
import datetime
import json
import os
import shutil
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import amo
import amo.tests
import mkt
from addons.models import (Addon, AddonCategory, AddonDeviceType, AddonUser,
Category)
from amo.tests import formset, initial
from amo.tests.test_helpers import get_image_path
from apps.users.models import UserNotification
from apps.users.notifications import app_surveys
from constants.applications import DEVICE_TYPES
from mkt.files.tests.test_models import UploadTest as BaseUploadTest
from mkt.site.fixtures import fixture
from mkt.submit.decorators import read_dev_agreement_required
from mkt.submit.forms import AppFeaturesForm, NewWebappVersionForm
from mkt.submit.models import AppSubmissionChecklist
from mkt.webapps.models import AppFeatures, Webapp
from translations.models import Translation
from users.models import UserProfile
class TestSubmit(amo.tests.TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.fi_mock = mock.patch(
'mkt.developers.tasks.fetch_icon').__enter__()
self.user = self.get_user()
assert self.client.login(username=self.user.email, password='password')
def tearDown(self):
self.fi_mock.__exit__()
def get_user(self):
return UserProfile.objects.get(username='regularuser')
def get_url(self, url):
return reverse('submit.app.%s' % url, args=[self.webapp.app_slug])
def _test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def _test_progress_display(self, completed, current):
"""Test that the correct steps are highlighted."""
r = self.client.get(self.url)
progress = pq(r.content)('#submission-progress')
# Check the completed steps.
completed_found = progress.find('.completed')
for idx, step in enumerate(completed):
li = completed_found.eq(idx)
eq_(li.text(), unicode(mkt.APP_STEPS_TITLE[step]))
# Check that we link back to the Developer Agreement.
terms_link = progress.find('.terms a')
if 'terms' in completed:
eq_(terms_link.attr('href'),
reverse('mkt.developers.docs', args=['policies', 'agreement']))
else:
eq_(terms_link.length, 0)
# Check the current step.
eq_(progress.find('.current').text(),
unicode(mkt.APP_STEPS_TITLE[current]))
class TestProceed(TestSubmit):
def setUp(self):
super(TestProceed, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def test_is_authenticated(self):
# Redirect user to Terms.
r = self.client.get(self.url)
self.assert3xx(r, reverse('submit.app.terms'))
def test_is_anonymous(self):
# Show user to Terms page but with the login prompt.
self.client.logout()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(r.context['proceed'], True)
class TestTerms(TestSubmit):
def setUp(self):
super(TestTerms, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app.terms')
def test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def test_jump_to_step(self):
r = self.client.get(reverse('submit.app'), follow=True)
self.assert3xx(r, self.url)
def test_page(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)('#submit-terms')
eq_(doc.length, 1)
eq_(doc.find('input[name=newsletter]').siblings('label').length, 1,
'Missing its <label>!')
def test_progress_display(self):
self._test_progress_display([], 'terms')
@mock.patch('basket.subscribe')
def test_agree(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 0)
assert not subscribe_mock.called
@mock.patch('basket.subscribe')
def test_agree_and_sign_me_up(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement':
datetime.datetime.now(),
'newsletter': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 1)
notes = UserNotification.objects.filter(user=self.user, enabled=True,
notification_id=app_surveys.id)
        eq_(notes.count(), 1, 'Expected to be subscribed to newsletter')
subscribe_mock.assert_called_with(
self.user.email, 'app-dev', lang='en-US',
country='restofworld', format='H',
source_url='http://testserver/developers/submit')
def test_disagree(self):
r = self.client.post(self.url)
eq_(r.status_code, 200)
eq_(self.user.read_dev_agreement, None)
eq_(UserNotification.objects.count(), 0)
def test_read_dev_agreement_required(self):
f = mock.Mock()
f.__name__ = 'function'
request = mock.Mock()
request.amo_user.read_dev_agreement = None
request.get_full_path.return_value = self.url
func = read_dev_agreement_required(f)
res = func(request)
assert not f.called
eq_(res.status_code, 302)
eq_(res['Location'], reverse('submit.app'))
class TestManifest(TestSubmit):
def setUp(self):
super(TestManifest, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
def test_anonymous(self):
r = self.client.get(self.url, follow=True)
eq_(r.context['step'], 'terms')
def test_cannot_skip_prior_step(self):
r = self.client.get(self.url, follow=True)
# And we start back at one...
self.assert3xx(r, reverse('submit.app.terms'))
def test_jump_to_step(self):
# I already read the Terms.
self._step()
# So jump me to the Manifest step.
r = self.client.get(reverse('submit.app'), follow=True)
eq_(r.context['step'], 'manifest')
def test_legacy_redirects(self):
def check():
for before, status in redirects:
r = self.client.get(before, follow=True)
self.assert3xx(r, dest, status)
# I haven't read the dev agreement.
redirects = (
('/developers/submit/', 302),
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
)
dest = '/developers/submit/terms'
check()
# I have read the dev agreement.
self._step()
redirects = (
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
('/developers/submit/manifest', 301),
)
dest = '/developers/submit/'
check()
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#upload-file').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms'], 'manifest')
class UploadAddon(object):
def post(self, expect_errors=False, data=None):
if data is None:
data = {'free_platforms': ['free-desktop']}
data.update(upload=self.upload.pk)
r = self.client.post(self.url, data, follow=True)
eq_(r.status_code, 200)
if not expect_errors:
# Show any unexpected form errors.
if r.context and 'form' in r.context:
eq_(r.context['form'].errors, {})
return r
class BaseWebAppTest(BaseUploadTest, UploadAddon, amo.tests.TestCase):
fixtures = fixture('app_firefox', 'platform_all', 'user_999', 'user_10482')
def setUp(self):
super(BaseWebAppTest, self).setUp()
self.manifest = self.manifest_path('mozball.webapp')
self.manifest_url = 'http://allizom.org/mozball.webapp'
self.upload = self.get_upload(abspath=self.manifest)
self.upload.update(name=self.manifest_url, is_webapp=True)
self.url = reverse('submit.app')
assert self.client.login(username='regular@mozilla.com',
password='password')
def post_addon(self, data=None):
eq_(Addon.objects.count(), 0)
self.post(data=data)
return Addon.objects.get()
class TestCreateWebApp(BaseWebAppTest):
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_post_app_redirect(self, fi_mock):
r = self.post()
webapp = Webapp.objects.get()
self.assert3xx(r,
reverse('submit.app.details', args=[webapp.app_slug]))
assert fi_mock.delay.called, (
'The fetch_icon task was expected to be called')
def test_no_hint(self):
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url), follow=True)
eq_(r.status_code, 200)
assert 'already submitted' not in r.content, (
'Unexpected helpful error (trap_duplicate)')
assert 'already exists' not in r.content, (
'Unexpected validation error (verify_app_domain)')
def test_no_upload(self):
data = {'free_platforms': ['free-desktop']}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_bad_upload(self, fi_mock):
data = {'free_platforms': ['free-desktop'], 'upload': 'foo'}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
assert not fi_mock.delay.called, (
'The fetch_icon task was not expected to be called')
def test_hint_for_same_manifest(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
assert 'Oops' in data['validation']['messages'][0]['message'], (
'Expected oops')
def test_no_hint_for_same_manifest_different_author(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
# Submit same manifest as different user.
assert self.client.login(username='clouserw@gmail.com',
password='password')
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; only one app per domain is '
'allowed.')
def test_app_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.type, amo.ADDON_WEBAPP)
eq_(addon.is_packaged, False)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'MozillaBall ょ')
eq_(addon.slug, 'app-%s' % addon.id)
eq_(addon.app_slug, u'mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, u'http://allizom.org/mozball.webapp')
eq_(addon.app_domain, u'http://allizom.org')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.current_version.developer_name, 'Mozilla Labs')
eq_(addon.current_version.manifest,
json.loads(open(self.manifest).read()))
def test_manifest_with_any_extension(self):
self.manifest = os.path.join(settings.ROOT, 'mkt', 'developers',
'tests', 'addons', 'mozball.owa')
self.upload = self.get_upload(abspath=self.manifest, is_webapp=True)
addon = self.post_addon()
eq_(addon.type, amo.ADDON_WEBAPP)
def test_version_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.current_version.version, '1.0')
def test_file_from_uploaded_manifest(self):
addon = self.post_addon()
files = addon.current_version.files.all()
eq_(len(files), 1)
eq_(files[0].status, amo.STATUS_PENDING)
def test_set_platform(self):
app = self.post_addon(
{'free_platforms': ['free-android-tablet', 'free-desktop']})
self.assertSetEqual(app.device_types,
[amo.DEVICE_TABLET, amo.DEVICE_DESKTOP])
def test_free(self):
app = self.post_addon({'free_platforms': ['free-firefoxos']})
self.assertSetEqual(app.device_types, [amo.DEVICE_GAIA])
eq_(app.premium_type, amo.ADDON_FREE)
def test_premium(self):
self.create_flag('allow-b2g-paid-submission')
app = self.post_addon({'paid_platforms': ['paid-firefoxos']})
self.assertSetEqual(app.device_types, [amo.DEVICE_GAIA])
eq_(app.premium_type, amo.ADDON_PREMIUM)
def test_supported_locales(self):
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_short_locale(self):
# This manifest has a locale code of "pt" which is in the
# SHORTER_LANGUAGES setting and should get converted to "pt-PT".
self.manifest = self.manifest_path('short-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest)
addon = self.post_addon()
eq_(addon.default_locale, 'pt-PT')
eq_(addon.versions.latest().supported_locales, 'es')
def test_unsupported_detail_locale(self):
# This manifest has a locale code of "en-GB" which is unsupported, so
# we default to "en-US".
self.manifest = self.manifest_path('unsupported-default-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest)
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_appfeatures_creation(self):
addon = self.post_addon(data={
'free_platforms': ['free-desktop'],
'has_contacts': 'on'
})
features = addon.current_version.features
ok_(isinstance(features, AppFeatures))
field_names = [f.name for f in AppFeaturesForm().all_fields()]
for field in field_names:
expected = field == 'has_contacts'
eq_(getattr(features, field), expected)
class TestCreateWebAppFromManifest(BaseWebAppTest):
def setUp(self):
super(TestCreateWebAppFromManifest, self).setUp()
Webapp.objects.create(app_slug='xxx',
app_domain='http://existing-app.com')
def upload_webapp(self, manifest_url, **post_kw):
self.upload.update(name=manifest_url) # Simulate JS upload.
return self.post(**post_kw)
def post_manifest(self, manifest_url):
rs = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=manifest_url))
if 'json' in rs['content-type']:
rs = json.loads(rs.content)
return rs
def test_duplicate_domain(self):
self.create_switch(name='webapps-unique-by-domain')
rs = self.upload_webapp('http://existing-app.com/my.webapp',
expect_errors=True)
eq_(rs.context['form'].errors,
{'upload':
['An app already exists on this domain; only one '
'app per domain is allowed.']})
def test_allow_duplicate_domains(self):
self.upload_webapp('http://existing-app.com/my.webapp') # No errors.
def test_duplicate_domain_from_js(self):
self.create_switch(name='webapps-unique-by-domain')
data = self.post_manifest('http://existing-app.com/my.webapp')
eq_(data['validation']['errors'], 1)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; '
'only one app per domain is allowed.')
def test_allow_duplicate_domains_from_js(self):
rs = self.post_manifest('http://existing-app.com/my.webapp')
eq_(rs.status_code, 302)
class BasePackagedAppTest(BaseUploadTest, UploadAddon, amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
super(BasePackagedAppTest, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.app.update(is_packaged=True)
self.version = self.app.current_version
self.file = self.version.all_files[0]
self.file.update(filename='mozball.zip')
self.package = self.packaged_app_path('mozball.zip')
self.upload = self.get_upload(abspath=self.package)
self.upload.update(name='mozball.zip', is_webapp=True)
self.url = reverse('submit.app')
assert self.client.login(username='regular@mozilla.com',
password='password')
def post_addon(self, data=None):
eq_(Addon.objects.count(), 1)
self.post(data=data)
return Addon.objects.order_by('-id')[0]
def setup_files(self, filename='mozball.zip'):
# Make sure the source file is there.
# Original packaged file.
if not storage.exists(self.file.file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.file_path)
# Signed packaged file.
if not storage.exists(self.file.signed_file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.signed_file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.signed_file_path)
class TestCreatePackagedApp(BasePackagedAppTest):
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_post_app_redirect(self, _mock):
res = self.post()
webapp = Webapp.objects.order_by('-created')[0]
self.assert3xx(res,
reverse('submit.app.details', args=[webapp.app_slug]))
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
@mock.patch('mkt.submit.forms.verify_app_domain')
def test_app_from_uploaded_package(self, _verify, _mock):
addon = self.post_addon(
data={'packaged': True, 'free_platforms': ['free-firefoxos']})
eq_(addon.type, amo.ADDON_WEBAPP)
eq_(addon.current_version.version, '1.0')
eq_(addon.is_packaged, True)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'Packaged MozillaBall ょ')
eq_(addon.slug, 'app-%s' % addon.id)
eq_(addon.app_slug, u'packaged-mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, None)
eq_(addon.app_domain, 'app://hy.fr')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.current_version.developer_name, 'Mozilla Labs')
assert _verify.called, (
'`verify_app_domain` should be called for packaged apps with '
'origins.')
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_packaged_app_not_unique(self, _mock):
Webapp.objects.create(is_packaged=True, app_domain='app://hy.fr')
res = self.post(
data={'packaged': True, 'free_platforms': ['free-firefoxos']},
expect_errors=True)
eq_(res.context['form'].errors, {
'upload': ['An app already exists on this domain; only one app '
'per domain is allowed.']})
class TestDetails(TestSubmit):
fixtures = fixture('webapp_337141', 'user_999', 'user_10482')
def setUp(self):
super(TestDetails, self).setUp()
self.webapp = self.get_webapp()
self.webapp.update(status=amo.STATUS_NULL)
self.url = reverse('submit.app.details', args=[self.webapp.app_slug])
def get_webapp(self):
return Webapp.objects.get(id=337141)
def upload_preview(self, image_file=None):
if not image_file:
image_file = get_image_path('preview.jpg')
return self._upload_image(self.webapp.get_dev_url('upload_preview'),
image_file=image_file)
def upload_icon(self, image_file=None):
if not image_file:
image_file = get_image_path('mozilla-sq.png')
return self._upload_image(self.webapp.get_dev_url('upload_icon'),
image_file=image_file)
def _upload_image(self, url, image_file):
with open(image_file, 'rb') as data:
rp = self.client.post(url, {'upload_image': data})
eq_(rp.status_code, 200)
hash_ = json.loads(rp.content)['upload_hash']
assert hash_, 'No hash: %s' % rp.content
return hash_
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
self.cl = AppSubmissionChecklist.objects.create(addon=self.webapp,
terms=True, manifest=True)
# Associate app with user.
AddonUser.objects.create(addon=self.webapp, user=self.user)
# Associate device type with app.
self.dtype = DEVICE_TYPES.values()[0]
AddonDeviceType.objects.create(addon=self.webapp,
device_type=self.dtype.id)
self.device_types = [self.dtype]
# Associate category with app.
self.cat1 = Category.objects.create(type=amo.ADDON_WEBAPP, name='Fun')
AddonCategory.objects.create(addon=self.webapp, category=self.cat1)
def test_anonymous(self):
self._test_anonymous()
def test_resume_later(self):
self._step()
self.webapp.appsubmissionchecklist.update(details=True)
r = self.client.get(reverse('submit.app.resume',
args=[self.webapp.app_slug]))
self.assert3xx(r, self.webapp.get_dev_url('edit'))
def test_not_owner(self):
self._step()
assert self.client.login(username='clouserw@gmail.com',
password='password')
eq_(self.client.get(self.url).status_code, 403)
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#submit-details').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest'], 'details')
def new_preview_formset(self, *args, **kw):
ctx = self.client.get(self.url).context
blank = initial(ctx['form_previews'].forms[-1])
blank.update(**kw)
return blank
def preview_formset(self, *args, **kw):
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.new_preview_formset()], **kw)
return dict([(k, '' if v is None else v) for k, v in fs.items()])
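    # Editor's note: `formset(...)` and `initial(...)` come from amo.tests;
    # they build the Django formset POST payload (management form plus one
    # blank extra form) that the preview upload views expect.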
def get_dict(self, **kw):
data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'homepage': 'http://www.goodreads.com/user/show/7595895-krupa',
'support_url': 'http://www.goodreads.com/user_challenges/351558',
'support_email': 'krupa+to+the+rescue@goodreads.com',
'categories': [self.cat1.id],
'flash': '1',
'publish': '1'
}
# Add the required screenshot.
data.update(self.preview_formset({
'upload_hash': '<hash>',
'position': 0
}))
data.update(**kw)
# Remove fields without values.
data = dict((k, v) for k, v in data.iteritems() if v is not None)
return data
def check_dict(self, data=None, expected=None):
if data is None:
data = self.get_dict()
addon = self.get_webapp()
# Build a dictionary of expected results.
expected_data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'uses_flash': True,
'make_public': amo.PUBLIC_IMMEDIATELY
}
if expected:
expected_data.update(expected)
for field, expected in expected_data.iteritems():
got = unicode(getattr(addon, field))
expected = unicode(expected)
eq_(got, expected,
'Expected %r for %r. Got %r.' % (expected, field, got))
self.assertSetEqual(addon.device_types, self.device_types)
@mock.patch('mkt.submit.views.record_action')
def test_success(self, record_action):
self._step()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_PENDING)
assert record_action.called
@mock.patch('mkt.submit.views.record_action')
def test_success_iarc(self, record_action):
"""TODO: delete the above test when cleaning up waffle."""
self.create_switch('iarc')
self._step()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_NULL)
assert record_action.called
def test_success_paid(self):
self._step()
self.webapp = self.get_webapp()
self.make_premium(self.webapp)
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_NULL)
eq_(self.webapp.highest_status, amo.STATUS_PENDING)
def test_success_prefill_device_types_if_empty(self):
"""
The new submission flow asks for device types at step one.
This ensures that existing incomplete apps still have device
compatibility.
"""
self._step()
AddonDeviceType.objects.all().delete()
self.device_types = amo.DEVICE_TYPES.values()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_success_for_public_waiting(self):
self._step()
data = self.get_dict()
del data['publish']
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data, expected={'make_public': amo.PUBLIC_WAIT})
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_media_types(self):
self._step()
res = self.client.get(self.url)
doc = pq(res.content)
eq_(doc('.screenshot_upload').attr('data-allowed-types'),
'image/jpeg|image/png|video/webm')
eq_(doc('#id_icon_upload').attr('data-allowed-types'),
'image/jpeg|image/png')
def test_screenshot(self):
self._step()
im_hash = self.upload_preview()
data = self.get_dict()
data.update(self.preview_formset({
'upload_hash': im_hash,
'position': 0
}))
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = Addon.objects.get(pk=self.webapp.pk)
eq_(ad.previews.all().count(), 1)
def test_icon(self):
self._step()
im_hash = self.upload_icon()
data = self.get_dict()
data['icon_upload_hash'] = im_hash
data['icon_type'] = 'image/png'
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = self.get_webapp()
eq_(ad.icon_type, 'image/png')
for size in amo.ADDON_ICON_SIZES:
fn = '%s-%s.png' % (ad.id, size)
assert os.path.exists(os.path.join(ad.get_icon_dir(), fn)), (
'Expected %s in %s' % (fn, os.listdir(ad.get_icon_dir())))
def test_screenshot_or_video_required(self):
self._step()
data = self.get_dict()
for k in data:
if k.startswith('files') and k.endswith('upload_hash'):
data[k] = ''
rp = self.client.post(self.url, data)
eq_(rp.context['form_previews'].non_form_errors(),
['You must upload at least one screenshot or video.'])
def test_unsaved_screenshot(self):
self._step()
# If there are form errors we should still pass the previews URIs.
preview_type = 'video/webm'
preview_uri = 'moz-filedata:p00p'
data = self.preview_formset({
'position': 1,
'upload_hash': '<hash_one>',
'unsaved_image_type': preview_type,
'unsaved_image_data': preview_uri
})
r = self.client.post(self.url, data)
eq_(r.status_code, 200)
form = pq(r.content)('form')
eq_(form.find('input[name=files-0-unsaved_image_type]').val(),
preview_type)
eq_(form.find('input[name=files-0-unsaved_image_data]').val(),
preview_uri)
def test_unique_allowed(self):
self._step()
r = self.client.post(self.url, self.get_dict(name=self.webapp.name))
self.assertNoFormErrors(r)
app = Webapp.objects.exclude(app_slug=self.webapp.app_slug)[0]
self.assert3xx(r, reverse('submit.app.done', args=[app.app_slug]))
eq_(self.get_webapp().status, amo.STATUS_PENDING)
def test_unique_allowed_iarc(self):
"""TODO: delete the above test when cleaning up waffle."""
self.create_switch('iarc')
self._step()
r = self.client.post(self.url, self.get_dict(name=self.webapp.name))
self.assertNoFormErrors(r)
app = Webapp.objects.exclude(app_slug=self.webapp.app_slug)[0]
self.assert3xx(r, reverse('submit.app.done', args=[app.app_slug]))
eq_(self.get_webapp().status, amo.STATUS_NULL)
def test_slug_invalid(self):
self._step()
# Submit an invalid slug.
d = self.get_dict(app_slug='slug!!! aksl23%%')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
"Enter a valid 'slug' consisting of letters, numbers, underscores "
"or hyphens.")
def test_slug_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(app_slug=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
'This field is required.')
def test_description_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(description=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'description',
'This field is required.')
def test_privacy_policy_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(privacy_policy=None))
self.assertFormError(r, 'form_basic', 'privacy_policy',
'This field is required.')
def test_clashing_locale(self):
self.webapp.default_locale = 'de'
self.webapp.save()
self._step()
self.client.cookies['current_locale'] = 'en-us'
data = self.get_dict(name=None, name_de='Test name',
privacy_policy=None,
**{'privacy_policy_en-us': 'XXX'})
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
def test_homepage_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage=None))
self.assertNoFormErrors(r)
def test_homepage_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage='xxx'))
self.assertFormError(r, 'form_basic', 'homepage', 'Enter a valid URL.')
def test_support_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url=None))
self.assertNoFormErrors(r)
def test_support_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url='xxx'))
self.assertFormError(r, 'form_basic', 'support_url',
'Enter a valid URL.')
def test_support_email_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email=None))
self.assertFormError(r, 'form_basic', 'support_email',
'This field is required.')
def test_support_email_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email='xxx'))
self.assertFormError(r, 'form_basic', 'support_email',
'Enter a valid email address.')
def test_categories_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(categories=[]))
eq_(r.context['form_cats'].errors['categories'],
['This field is required.'])
def test_categories_max(self):
self._step()
eq_(amo.MAX_CATEGORIES, 2)
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
cat3 = Category.objects.create(type=amo.ADDON_WEBAPP, name='blang')
cats = [self.cat1.id, cat2.id, cat3.id]
r = self.client.post(self.url, self.get_dict(categories=cats))
eq_(r.context['form_cats'].errors['categories'],
['You can have only 2 categories.'])
def _post_cats(self, cats):
self.client.post(self.url, self.get_dict(categories=cats))
eq_(sorted(self.get_webapp().categories.values_list('id', flat=True)),
sorted(cats))
def test_categories_add(self):
self._step()
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
self._post_cats([self.cat1.id, cat2.id])
def test_categories_add_and_remove(self):
self._step()
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
self._post_cats([cat2.id])
def test_categories_remove(self):
# Add another category here so it gets added to the initial formset.
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
AddonCategory.objects.create(addon=self.webapp, category=cat2)
self._step()
# `cat2` should get removed.
self._post_cats([self.cat1.id])
class TestDone(TestSubmit):
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
super(TestDone, self).setUp()
self.webapp = self.get_webapp()
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def get_webapp(self):
return Webapp.objects.get(id=337141)
def _step(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
def test_anonymous(self):
self._test_anonymous()
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest', 'details'], 'done')
def test_done(self):
self._step()
res = self.client.get(self.url)
eq_(res.status_code, 200)
class TestNextSteps(amo.tests.TestCase):
# TODO: Delete this test suite once we deploy IARC.
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
self.create_switch('iarc')
self.user = UserProfile.objects.get(username='regularuser')
assert self.client.login(username=self.user.email, password='password')
self.webapp = Webapp.objects.get(id=337141)
self.webapp.update(status=amo.STATUS_PENDING)
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def test_200(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
res = self.client.get(self.url)
eq_(res.status_code, 200)
|
jinankjain/zamboni
|
mkt/submit/tests/test_views.py
|
Python
|
bsd-3-clause
| 38,619
|
[
"exciting"
] |
9b90f9b880b99b89c4be77bc1ec6a5682851d5d387d46955e93b72bc259ac973
|
#!/usr/bin/env python
"""
kafe2 example: Poisson cost function
====================================
In data analysis the uncertainty on measurement data is most often assumed to resemble a normal distribution.
For many use cases this assumption works reasonably well, but there is a problem: to get meaningful fit results
you need to know the uncertainties of your measurements. Now imagine for a moment that the quantity you're
measuring is the number of radioactive decays coming from some substance in a given time period. What is your
data error in this case? The precision with which you can correctly count the decays? The answer is that, due to
the inherently random nature of radioactive decay, the variance, and therefore the uncertainty on your measurement
data, follows directly from the mean number of decays in a given time period - the number of decays follows
a Poisson distribution. In kafe2 this distribution can be modeled by initializing a fit object with a special
cost function. In previous examples, when no cost function was provided, a normal distribution was assumed
by default. It is important to know that for large numbers of events a Poisson distribution can be approximated
by a normal distribution (y_error = sqrt(y_data)). Consult the other examples in this folder for more details.
For our example on cost functions we imagine the following, admittedly a little contrived scenario:
In some remote location on earth archeologists have found the ruins of an ancient civilization. They estimate
the ruins to be about 7000 years old. The civilization in question seems to have known about mathematics and they
even had their own calendar. Unfortunately we do not know the exact offset of this ancient calendar relative to
our modern calendar. Luckily the ancient civilization seems to have mummified their rulers and written down their
years of death though. Using a method called radiocarbon dating we can now try to estimate the offset between the
ancient and the modern calendar by analyzing the relative amounts of carbon isotopes in the mummified remains of
the ancient kings. More specifically, we take small samples from the mummies, extract the carbon from those samples
and then measure the number of decaying carbon-14 atoms in our samples. Carbon-14 is a trace radioisotope with a
half life of only 5730 years that is continuously being produced in earth's upper atmosphere. In a living organism
there is a continuous exchange of carbon atoms with its environment which results in a stable concentration of
carbon-14. Once an organism dies, however, the carbon atoms in its body are fixed and the concentration of
carbon-14 starts to decrease exponentially over time. If we then measure the concentration of carbon-14 in our
samples, we can calculate at which point in time they must have contained atmospheric amounts of carbon-14,
i.e. the times of death of the ancient kings.
"""
import numpy as np
import matplotlib.pyplot as plt
from kafe2 import XYFit, Plot
# Years of death are our x-data, measured c14 activity is our y-data.
# Note that our data does NOT include any x or y errors.
years_of_death, measured_c14_activity = np.loadtxt('measured_c14_activity.txt')
days_per_year = 365.25 # assumed number of days per year
current_year = 2019 # current year according to the modern calendar
sample_mass = 1.0 # Mass of the carbon samples in g
initial_c14_concentration = 1e-12 # Assumed initial concentration
N_A = 6.02214076e23 # Avogadro constant in 1/mol
molar_mass_c14 = 14.003241 # Molar mass of the Carbon-14 isotope in g/mol
expected_initial_num_c14_atoms = initial_c14_concentration * N_A * sample_mass / molar_mass_c14
# t = years of death in the ancient calendar
# Delta_t = difference between the ancient and the modern calendar in years
# T_12_C14 = half life of carbon-14 in years, read as T 1/2 carbon-14
def expected_activity_per_day(t, Delta_t=5000, T_12_C14=5730):
# activity = number of radioactive decays
expected_initial_activity_per_day = expected_initial_num_c14_atoms * np.log(2) / (T_12_C14 * days_per_year)
total_years_since_death = Delta_t + current_year - t
return expected_initial_activity_per_day * np.exp(-np.log(2) * total_years_since_death / T_12_C14)
# This is where we tell the fit to assume a poisson distribution for our data.
xy_fit = XYFit(
xy_data=[years_of_death, measured_c14_activity],
model_function=expected_activity_per_day,
cost_function="nll-poisson"
)
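# A minimal sketch of the gaussian approximation mentioned in the docstring
# (hypothetical alternative, not part of this example; assumes kafe2's
# add_error method):
# xy_fit_gauss = XYFit(xy_data=[years_of_death, measured_c14_activity],
#                      model_function=expected_activity_per_day)
# xy_fit_gauss.add_error(axis='y', err_val=np.sqrt(measured_c14_activity))
# xy_fit_gauss.do_fit()  # ordinary chi2 fit with y_error = sqrt(y_data)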
# The half life of carbon-14 is only known with a precision of +-40 years
xy_fit.add_parameter_constraint(name='T_12_C14', value=5730, uncertainty=40)
# Perform the fit
# Note that for a Poisson distribution the data error is directly linked to the mean.
# Because of this, fits can be performed without explicitly adding data errors.
xy_fit.do_fit()
# Optional: Assign new parameter names:
xy_fit.assign_parameter_latex_names(Delta_t=r"\Delta t", T_12_C14=r"T_{1/2}({}^{14}C)")
# Optional: print out a report on the fit results on the console
xy_fit.report()
# Optional: create a plot of the fit results using Plot
xy_plot = Plot(xy_fit)
xy_plot.plot(fit_info=True)
plt.show()
|
dsavoiu/kafe2
|
examples/007_cost_functions/01_poisson_cost_function.py
|
Python
|
gpl-3.0
| 5,192
|
[
"Avogadro"
] |
df0a55af0b06443ce7f5bc7482e13cd9396e4affe41d5c0ef2a78ad8fd590447
|
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
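A minimal `[versioneer]` section might look like the following sketch (the
option names match `get_config_from_root()` below; the values are
placeholders for your project):

    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix =
    parentdir_prefix = myproject-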
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
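As a rough sketch (the exact values depend on your checkout), a dirty tree
two commits past the "0.11" tag might yield:

    {'version': '0.11+2.g1076c97.dirty',
     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
     'dirty': True, 'error': None, 'date': '2018-03-27T10:00:00+0200'}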
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
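As a concrete sketch (exercising the `render_pep440` function defined later
in this file), a checkout that is two commits past the "0.11" tag and has
uncommitted changes would render like this:

    pieces = {"closest-tag": "0.11", "distance": 2,
              "short": "1076c97", "dirty": True, "error": None}
    render_pep440(pieces)  # -> '0.11+2.g1076c97.dirty'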
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This section details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"main" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we cannot use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir does not start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we do not want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "main".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py has not already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there is not one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' does not start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' does not start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we do not already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you should not be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations do not do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we do not want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "main".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py has not already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there is not one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' does not start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' does not start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir does not start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we do not already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you should not be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git is not copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe will not like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s does not exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip will not be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That does not cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
statsmodels/statsmodels
|
versioneer.py
|
Python
|
bsd-3-clause
| 68,632
|
[
"Brian"
] |
4e77f27356659b1cadb50993cf84a69ddb3bd99e396006e00dba52f0f4a028df
|
import numpy as np
from scipy import special
import links as L
EPS = np.spacing(1.)
#==============================================================================
# Skeleton parent class for all families
#==============================================================================
class Family:
'''
    Class to encapsulate a family of GLMs based on a common exponential-family
distribution.
Arguments consist of a string specifying a link function and another
specifying a variance function.
'''
links = []
def __setlink(self, link):
'''
Helper function to set link function while ensuring validity.
link must be a character vector corresponding to a key in the links
dictionary.
'''
if not isinstance(link, L.Link):
raise TypeError('Argument is not a valid link function')
if hasattr(self, 'links'):
valid = link in self.links
if not valid:
raise ValueError('Argument is not a valid link function '
'for this family')
self._link = link
def __getlink(self):
return self._link
link = property(__getlink, __setlink)
def __init__(self, link, var):
'''
Set link and variance function for family
'''
self.link = link()
self.var = var
def mu_init(self, y):
'''
Compute starting value for mu in Fisher scoring algorithm.
Takes untransformed values of outcome variable as input.
'''
return y/2. + y.mean()/2.
def weights(self, mu):
'''
Compute weights for Fisher scoring iterations.
Takes fitted mean as input.
'''
return self.link.deriv(self.link(mu))**2 / self.var(mu)
def loglik(self, y, mu, w=1.):
'''
Compute log-likelihood of observations given means mu and weights
(input, not Fisher).
'''
        raise NotImplementedError
def deviance(self, y, mu, w=1.):
'''
Compute deviance of observations given means mu and weights
(input, not Fisher).
'''
        raise NotImplementedError
#==============================================================================
# Particular families for GLMs
#==============================================================================
class Gaussian(Family):
'''
Family for Gaussian GLMs
'''
links = [L.Identity, L.Log, L.Inverse]
def __init__(self, link=L.Identity):
self.link = link()
def var(self, mu):
'''
Gaussian variance function
'''
return np.ones_like(mu)
def loglik(self, y, mu, w=1):
'''
Compute log-likelihood of observations given means mu and weights
(input, not Fisher).
'''
return np.sum(w*(y-mu)**2)
def deviance(self, y, mu, w=1):
'''
Compute deviance of observations given means mu and weights
(input, not Fisher).
'''
return np.sum(w*(y-mu)**2)
class Binomial(Family):
'''
Family for binomial GLMs
'''
links = [L.Logit, L.Probit, L.Cloglog]
def __init__(self, link=L.Logit):
self.link = link()
def var(self, mu):
'''
Binomial variance function
'''
return np.maximum(mu*(1.-mu), np.sqrt(EPS))
def mu_init(self, y):
'''
Specialized initialization for binomials. Using
(y + 0.5) / 2
'''
return (y + 0.5)/2.
def loglik(self, y, mu, w=1):
'''
Compute log-likelihood of observations given means mu and weights
(input, not Fisher).
'''
        return np.sum(y*(np.log(mu + EPS) - np.log(1. - mu + EPS)) +
                      w*np.log(1. - mu + EPS))
def deviance(self, y, mu, w=1):
'''
Compute deviance of observations given means mu and weights
(input, not Fisher).
'''
if np.max(w) == 1:
# Handle binary case
return -2.*np.sum(y*np.log(mu + EPS) + (1.-y)*np.log(1.-mu + EPS))
else:
# Binomial case with n > 1
            return 2.*np.sum(y*np.log(y/w/mu + EPS) +
                             (w-y)*np.log((1.-y/w)/(1.-mu) + EPS))
class Poisson(Family):
'''
Family for Poisson GLMs
'''
links = [L.Log, L.Identity]
def __init__(self, link=L.Log):
self.link = link()
def var(self, mu):
'''
Poisson variance function
'''
return mu
def loglik(self, y, mu, w=1):
'''
Compute log-likelihood of observations given means mu and weights
(input, not Fisher).
'''
return np.sum(w*(-mu + np.log(mu)*y - special.gammaln(y + 1)))
def deviance(self, y, mu, w=1):
'''
Compute deviance of observations given means mu and weights
(input, not Fisher).
'''
y_over_mu = y / mu
y_over_mu[y==0] = 1.
return 2.*np.sum(w*(y*np.log(y_over_mu) - (y-mu)))
class Gamma(Family):
'''
Family for Gamma GLMs
'''
links = [L.Log, L.Identity, L.Inverse]
def __init__(self, link=L.Log):
self.link = link()
def var(self, mu):
'''
Gamma variance function
'''
return mu**2
def loglik(self, y, mu, w=1):
'''
Compute log-likelihood of observations given means mu and weights
(input, not Fisher).
'''
y_over_mu = y / mu
y_over_mu[y==0] = 1.
return np.sum(w*( np.log(y_over_mu) - y/mu ))
def deviance(self, y, mu, w=1):
'''
Compute deviance of observations given means mu and weights
(input, not Fisher).
'''
y_over_mu = y / mu
y_over_mu[y==0] = 1.
return 2.*np.sum(w*(np.log(y_over_mu) - y/mu))
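# A minimal usage sketch (data values are illustrative):
#
#   import numpy as np
#   fam = Poisson()                # log link by default
#   y = np.array([0., 1., 4.])
#   mu = np.array([0.5, 1.5, 3.0])
#   fam.deviance(y, mu)            # scalar deviance
#   fam.weights(mu)                # Fisher-scoring weights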
|
awblocker/glm
|
lib/glm/families.py
|
Python
|
bsd-3-clause
| 6,029
|
[
"Gaussian"
] |
b232c766a3c3c68226bb61b3315174103179b05b045462f91fe2b1acb7aae2b5
|
"""
===================================================================
Compute cross-talk functions (CTFs) for labels for MNE/dSPM/sLORETA
===================================================================
CTFs are computed for four labels in the MNE sample data set
for linear inverse operators (MNE, dSPM, sLORETA).
CTFs describe the sensitivity of a linear estimator (e.g. for
one label) to sources across the cortical surface. Sensitivity
to sources outside the label is undesirable, and referred to as
"leakage" or "cross-talk".
"""
# Author: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
#
# License: BSD (3-clause)
print(__doc__)
import mne
from mne.datasets import sample
from mne.minimum_norm import cross_talk_function, read_inverse_operator
data_path = sample.data_path()
subjects_dir = data_path + '/subjects/'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
fname_label = [data_path + '/MEG/sample/labels/Aud-rh.label',
data_path + '/MEG/sample/labels/Aud-lh.label',
data_path + '/MEG/sample/labels/Vis-rh.label',
data_path + '/MEG/sample/labels/Vis-lh.label']
# In order to get gain matrix with fixed source orientation,
# read forward solution with fixed orientations
forward = mne.read_forward_solution(fname_fwd, force_fixed=True, surf_ori=True)
# read label(s)
labels = [mne.read_label(ss) for ss in fname_label]
inverse_operator = read_inverse_operator(fname_inv)
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
mode = 'svd'
n_svd_comp = 1
method = 'MNE' # can be 'MNE', 'dSPM', or 'sLORETA'
stc_ctf_mne = cross_talk_function(inverse_operator, forward, labels,
method=method, lambda2=lambda2,
signed=False, mode=mode,
n_svd_comp=n_svd_comp)
method = 'dSPM'
stc_ctf_dspm = cross_talk_function(inverse_operator, forward, labels,
method=method, lambda2=lambda2,
signed=False, mode=mode,
n_svd_comp=n_svd_comp)
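# sLORETA CTFs can be computed the same way (sketch only; not plotted below):
# stc_ctf_sloreta = cross_talk_function(inverse_operator, forward, labels,
#                                       method='sLORETA', lambda2=lambda2,
#                                       signed=False, mode=mode,
#                                       n_svd_comp=n_svd_comp)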
from mayavi import mlab
fmin = 0.
time_label = "MNE %d"
fmax = stc_ctf_mne.data[:, 0].max()
fmid = fmax / 2.
brain_mne = stc_ctf_mne.plot(surface='inflated', hemi='rh',
subjects_dir=subjects_dir,
time_label=time_label, fmin=fmin,
fmid=fmid, fmax=fmax,
figure=mlab.figure(size=(500, 500)))
time_label = "dSPM %d"
fmax = stc_ctf_dspm.data[:, 0].max()
fmid = fmax / 2.
brain_dspm = stc_ctf_dspm.plot(surface='inflated', hemi='rh',
subjects_dir=subjects_dir,
time_label=time_label, fmin=fmin,
fmid=fmid, fmax=fmax,
figure=mlab.figure(size=(500, 500)))
# Cross-talk functions for MNE and dSPM (and sLORETA) have the same shapes
# (they may still differ in overall amplitude).
# Point-spread functions (PSFs) usually differ significantly.
|
effigies/mne-python
|
examples/inverse/plot_mne_crosstalk_function.py
|
Python
|
bsd-3-clause
| 3,239
|
[
"Mayavi"
] |
944697b8a3a9bbdcb125c045a7a4e7b3ddb0811ff6af02dfa883436aa833e72e
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij@gmail.com)
# License: MIT License (See LICENSE.md)
# Copyright (c) 2013
man_page_text = """
Usage (example):
ttree_render.py ttree_assignments.txt < file.template > file.rendered
The argument (ttree_assignments.txt) should be a 2-column file containing
ttree-style variables (1st column), and their values (bindings, 2nd column).
This program reads a text file containing ttree-style variables,
substitutes the corresponding values stored in ttree_assignments.txt,
and prints out the new (rendered) text to the standard-out.
"""
import sys
import gc
try:
from .ttree import ExtractFormattingCommands
from .ttree_lex import SplitQuotedString, InputError, TemplateLexer
except (ImportError, SystemError, ValueError):
# not installed as a package
from ttree import ExtractFormattingCommands
from ttree_lex import SplitQuotedString, InputError, TemplateLexer
g_filename = __file__.split('/')[-1]
g_module_name = g_filename
if g_filename.rfind('.py') != -1:
g_module_name = g_filename[:g_filename.rfind('.py')]
g_date_str = '2019-11-02'
g_version_str = '0.2.4'
g_program_name = g_filename
#sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
def main():
try:
if (len(sys.argv) < 2):
raise InputError('Error running \"' + g_program_name + '\"\n'
' Typical usage:\n'
' ttree_render.py ttree_assignments.txt < file.template > file.rendered\n'
'\n'
' Missing argument.\n'
' Expected the name of a 2-column file containing\n'
' variable names and their bindings (values).\n'
' (This is likely a programmer error.\n'
' This script was not intended to be run by end users.)\n')
bindings_filename = sys.argv[1]
ftemplate = sys.stdin
ftemplate_name = '__standard_input_for_ttree_render__'
if len(sys.argv) >= 3:
ftemplate_name = sys.argv[2]
ftemplate = open(ftemplate_name, 'r')
fbindings = open(bindings_filename)
assignments = {}
        #BasicUIReadBindingsStream(assignments, fbindings, bindings_filename)
# The line above is robust but it uses far too much memory.
# This for loop below works for most cases.
for line in fbindings:
#tokens = lines.strip().split()
# like split but handles quotes
tokens = SplitQuotedString(line.strip())
if len(tokens) < 2:
continue
assignments[tokens[0]] = tokens[1]
fbindings.close()
gc.collect()
lex = TemplateLexer(ftemplate, ftemplate_name)
lex.var_delim = '$@'
text_block_list = lex.ReadTemplate(simplify_output=True)
output = []
for entry in text_block_list:
assert(isinstance(entry, str))
if ((len(entry) > 1) and (entry[0] in lex.var_delim)):
var_prefix = ''
var_suffix = ''
var_format = ''
if ((len(entry) >= 3) and
(entry[1] == '{') and
(entry[-1] == '}')):
var_prefix = '{'
var_suffix = '}'
entry = entry[0] + entry[2:-1]
if '.' in entry:
ic = entry.find('.')
var_name = entry[:ic]
var_format = entry[ic:]
if not var_format[0:7] in ('.ljust(', '.rjust('):
var_name = entry
var_format = ''
else:
var_name = entry
var_format = ''
if var_name not in assignments:
#COMMENTING OUT:
#raise(InputError('Error(' + g_program_name + ')'
# #' at '+ErrorLeader(var_ref.src_loc.infile,
# # var_ref.src_loc.lineno)+
# ' unknown variable:\n'
# ' \"' + var_name + '\"\n'))
# ...actually don't raise an error message:
                    # Actually there are some legitimate reasons this could occur.
# Some users want to put LAMMPS-style variables in the
# write_once() {...} text blocks in their moltemplate files.
# Variables in both LAMMPS and moltemplate contain $ characters,
# and this script gets confused. Better to just ignore it
# when this happens instead of printing an error message.
# Just leave the text alone and print the variable name.
#
                    # Do this by substituting the variable's name as its value:
var_value = entry[0] + var_prefix + var_name[1:] + var_suffix
else:
var_value = assignments[var_name]
format_fname, args = ExtractFormattingCommands(var_format)
if format_fname == 'ljust':
if len(args) == 1:
var_value = var_value.ljust(int(args[0]))
else:
var_value = var_value.ljust(int(args[0]), args[1])
elif format_fname == 'rjust':
if len(args) == 1:
var_value = var_value.rjust(int(args[0]))
else:
var_value = var_value.rjust(int(args[0]), args[1])
output.append(var_value)
else:
output += entry
sys.stdout.write(''.join(output))
# If we are not reading the file from sys.stdin, then close the file:
if ftemplate_name == '__standard_input_for_ttree_render__':
assert(ftemplate == sys.stdin)
else:
ftemplate.close()
except (ValueError, InputError) as err:
sys.stderr.write('\n' + str(err) + '\n')
sys.exit(-1)
return
if __name__ == '__main__':
main()
|
jewettaij/moltemplate
|
moltemplate/ttree_render.py
|
Python
|
mit
| 6,332
|
[
"LAMMPS"
] |
30f9fdf361b27a5087747a26be2bf6e91478d206c8c7ebd9a72a6e49792654b4
|
from __future__ import annotations
import math
import os
import pytest
from scitbx import matrix
class SpotPredictor:
def __init__(self, dials_regression):
import dxtbx
from iotbx.xds import integrate_hkl, xparm
from rstbx.cftbx.coordinate_frame_converter import coordinate_frame_converter
from dials.algorithms.spot_prediction import (
IndexGenerator,
ScanStaticRayPredictor,
ray_intersection,
)
from dials.util import ioutil
# The XDS files to read from
integrate_filename = os.path.join(
dials_regression, "data", "sim_mx", "INTEGRATE.HKL"
)
gxparm_filename = os.path.join(dials_regression, "data", "sim_mx", "GXPARM.XDS")
# Read the XDS files
self.integrate_handle = integrate_hkl.reader()
self.integrate_handle.read_file(integrate_filename)
self.gxparm_handle = xparm.reader()
self.gxparm_handle.read_file(gxparm_filename)
# Get the parameters we need from the GXPARM file
models = dxtbx.load(gxparm_filename)
self.beam = models.get_beam()
self.gonio = models.get_goniometer()
self.detector = models.get_detector()
self.scan = models.get_scan()
assert len(self.detector) == 1
# print self.detector
# Get crystal parameters
self.space_group_type = ioutil.get_space_group_type_from_xparm(
self.gxparm_handle
)
cfc = coordinate_frame_converter(gxparm_filename)
a_vec = cfc.get("real_space_a")
b_vec = cfc.get("real_space_b")
c_vec = cfc.get("real_space_c")
self.unit_cell = cfc.get_unit_cell()
self.ub_matrix = matrix.sqr(a_vec + b_vec + c_vec).inverse()
# Get the minimum resolution in the integrate file
self.d_min = self.detector[0].get_max_resolution_at_corners(self.beam.get_s0())
# Get the number of frames from the max z value
xcal, ycal, zcal = zip(*self.integrate_handle.xyzcal)
self.scan.set_image_range(
(
self.scan.get_image_range()[0],
self.scan.get_image_range()[0] + int(math.ceil(max(zcal))),
)
)
# Create the index generator
generate_indices = IndexGenerator(
self.unit_cell, self.space_group_type, self.d_min
)
s0 = self.beam.get_s0()
m2 = self.gonio.get_rotation_axis()
fixed_rotation = self.gonio.get_fixed_rotation()
setting_rotation = self.gonio.get_setting_rotation()
UB = self.ub_matrix
dphi = self.scan.get_oscillation_range(deg=False)
# Create the ray predictor
self.predict_rays = ScanStaticRayPredictor(
s0, m2, fixed_rotation, setting_rotation, dphi
)
# Predict the spot locations
self.reflections = self.predict_rays(generate_indices.to_array(), UB)
# Calculate the intersection of the detector and reflection frames
success = ray_intersection(self.detector, self.reflections)
self.reflections.select(success)
@pytest.fixture(scope="session")
def spotpredictor(dials_regression):
return SpotPredictor(dials_regression)
def test_dmin(spotpredictor):
"""Ensure calculated d_min < d_min in integrate file"""
d = [spotpredictor.unit_cell.d(h) for h in spotpredictor.integrate_handle.hkl]
d_min = min(d)
assert spotpredictor.d_min <= d_min
def test_miller_index_set(spotpredictor):
"""Ensure we have the whole set of miller indices"""
gen_hkl = {}
for r in spotpredictor.reflections.rows():
gen_hkl[r["miller_index"]] = True
for hkl in spotpredictor.integrate_handle.hkl:
assert gen_hkl[hkl]
def test_rotation_angles(spotpredictor):
"""Ensure the rotation angles agree with XDS"""
# Create a dict of lists of xy for each hkl
gen_phi = {}
for r in spotpredictor.reflections.rows():
hkl = r["miller_index"]
phi = r["phi"]
try:
a = gen_phi[hkl]
a.append(phi)
gen_phi[hkl] = a
except KeyError:
gen_phi[hkl] = [phi]
for hkl, xyz in zip(
spotpredictor.integrate_handle.hkl, spotpredictor.integrate_handle.xyzcal
):
xds_phi = (
spotpredictor.scan.get_oscillation(deg=False)[0]
+ xyz[2] * spotpredictor.scan.get_oscillation(deg=False)[1]
)
# Select the nearest xy to use if there are 2
my_phi = gen_phi[hkl]
if len(my_phi) == 2:
my_phi0 = my_phi[0]
my_phi1 = my_phi[1]
diff0 = abs(xds_phi - my_phi0)
diff1 = abs(xds_phi - my_phi1)
if diff0 < diff1:
my_phi = my_phi0
else:
my_phi = my_phi1
else:
my_phi = my_phi[0]
assert xds_phi == pytest.approx(my_phi, abs=0.1)
def test_beam_vectors(spotpredictor):
"""Ensure |s1| == |s0|"""
s0_length = matrix.col(spotpredictor.beam.get_s0()).length()
for r in spotpredictor.reflections.rows():
s1 = r["s1"]
s1_length = matrix.col(s1).length()
assert s0_length == pytest.approx(s1_length, abs=1e-7)
def test_image_coordinates(spotpredictor):
"""Ensure the image coordinates agree with XDS"""
# Create a dict of lists of xy for each hkl
gen_xy = {}
for r in spotpredictor.reflections.rows():
hkl = r["miller_index"]
xy = r["xyzcal.mm"][0:2]
xy = spotpredictor.detector[0].millimeter_to_pixel(xy)
try:
a = gen_xy[hkl]
a.append(xy)
gen_xy[hkl] = a
except KeyError:
gen_xy[hkl] = [xy]
for hkl, xyz in zip(
spotpredictor.integrate_handle.hkl, spotpredictor.integrate_handle.xyzcal
):
xds_xy = (xyz[0] - 0.5, xyz[1] - 0.5)
# Select the nearest xy to use if there are 2
my_xy = gen_xy[hkl]
if len(my_xy) == 2:
my_xy0 = my_xy[0]
my_xy1 = my_xy[1]
diff0 = (matrix.col(xds_xy) - matrix.col(my_xy0)).length()
diff1 = (matrix.col(xds_xy) - matrix.col(my_xy1)).length()
if diff0 < diff1:
my_xy = my_xy0
else:
my_xy = my_xy1
else:
my_xy = my_xy[0]
assert xds_xy[0] == pytest.approx(my_xy[0], abs=0.1), (xds_xy, gen_xy[hkl])
assert xds_xy[1] == pytest.approx(my_xy[1], abs=0.1), (xds_xy, gen_xy[hkl])
|
dials/dials
|
tests/algorithms/spot_prediction/test_spot_prediction.py
|
Python
|
bsd-3-clause
| 6,553
|
[
"CRYSTAL"
] |
49e0bf735d659b7a828939ec8ee5f3abf2e63f529dee840c66979debf059641f
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import locale
import os
import sys
from ctypes import c_size_t, c_void_p, c_wchar_p
from logging import NullHandler, getLogger
from subprocess import check_output
from pymediainfo import MediaInfo
from pymediainfo import __version__ as pymediainfo_version
from .. import (
OrderedDict,
VIDEO_EXTENSIONS,
)
from ..properties import (
AudioChannels,
AudioCodec,
AudioCompression,
AudioProfile,
Basic,
BitRateMode,
Duration,
Language,
Quantity,
ScanType,
SubtitleFormat,
VideoCodec,
VideoEncoder,
VideoProfile,
VideoProfileLevel,
VideoProfileTier,
YesNo,
)
from ..property import (
MultiValue,
Property,
)
from ..provider import (
MalformedFileError,
Provider,
)
from ..rules import (
AudioChannelsRule,
ClosedCaptionRule,
HearingImpairedRule,
LanguageRule,
ResolutionRule,
)
from ..units import units
from ..utils import (
define_candidate,
detect_os,
)
logger = getLogger(__name__)
logger.addHandler(NullHandler())
WARN_MSG = '''
=========================================================================================
MediaInfo not found on your system or could not be loaded.
Visit https://mediaarea.net/ to download it.
If you still have problems, please check if the downloaded version matches your system.
To load MediaInfo from a specific location, please define the location as follow:
knowit --mediainfo /usr/local/mediainfo/lib <video_path>
knowit --mediainfo /usr/local/mediainfo/bin <video_path>
knowit --mediainfo "C:\Program Files\MediaInfo" <video_path>
knowit --mediainfo C:\Software\MediaInfo.dll <video_path>
knowit --mediainfo C:\Software\MediaInfo.exe <video_path>
knowit --mediainfo /opt/mediainfo/libmediainfo.so <video_path>
knowit --mediainfo /opt/mediainfo/libmediainfo.dylib <video_path>
=========================================================================================
'''
class MediaInfoExecutor(object):
"""Media info executable knows how to execute media info: using ctypes or cli."""
def __init__(self, location):
"""Constructor."""
self.location = location
def extract_info(self, filename):
"""Extract media info."""
xml = self._execute(filename)
return MediaInfo(xml)
def _execute(self, filename):
raise NotImplementedError
@classmethod
def get_executor_instance(cls, suggested_path):
"""Return the executor instance."""
os_family = detect_os()
logger.debug('Detected os: %s', os_family)
for exec_cls in (MediaInfoCTypesExecutor, MediaInfoCliExecutor):
executor = exec_cls.create(os_family, suggested_path)
if executor:
return executor
class MediaInfoCliExecutor(MediaInfoExecutor):
"""Media info using cli."""
names = {
'unix': ['mediainfo'],
'windows': ['MediaInfo.exe'],
'macos': ['mediainfo'],
}
locations = {
'unix': ['/usr/local/mediainfo/bin', '__PATH__'],
'windows': ['__PATH__'],
'macos': ['__PATH__'],
}
def _execute(self, filename):
return check_output([self.location, '--Output=XML', '--Full', filename])
@classmethod
def create(cls, os_family, suggested_path):
"""Create the executor instance."""
for candidate in define_candidate(os_family, cls.locations, cls.names, suggested_path):
try:
check_output([candidate, '--version'])
logger.debug('MediaInfo cli detected: %s', candidate)
return MediaInfoCliExecutor(candidate)
except OSError:
pass
class MediaInfoCTypesExecutor(MediaInfoExecutor):
"""Media info ctypes."""
names = {
'unix': ['libmediainfo.so.0'],
'windows': ['MediaInfo.dll'],
'macos': ['libmediainfo.0.dylib', 'libmediainfo.dylib'],
}
locations = {
'unix': ['/usr/local/mediainfo/lib', '__PATH__'],
'windows': ['__PATH__'], # 'C:\Program Files\MediaInfo', 'C:\Program Files (x86)\MediaInfo'],
'macos': ['__PATH__'],
}
def __init__(self, location, lib):
"""Constructor."""
super(MediaInfoCTypesExecutor, self).__init__(location)
self.lib = lib
def _execute(self, filename):
# Create a MediaInfo handle
handle = self.lib.MediaInfo_New()
try:
self.lib.MediaInfo_Option(handle, 'CharSet', 'UTF-8')
# Fix for https://github.com/sbraz/pymediainfo/issues/22
# Python 2 does not change LC_CTYPE
# at startup: https://bugs.python.org/issue6203
if sys.version_info < (3, ) and os.name == 'posix' and locale.getlocale() == (None, None):
locale.setlocale(locale.LC_CTYPE, locale.getdefaultlocale())
self.lib.MediaInfo_Option(None, 'Inform', 'XML')
self.lib.MediaInfo_Option(None, 'Complete', '1')
self.lib.MediaInfo_Open(handle, filename)
return self.lib.MediaInfo_Inform(handle, 0)
finally:
# Delete the handle
self.lib.MediaInfo_Close(handle)
self.lib.MediaInfo_Delete(handle)
@classmethod
def create(cls, os_family, suggested_path):
"""Create the executor instance."""
for candidate in define_candidate(os_family, cls.locations, cls.names, suggested_path):
lib = cls._get_native_lib(os_family, candidate)
if lib:
logger.debug('MediaInfo library detected: %s', candidate)
return MediaInfoCTypesExecutor(candidate, lib)
@classmethod
def _get_native_lib(cls, os_family, library_path):
if os_family == 'windows':
return cls._get_windows_lib(library_path)
# works for unix and macos
return cls._get_unix_lib(library_path)
@classmethod
def _get_windows_lib(cls, library_path):
from ctypes import windll
try:
if sys.version_info[:3] == (2, 7, 13):
# http://bugs.python.org/issue29082
library_path = str(library_path)
lib = windll.MediaInfo = windll.LoadLibrary(library_path)
return cls._initialize_lib(lib)
except OSError:
pass
@classmethod
def _get_unix_lib(cls, library_path):
from ctypes import CDLL
try:
return cls._initialize_lib(CDLL(library_path))
except OSError:
pass
@classmethod
def _initialize_lib(cls, lib):
lib.MediaInfo_Inform.restype = c_wchar_p
lib.MediaInfo_New.argtypes = []
lib.MediaInfo_New.restype = c_void_p
lib.MediaInfo_Option.argtypes = [c_void_p, c_wchar_p, c_wchar_p]
lib.MediaInfo_Option.restype = c_wchar_p
lib.MediaInfo_Inform.argtypes = [c_void_p, c_size_t]
lib.MediaInfo_Inform.restype = c_wchar_p
lib.MediaInfo_Open.argtypes = [c_void_p, c_wchar_p]
lib.MediaInfo_Open.restype = c_size_t
lib.MediaInfo_Delete.argtypes = [c_void_p]
lib.MediaInfo_Delete.restype = None
lib.MediaInfo_Close.argtypes = [c_void_p]
lib.MediaInfo_Close.restype = None
return lib
class MediaInfoProvider(Provider):
"""Media Info provider."""
executor = None
def __init__(self, config, suggested_path):
"""Init method."""
super(MediaInfoProvider, self).__init__(config, {
'general': OrderedDict([
('title', Property('title', description='media title')),
('path', Property('complete_name', description='media path')),
('duration', Duration('duration', description='media duration')),
('size', Quantity('file_size', units.byte, description='media size')),
('bit_rate', Quantity('overall_bit_rate', units.bps, description='media bit rate')),
]),
'video': OrderedDict([
('id', Basic('track_id', int, allow_fallback=True, description='video track number')),
('name', Property('name', description='video track name')),
('language', Language('language', description='video language')),
('duration', Duration('duration', description='video duration')),
('size', Quantity('stream_size', units.byte, description='video stream size')),
('width', Quantity('width', units.pixel)),
('height', Quantity('height', units.pixel)),
('scan_type', ScanType(config, 'scan_type', default='Progressive', description='video scan type')),
('aspect_ratio', Basic('display_aspect_ratio', float, description='display aspect ratio')),
('pixel_aspect_ratio', Basic('pixel_aspect_ratio', float, description='pixel aspect ratio')),
('resolution', None), # populated with ResolutionRule
('frame_rate', Quantity('frame_rate', units.FPS, float, description='video frame rate')),
# frame_rate_mode
('bit_rate', Quantity('bit_rate', units.bps, description='video bit rate')),
('bit_depth', Quantity('bit_depth', units.bit, description='video bit depth')),
('codec', VideoCodec(config, 'codec', description='video codec')),
('profile', VideoProfile(config, 'codec_profile', description='video codec profile')),
('profile_level', VideoProfileLevel(config, 'codec_profile', description='video codec profile level')),
('profile_tier', VideoProfileTier(config, 'codec_profile', description='video codec profile tier')),
('encoder', VideoEncoder(config, 'encoded_library_name', description='video encoder')),
('media_type', Property('internet_media_type', description='video media type')),
('forced', YesNo('forced', hide_value=False, description='video track forced')),
('default', YesNo('default', hide_value=False, description='video track default')),
]),
'audio': OrderedDict([
('id', Basic('track_id', int, allow_fallback=True, description='audio track number')),
('name', Property('title', description='audio track name')),
('language', Language('language', description='audio language')),
('duration', Duration('duration', description='audio duration')),
('size', Quantity('stream_size', units.byte, description='audio stream size')),
('codec', MultiValue(AudioCodec(config, 'codec', description='audio codec'))),
('profile', MultiValue(AudioProfile(config, 'format_profile', description='audio codec profile'),
delimiter=' / ')),
('channels_count', MultiValue(AudioChannels('channel_s', description='audio channels count'))),
('channel_positions', MultiValue(name='other_channel_positions', handler=(lambda x, *args: x),
delimiter=' / ', private=True, description='audio channels position')),
('channels', None), # populated with AudioChannelsRule
('bit_depth', Quantity('bit_depth', units.bit, description='audio bit depth')),
('bit_rate', MultiValue(Quantity('bit_rate', units.bps, description='audio bit rate'))),
('bit_rate_mode', MultiValue(BitRateMode(config, 'bit_rate_mode', description='audio bit rate mode'))),
('sampling_rate', MultiValue(Quantity('sampling_rate', units.Hz, description='audio sampling rate'))),
('compression', MultiValue(AudioCompression(config, 'compression_mode',
description='audio compression'))),
('forced', YesNo('forced', hide_value=False, description='audio track forced')),
('default', YesNo('default', hide_value=False, description='audio track default')),
]),
'subtitle': OrderedDict([
('id', Basic('track_id', int, allow_fallback=True, description='subtitle track number')),
('name', Property('title', description='subtitle track name')),
('language', Language('language', description='subtitle language')),
('hearing_impaired', None), # populated with HearingImpairedRule
('_closed_caption', Property('captionservicename', private=True)),
('closed_caption', None), # populated with ClosedCaptionRule
('format', SubtitleFormat(config, 'codec_id', description='subtitle format')),
('forced', YesNo('forced', hide_value=False, description='subtitle track forced')),
('default', YesNo('default', hide_value=False, description='subtitle track default')),
]),
}, {
'video': OrderedDict([
('language', LanguageRule('video language')),
('resolution', ResolutionRule('video resolution')),
]),
'audio': OrderedDict([
('language', LanguageRule('audio language')),
('channels', AudioChannelsRule('audio channels')),
]),
'subtitle': OrderedDict([
('language', LanguageRule('subtitle language')),
('hearing_impaired', HearingImpairedRule('subtitle hearing impaired')),
('closed_caption', ClosedCaptionRule('closed caption'))
])
})
self.executor = MediaInfoExecutor.get_executor_instance(suggested_path)
def accepts(self, video_path):
"""Accept any video when MediaInfo is available."""
if self.executor is None:
logger.warning(WARN_MSG)
self.executor = False
return self.executor and video_path.lower().endswith(VIDEO_EXTENSIONS)
def describe(self, video_path, context):
"""Return video metadata."""
data = self.executor.extract_info(video_path).to_data()
if context.get('raw'):
return data
general_tracks = []
video_tracks = []
audio_tracks = []
subtitle_tracks = []
for track in data.get('tracks'):
track_type = track.get('track_type')
if track_type == 'General':
general_tracks.append(track)
elif track_type == 'Video':
video_tracks.append(track)
elif track_type == 'Audio':
audio_tracks.append(track)
elif track_type == 'Text':
subtitle_tracks.append(track)
result = self._describe_tracks(video_path, general_tracks[0] if general_tracks else {},
video_tracks, audio_tracks, subtitle_tracks, context)
if not result:
logger.warning('Invalid file %r', video_path)
if context.get('fail_on_error'):
raise MalformedFileError
result['provider'] = self.executor.location
return result
@property
def version(self):
"""Return mediainfo version information."""
return pymediainfo_version, self.executor.location if self.executor else None
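# Usage sketch (hedged: `config` stands for the parsed knowit configuration
# object that callers normally supply; the path is illustrative):
#
#   provider = MediaInfoProvider(config, suggested_path=None)
#   if provider.accepts('/tmp/movie.mkv'):
#       info = provider.describe('/tmp/movie.mkv', context={})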
|
fernandog/Medusa
|
ext/knowit/providers/mediainfo.py
|
Python
|
gpl-3.0
| 15,401
|
[
"VisIt"
] |
270b6fad8c8c84936310c80f6b090858c741332ce710d61ac2362cb9337b9e9c
|
"""Helper functions for creating the most common surfaces and related tasks.
The helper functions can create the most common low-index surfaces,
add vacuum layers and add adsorbates.
"""
from math import sqrt
from operator import itemgetter
import numpy as np
from ase.atom import Atom
from ase.atoms import Atoms
from ase.data import reference_states, atomic_numbers
from ase.lattice.general_surface import surface
from ase.lattice.cubic import FaceCenteredCubic
def fcc100(symbol, size, a=None, vacuum=None):
"""FCC(100) surface.
Supported special adsorption sites: 'ontop', 'bridge', 'hollow'."""
return _surface(symbol, 'fcc', '100', size, a, None, vacuum)
def fcc110(symbol, size, a=None, vacuum=None):
"""FCC(110) surface.
Supported special adsorption sites: 'ontop', 'longbridge',
'shortbridge','hollow'."""
return _surface(symbol, 'fcc', '110', size, a, None, vacuum)
def bcc100(symbol, size, a=None, vacuum=None):
"""BCC(100) surface.
Supported special adsorption sites: 'ontop', 'bridge', 'hollow'."""
return _surface(symbol, 'bcc', '100', size, a, None, vacuum)
def bcc110(symbol, size, a=None, vacuum=None, orthogonal=False):
"""BCC(110) surface.
Supported special adsorption sites: 'ontop', 'longbridge',
'shortbridge', 'hollow'.
Use *orthogonal=True* to get an orthogonal unit cell - works only
for size=(i,j,k) with j even."""
return _surface(symbol, 'bcc', '110', size, a, None, vacuum, orthogonal)
def bcc111(symbol, size, a=None, vacuum=None, orthogonal=False):
"""BCC(111) surface.
Supported special adsorption sites: 'ontop'.
Use *orthogonal=True* to get an orthogonal unit cell - works only
for size=(i,j,k) with j even."""
return _surface(symbol, 'bcc', '111', size, a, None, vacuum, orthogonal)
def fcc111(symbol, size, a=None, vacuum=None, orthogonal=False):
"""FCC(111) surface.
Supported special adsorption sites: 'ontop', 'bridge', 'fcc' and 'hcp'.
Use *orthogonal=True* to get an orthogonal unit cell - works only
for size=(i,j,k) with j even."""
return _surface(symbol, 'fcc', '111', size, a, None, vacuum, orthogonal)
def hcp0001(symbol, size, a=None, c=None, vacuum=None, orthogonal=False):
"""HCP(0001) surface.
Supported special adsorption sites: 'ontop', 'bridge', 'fcc' and 'hcp'.
Use *orthogonal=True* to get an orthogonal unit cell - works only
for size=(i,j,k) with j even."""
return _surface(symbol, 'hcp', '0001', size, a, c, vacuum, orthogonal)
def hcp10m10(symbol, size, a=None, c=None, vacuum=None):
"""HCP(10m10) surface.
Supported special adsorption sites: 'ontop'.
Works only for size=(i,j,k) with j even."""
return _surface(symbol, 'hcp', '10m10', size, a, c, vacuum)
def diamond100(symbol, size, a=None, vacuum=None):
"""DIAMOND(100) surface.
Supported special adsorption sites: 'ontop'."""
return _surface(symbol, 'diamond', '100', size, a, None, vacuum)
def diamond111(symbol, size, a=None, vacuum=None, orthogonal=False):
"""DIAMOND(111) surface.
Supported special adsorption sites: 'ontop'."""
if orthogonal:
raise NotImplementedError("Can't do orthogonal cell yet!")
return _surface(symbol, 'diamond', '111', size, a, None, vacuum,
orthogonal)
def add_adsorbate(slab, adsorbate, height, position=(0, 0), offset=None,
mol_index=0):
"""Add an adsorbate to a surface.
This function adds an adsorbate to a slab. If the slab is
produced by one of the utility functions in ase.lattice.surface, it
is possible to specify the position of the adsorbate by a keyword
(the supported keywords depend on which function was used to
create the slab).
If the adsorbate is a molecule, the atom indexed by the mol_index
optional argument is positioned on top of the adsorption position
on the surface, and it is the responsibility of the user to orient
the adsorbate in a sensible way.
This function can be called multiple times to add more than one
adsorbate.
Parameters:
slab: The surface onto which the adsorbate should be added.
adsorbate: The adsorbate. Must be one of the following three types:
A string containing the chemical symbol for a single atom.
An atom object.
An atoms object (for a molecular adsorbate).
height: Height above the surface.
position: The x-y position of the adsorbate, either as a tuple of
two numbers or as a keyword (if the surface is produced by one
of the functions in ase.lattice.surfaces).
offset (default: None): Offsets the adsorbate by a number of unit
cells. Mostly useful when adding more than one adsorbate.
mol_index (default: 0): If the adsorbate is a molecule, index of
the atom to be positioned above the location specified by the
position argument.
Note *position* is given in absolute xy coordinates (or as
a keyword), whereas offset is specified in unit cells. This
can be used to give the positions in units of the unit cell by
using *offset* instead.
"""
info = slab.adsorbate_info
if 'cell' not in info:
info['cell'] = slab.get_cell()[:2, :2]
pos = np.array([0.0, 0.0]) # (x, y) part
spos = np.array([0.0, 0.0]) # part relative to unit cell
if offset is not None:
spos += np.asarray(offset, float)
if isinstance(position, str):
# A site-name:
if 'sites' not in info:
raise TypeError('If the atoms are not made by an ' +
'ase.lattice.surface function, ' +
'position cannot be a name.')
if position not in info['sites']:
raise TypeError('Adsorption site %s not supported.' % position)
spos += info['sites'][position]
else:
pos += position
pos += np.dot(spos, info['cell'])
# Convert the adsorbate to an Atoms object
if isinstance(adsorbate, Atoms):
ads = adsorbate
elif isinstance(adsorbate, Atom):
ads = Atoms([adsorbate])
else:
# Hope it is a useful string or something like that
ads = Atoms(adsorbate)
# Get the z-coordinate:
try:
a = info['top layer atom index']
except KeyError:
a = slab.positions[:, 2].argmax()
info['top layer atom index'] = a
z = slab.positions[a, 2] + height
# Move adsorbate into position
ads.translate([pos[0], pos[1], z] - ads.positions[mol_index])
# Attach the adsorbate
slab.extend(ads)
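# A minimal usage sketch (values are illustrative):
#
#   slab = fcc111('Cu', size=(2, 2, 3), vacuum=10.0)
#   add_adsorbate(slab, 'H', height=1.5, position='ontop')
#   add_adsorbate(slab, Atoms('CO'), height=2.0, position='bridge')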
def add_vacuum(atoms, vacuum):
"""Add vacuum layer to the atoms.
Parameters:
atoms: An Atoms object most likely created by one of the
ase.lattice modules.
vacuum: The thickness of the vacuum layer (in Angstrom).
"""
uc = atoms.get_cell()
normal = np.cross(uc[0], uc[1])
costheta = np.dot(normal, uc[2]) / np.sqrt(np.dot(normal, normal) *
np.dot(uc[2], uc[2]))
length = np.sqrt(np.dot(uc[2], uc[2]))
newlength = length + vacuum / costheta
uc[2] *= newlength / length
atoms.set_cell(uc)
def _surface(symbol, structure, face, size, a, c, vacuum, orthogonal=True):
"""Function to build often used surfaces.
Don't call this function directly - use fcc100, fcc110, bcc111, ..."""
Z = atomic_numbers[symbol]
if a is None:
sym = reference_states[Z]['symmetry']
if sym != structure:
raise ValueError("Can't guess lattice constant for %s-%s!" %
(structure, symbol))
a = reference_states[Z]['a']
if structure == 'hcp' and c is None:
if reference_states[Z]['symmetry'] == 'hcp':
c = reference_states[Z]['c/a'] * a
else:
c = sqrt(8 / 3.0) * a
positions = np.empty((size[2], size[1], size[0], 3))
positions[..., 0] = np.arange(size[0]).reshape((1, 1, -1))
positions[..., 1] = np.arange(size[1]).reshape((1, -1, 1))
positions[..., 2] = np.arange(size[2]).reshape((-1, 1, 1))
numbers = np.ones(size[0] * size[1] * size[2], int) * Z
tags = np.empty((size[2], size[1], size[0]), int)
tags[:] = np.arange(size[2], 0, -1).reshape((-1, 1, 1))
slab = Atoms(numbers,
tags=tags.ravel(),
pbc=(True, True, False),
cell=size)
surface_cell = None
sites = {'ontop': (0, 0)}
surf = structure + face
if surf == 'fcc100':
cell = (sqrt(0.5), sqrt(0.5), 0.5)
positions[-2::-2, ..., :2] += 0.5
sites.update({'hollow': (0.5, 0.5), 'bridge': (0.5, 0)})
elif surf == 'diamond100':
cell = (sqrt(0.5), sqrt(0.5), 0.5 / 2)
positions[-4::-4, ..., :2] += (0.5, 0.5)
positions[-3::-4, ..., :2] += (0.0, 0.5)
positions[-2::-4, ..., :2] += (0.0, 0.0)
positions[-1::-4, ..., :2] += (0.5, 0.0)
elif surf == 'fcc110':
cell = (1.0, sqrt(0.5), sqrt(0.125))
positions[-2::-2, ..., :2] += 0.5
sites.update({'hollow': (0.5, 0.5), 'longbridge': (0.5, 0),
'shortbridge': (0, 0.5)})
elif surf == 'bcc100':
cell = (1.0, 1.0, 0.5)
positions[-2::-2, ..., :2] += 0.5
sites.update({'hollow': (0.5, 0.5), 'bridge': (0.5, 0)})
else:
if orthogonal and size[1] % 2 == 1:
raise ValueError(("Can't make orthorhombic cell with size=%r. " %
(tuple(size),)) +
'Second number in size must be even.')
if surf == 'fcc111':
cell = (sqrt(0.5), sqrt(0.375), 1 / sqrt(3))
if orthogonal:
positions[-1::-3, 1::2, :, 0] += 0.5
positions[-2::-3, 1::2, :, 0] += 0.5
positions[-3::-3, 1::2, :, 0] -= 0.5
positions[-2::-3, ..., :2] += (0.0, 2.0 / 3)
positions[-3::-3, ..., :2] += (0.5, 1.0 / 3)
else:
positions[-2::-3, ..., :2] += (-1.0 / 3, 2.0 / 3)
positions[-3::-3, ..., :2] += (1.0 / 3, 1.0 / 3)
sites.update({'bridge': (0.5, 0), 'fcc': (1.0 / 3, 1.0 / 3),
'hcp': (2.0 / 3, 2.0 / 3)})
elif surf == 'diamond111':
cell = (sqrt(0.5), sqrt(0.375), 1 / sqrt(3) / 2)
assert not orthogonal
positions[-1::-6, ..., :3] += (0.0, 0.0, 0.5)
positions[-2::-6, ..., :2] += (0.0, 0.0)
positions[-3::-6, ..., :3] += (-1.0 / 3, 2.0 / 3, 0.5)
positions[-4::-6, ..., :2] += (-1.0 / 3, 2.0 / 3)
positions[-5::-6, ..., :3] += (1.0 / 3, 1.0 / 3, 0.5)
positions[-6::-6, ..., :2] += (1.0 / 3, 1.0 / 3)
elif surf == 'hcp0001':
cell = (1.0, sqrt(0.75), 0.5 * c / a)
if orthogonal:
positions[:, 1::2, :, 0] += 0.5
positions[-2::-2, ..., :2] += (0.0, 2.0 / 3)
else:
positions[-2::-2, ..., :2] += (-1.0 / 3, 2.0 / 3)
sites.update({'bridge': (0.5, 0), 'fcc': (1.0 / 3, 1.0 / 3),
'hcp': (2.0 / 3, 2.0 / 3)})
elif surf == 'hcp10m10':
cell = (1.0, 0.5 * c / a, sqrt(0.75))
assert orthogonal
positions[-2::-2, ..., 0] += 0.5
positions[:, ::2, :, 2] += 2.0 / 3
elif surf == 'bcc110':
cell = (1.0, sqrt(0.5), sqrt(0.5))
if orthogonal:
positions[:, 1::2, :, 0] += 0.5
positions[-2::-2, ..., :2] += (0.0, 1.0)
else:
positions[-2::-2, ..., :2] += (-0.5, 1.0)
sites.update({'shortbridge': (0, 0.5),
'longbridge': (0.5, 0),
'hollow': (0.375, 0.25)})
elif surf == 'bcc111':
cell = (sqrt(2), sqrt(1.5), sqrt(3) / 6)
if orthogonal:
positions[-1::-3, 1::2, :, 0] += 0.5
positions[-2::-3, 1::2, :, 0] += 0.5
positions[-3::-3, 1::2, :, 0] -= 0.5
positions[-2::-3, ..., :2] += (0.0, 2.0 / 3)
positions[-3::-3, ..., :2] += (0.5, 1.0 / 3)
else:
positions[-2::-3, ..., :2] += (-1.0 / 3, 2.0 / 3)
positions[-3::-3, ..., :2] += (1.0 / 3, 1.0 / 3)
sites.update({'hollow': (1.0 / 3, 1.0 / 3)})
else:
            raise NotImplementedError('surface %r not supported' % surf)
surface_cell = a * np.array([(cell[0], 0),
(cell[0] / 2, cell[1])])
if not orthogonal:
cell = np.array([(cell[0], 0, 0),
(cell[0] / 2, cell[1], 0),
(0, 0, cell[2])])
if surface_cell is None:
surface_cell = a * np.diag(cell[:2])
if isinstance(cell, tuple):
cell = np.diag(cell)
slab.set_positions(positions.reshape((-1, 3)))
slab.set_cell([a * v * n for v, n in zip(cell, size)], scale_atoms=True)
if vacuum is not None:
slab.center(vacuum=vacuum, axis=2)
slab.adsorbate_info['cell'] = surface_cell
slab.adsorbate_info['sites'] = sites
return slab
def fcc211(symbol, size, a=None, vacuum=None, orthogonal=True):
"""FCC(211) surface.
Does not currently support special adsorption sites.
Currently only implemented for *orthogonal=True* with size specified
as (i, j, k), where i, j, and k are number of atoms in each direction.
    i must be divisible by 3 to accommodate the step width.
"""
if not orthogonal:
raise NotImplementedError('Only implemented for orthogonal '
'unit cells.')
if size[0] % 3 != 0:
raise NotImplementedError('First dimension of size must be '
'divisible by 3.')
atoms = FaceCenteredCubic(symbol,
directions=[[1, -1, -1],
[0, 2, -2],
[2, 1, 1]],
miller=(None, None, (2, 1, 1)),
latticeconstant=a,
size=(1, 1, 1),
pbc=True)
z = (size[2] + 1) // 2
atoms = atoms.repeat((size[0] // 3, size[1], z))
if size[2] % 2: # Odd: remove bottom layer and shrink cell.
remove_list = [atom.index for atom in atoms
if atom.z < atoms[1].z]
del atoms[remove_list]
dz = atoms[0].z
atoms.translate((0., 0., -dz))
atoms.cell[2][2] -= dz
atoms.center(vacuum=vacuum, axis=2)
# Renumber systematically from top down.
orders = [(atom.index, round(atom.x, 3), round(atom.y, 3),
-round(atom.z, 3), atom.index) for atom in atoms]
orders.sort(key=itemgetter(3, 1, 2))
newatoms = atoms.copy()
for index, order in enumerate(orders):
newatoms[index].position = atoms[order[0]].position.copy()
return newatoms
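
# Hedged usage sketch (element and sizes are illustrative, not from the
# original source):
#
#     slab = fcc211('Pt', size=(3, 2, 4), vacuum=10.0)
#
# builds a stepped Pt(211) slab; the first size component (3) satisfies the
# divisible-by-3 constraint documented in the docstring above.
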
|
askhl/ase
|
ase/lattice/surface.py
|
Python
|
gpl-2.0
| 15,129
|
[
"ASE"
] |
35cc9b8773b57468af73c59dd48bd48ed9dd0ed6c850dd00d45c15739b10b934
|
# pylint: disable=arguments-differ
""" Models for the shopping cart and assorted purchase types """
import csv
import json
import logging
import smtplib
import StringIO
from collections import namedtuple
from datetime import datetime, timedelta
from decimal import Decimal
from io import BytesIO
import analytics
import pytz
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from config_models.models import ConfigurationModel
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.core.mail.message import EmailMessage
from django.urls import reverse
from django.db import models, transaction
from django.db.models import Count, F, Q, Sum
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField
from six import text_type
from course_modes.models import CourseMode
from courseware.courses import get_course_by_id
from edxmako.shortcuts import render_to_string
from eventtracking import tracker
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from shoppingcart.pdf import PDFInvoice
from student.models import CourseEnrollment, EnrollStatusChange
from student.signals import UNENROLL_DONE
from util.query import use_read_replica_if_available
from xmodule.modulestore.django import modulestore
from .exceptions import (
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
InvalidCartItem,
InvalidStatusToRetire,
ItemAlreadyInCartException,
ItemNotFoundInCartException,
MultipleCouponsNotAllowedException,
PurchasedCallbackException,
UnexpectedOrderItemStatus
)
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
# The user is selecting what he/she wants to purchase.
('cart', 'cart'),
# The user has been sent to the external payment processor.
# At this point, the order should NOT be modified.
# If the user returns to the payment flow, he/she will start a new order.
('paying', 'paying'),
# The user has successfully purchased the items in the order.
('purchased', 'purchased'),
# The user's order has been refunded.
('refunded', 'refunded'),
# The user's order went through, but the order was erroneously left
# in 'cart'.
('defunct-cart', 'defunct-cart'),
# The user's order went through, but the order was erroneously left
# in 'paying'.
('defunct-paying', 'defunct-paying'),
)
# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
'cart': 'defunct-cart',
'paying': 'defunct-paying',
}
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk'])
class OrderTypes(object):
"""
    This class specifies purchase order types.
"""
PERSONAL = 'personal'
BUSINESS = 'business'
ORDER_TYPES = (
(PERSONAL, 'personal'),
(BUSINESS, 'business'),
)
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
"""
class Meta(object):
app_label = "shoppingcart"
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
refunded_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
# bulk purchase registration code workflow billing details
company_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_email = models.CharField(max_length=255, null=True, blank=True)
recipient_name = models.CharField(max_length=255, null=True, blank=True)
recipient_email = models.CharField(max_length=255, null=True, blank=True)
customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
@classmethod
def get_cart_for_user(cls, user):
"""
Always use this to preserve the property that at most 1 order per user has status = 'cart'
"""
# find the newest element in the db
try:
cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
except ObjectDoesNotExist:
# if nothing exists in the database, create a new cart
cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
return cart_order
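    # Hedged usage sketch (illustrative, kept as comments so import-time
    # behaviour is unchanged): a view would typically fetch the cart with
    #
    #     cart = Order.get_cart_for_user(request.user)
    #
    # which either reuses the newest status='cart' order or creates one.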
@classmethod
def does_user_have_cart(cls, user):
"""
        Returns a boolean indicating whether a shopping cart (Order) exists for the specified user
"""
return cls.objects.filter(user=user, status='cart').exists()
@classmethod
def user_cart_has_items(cls, user, item_types=None):
"""
        Returns True if the user (an anonymous user is OK) has
        a cart with items in it, which means the cart should be displayed.
        If item_types is passed in, then we check to see if the cart has at least one of
        those types of OrderItems
"""
if not user.is_authenticated:
return False
cart = cls.get_cart_for_user(user)
if not item_types:
# check to see if the cart has at least some item in it
return cart.has_items()
else:
# if the caller is explicitly asking to check for particular types
for item_type in item_types:
if cart.has_items(item_type):
return True
return False
@classmethod
def remove_cart_item_from_order(cls, item, user):
"""
Removes the item from the cart if the item.order.status == 'cart'.
Also removes any code redemption associated with the order_item
"""
if item.order.status == 'cart':
log.info("order item %s removed for user %s", str(item.id), user)
item.delete()
# remove any redemption entry associated with the item
CouponRedemption.remove_code_redemption_from_item(item, user)
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status))
def has_items(self, item_type=None):
"""
Does the cart have any items in it?
If an item_type is passed in then we check to see if there are any items of that class type
"""
if not item_type:
return self.orderitem_set.exists()
else:
items = self.orderitem_set.all().select_subclasses()
for item in items:
if isinstance(item, item_type):
return True
return False
def reset_cart_items_prices(self):
"""
Reset the items price state in the user cart
"""
for item in self.orderitem_set.all():
if item.is_discounted:
item.unit_cost = item.list_price
item.save()
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete()
@transaction.atomic
def start_purchase(self):
"""
Start the purchase process. This will set the order status to "paying",
at which point it should no longer be modified.
Future calls to `Order.get_cart_for_user()` will filter out orders with
status "paying", effectively creating a new (empty) cart.
"""
if self.status == 'cart':
self.status = 'paying'
self.save()
for item in OrderItem.objects.filter(order=self).select_subclasses():
item.start_purchase()
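    # Hedged flow sketch (illustrative): a payment view would typically run
    #
    #     cart = Order.get_cart_for_user(request.user)
    #     cart.start_purchase()   # 'cart' -> 'paying'; the order is now frozen
    #
    # before redirecting to the external processor; a later call to
    # get_cart_for_user() then yields a fresh, empty cart.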
def update_order_type(self):
"""
        Update the order type. This method will inspect the quantity associated with each OrderItem.
In the application, it is implied that when qty > 1, then the user is to purchase
'RegistrationCodes' which are randomly generated strings that users can distribute to
others in order for them to enroll in paywalled courses.
The UI/UX may change in the future to make the switching between PaidCourseRegistration
and CourseRegCodeItems a more explicit UI gesture from the purchaser
"""
cart_items = self.orderitem_set.all()
is_order_type_business = False
for cart_item in cart_items:
if cart_item.qty > 1:
is_order_type_business = True
items_to_delete = []
old_to_new_id_map = []
if is_order_type_business:
for cart_item in cart_items:
if hasattr(cart_item, 'paidcourseregistration'):
course_reg_code_item = CourseRegCodeItem.add_to_order(
self, cart_item.paidcourseregistration.course_id, cart_item.qty,
)
# update the discounted prices if coupon redemption applied
course_reg_code_item.list_price = cart_item.list_price
course_reg_code_item.unit_cost = cart_item.unit_cost
course_reg_code_item.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
else:
for cart_item in cart_items:
if hasattr(cart_item, 'courseregcodeitem'):
paid_course_registration = PaidCourseRegistration.add_to_order(
self, cart_item.courseregcodeitem.course_id,
)
# update the discounted prices if coupon redemption applied
paid_course_registration.list_price = cart_item.list_price
paid_course_registration.unit_cost = cart_item.unit_cost
paid_course_registration.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})
for item in items_to_delete:
item.delete()
self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
self.save()
return old_to_new_id_map
def generate_pdf_receipt(self, order_items):
"""
Generates the pdf receipt for the given order_items
and returns the pdf_buffer.
"""
items_data = []
for item in order_items:
item_total = item.qty * item.unit_cost
items_data.append({
'item_description': item.pdf_receipt_display_name,
'quantity': item.qty,
'list_price': item.get_list_price(),
'discount': item.get_list_price() - item.unit_cost,
'item_total': item_total
})
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id),
date=self.purchase_time,
is_invoice=False,
total_cost=self.total_cost,
payment_received=self.total_cost,
balance=0
).generate_pdf(pdf_buffer)
return pdf_buffer
def generate_registration_codes_csv(self, orderitems, site_name):
"""
        This function generates the CSV file of registration codes and their redemption URLs.
"""
course_names = []
csv_file = StringIO.StringIO()
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
for item in orderitems:
course_id = item.course_id
course = get_course_by_id(item.course_id, depth=0)
registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
course_names.append(course.display_name)
for registration_code in registration_codes:
redemption_url = reverse('register_code_redemption', args=[registration_code.code])
url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
return csv_file, course_names
def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, pdf_file, site_name, course_names):
"""
send confirmation e-mail
"""
recipient_list = [(self.user.username, self.user.email, 'user')]
if self.company_contact_email:
recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
joined_course_names = ""
if self.recipient_email:
recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
joined_course_names = " " + ", ".join(course_names)
if not is_order_type_business:
subject = _("Order Payment Confirmation")
else:
subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
course_name_list=joined_course_names
)
dashboard_url = '{base_url}{dashboard}'.format(
base_url=site_name,
dashboard=reverse('dashboard')
)
try:
from_address = configuration_helpers.get_value(
'email_from_address',
settings.PAYMENT_SUPPORT_EMAIL
)
# Send a unique email for each recipient. Don't put all email addresses in a single email.
for recipient in recipient_list:
message = render_to_string(
'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
{
'order': self,
'recipient_name': recipient[0],
'recipient_type': recipient[2],
'site_name': site_name,
'order_items': orderitems,
'course_names': ", ".join(course_names),
'dashboard_url': dashboard_url,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'order_placed_by': '{username} ({email})'.format(
username=self.user.username, email=self.user.email
),
'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
'payment_support_email': configuration_helpers.get_value(
'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL,
),
'payment_email_signature': configuration_helpers.get_value('payment_email_signature'),
}
)
email = EmailMessage(
subject=subject,
body=message,
from_email=from_address,
to=[recipient[1]]
)
# Only the business order is HTML formatted. A single seat order confirmation is plain text.
if is_order_type_business:
email.content_subtype = "html"
if csv_file:
email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
if pdf_file is not None:
email.attach(u'ReceiptOrder{}.pdf'.format(str(self.id)), pdf_file.getvalue(), 'application/pdf')
else:
file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.'))
email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain')
email.send()
except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
log.error('Failed sending confirmation e-mail for order %d', self.id)
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
country='', ccnum='', cardtype='', processor_reply_dump=''):
"""
Call to mark this order as purchased. Iterates through its OrderItems and calls
their purchased_callback
`first` - first name of person billed (e.g. John)
`last` - last name of person billed (e.g. Smith)
`street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
`street2` - second line of a street address of the billing address (e.g. Suite 101)
`city` - city of the billing address (e.g. Cambridge)
`state` - code of the state, province, or territory of the billing address (e.g. MA)
`postalcode` - postal code of the billing address (e.g. 02142)
`country` - country code of the billing address (e.g. US)
`ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
`cardtype` - 3-digit code representing the card type used (e.g. 001)
`processor_reply_dump` - all the parameters returned by the processor
"""
if self.status == 'purchased':
log.error(
u"`purchase` method called on order {}, but order is already purchased.".format(self.id)
)
return
self.status = 'purchased'
self.purchase_time = datetime.now(pytz.utc)
self.bill_to_first = first
self.bill_to_last = last
self.bill_to_city = city
self.bill_to_state = state
self.bill_to_country = country
self.bill_to_postalcode = postalcode
if settings.FEATURES['STORE_BILLING_INFO']:
self.bill_to_street1 = street1
self.bill_to_street2 = street2
self.bill_to_ccnum = ccnum
self.bill_to_cardtype = cardtype
self.processor_reply_dump = processor_reply_dump
# save these changes on the order, then we can tell when we are in an
# inconsistent state
self.save()
# this should return all of the objects with the correct types of the
# subclasses
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
if self.order_type == OrderTypes.BUSINESS:
self.update_order_type()
for item in orderitems:
item.purchase_item()
csv_file = None
course_names = []
if self.order_type == OrderTypes.BUSINESS:
#
# Generate the CSV file that contains all of the RegistrationCodes that have already been
# generated when the purchase has transacted
#
csv_file, course_names = self.generate_registration_codes_csv(orderitems, site_name)
try:
pdf_file = self.generate_pdf_receipt(orderitems)
except Exception: # pylint: disable=broad-except
log.exception('Exception at creating pdf file.')
pdf_file = None
try:
self.send_confirmation_emails(
orderitems, self.order_type == OrderTypes.BUSINESS,
csv_file, pdf_file, site_name, course_names
)
except Exception: # pylint: disable=broad-except
# Catch all exceptions here, since the Django view implicitly
# wraps this in a transaction. If the order completes successfully,
# we don't want to roll back just because we couldn't send
# the confirmation email.
log.exception('Error occurred while sending payment confirmation email')
self._emit_order_event('Completed Order', orderitems)
def refund(self):
"""
Refund the given order. As of right now, this just marks the order as refunded.
"""
self.status = 'refunded'
self.save()
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
self._emit_order_event('Refunded Order', orderitems)
def _emit_order_event(self, event_name, orderitems):
"""
Emit an analytics event with the given name for this Order. Will iterate over all associated
OrderItems and add them as products in the event as well.
"""
try:
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(self.user.id, event_name, {
'orderId': self.id,
'total': str(self.total_cost),
'currency': self.currency,
'products': [item.analytics_data() for item in orderitems]
}, context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
})
except Exception: # pylint: disable=broad-except
# Capturing all exceptions thrown while tracking analytics events. We do not want
# an operation to fail because of an analytics event, so we will capture these
# errors in the logs.
log.exception(
u'Unable to emit {event} event for user {user} and order {order}'.format(
event=event_name, user=self.user.id, order=self.id)
)
def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
recipient_email='', customer_reference_number=''):
"""
This function is called after the user selects a purchase type of "Business" and
is asked to enter the optional billing details. The billing details are updated
for that order.
company_name - Name of purchasing organization
company_contact_name - Name of the key contact at the company the sale was made to
company_contact_email - Email of the key contact at the company the sale was made to
        recipient_name - Name of the contact to whom the invoice should be sent
        recipient_email - Email address to which the invoice should be sent
customer_reference_number - purchase order number of the organization associated with this Order
"""
self.company_name = company_name
self.company_contact_name = company_contact_name
self.company_contact_email = company_contact_email
self.recipient_name = recipient_name
self.recipient_email = recipient_email
self.customer_reference_number = customer_reference_number
self.save()
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
        This returns a pair. The first element is a dict with keys=OrderItemSubclassPK corresponding
        to an OrderItem and values=the set of HTML instructions that item generates. The second element
        is a de-duplicated set of all HTML instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
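    # Hedged consumption sketch (illustrative): a receipt page could render
    # only the de-duplicated instructions, ignoring the per-item dict:
    #
    #     _per_item, instructions = order.generate_receipt_instructions()
    #     for html in instructions:
    #         ...render html...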
def retire(self):
"""
Method to "retire" orders that have gone through to the payment service
but have (erroneously) not had their statuses updated.
This method only works on orders that satisfy the following conditions:
1) the order status is either "cart" or "paying" (otherwise we raise
an InvalidStatusToRetire error)
2) the order's order item's statuses match the order's status (otherwise
we throw an UnexpectedOrderItemStatus error)
"""
# if an order is already retired, no-op:
if self.status in ORDER_STATUS_MAP.values():
return
if self.status not in ORDER_STATUS_MAP.keys():
raise InvalidStatusToRetire(
"order status {order_status} is not 'paying' or 'cart'".format(
order_status=self.status
)
)
for item in self.orderitem_set.all():
if item.status != self.status:
raise UnexpectedOrderItemStatus(
"order_item status is different from order status"
)
self.status = ORDER_STATUS_MAP[self.status]
self.save()
for item in self.orderitem_set.all():
item.retire()
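    # Hedged example (illustrative): for an order stuck in status 'cart',
    #
    #     order.retire()
    #
    # moves the order and each of its items to 'defunct-cart' per
    # ORDER_STATUS_MAP; a second retire() call is then a no-op.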
def find_item_by_course_id(self, course_id):
"""
course_id: Course id of the item to find
Returns OrderItem from the Order given a course_id
Raises exception ItemNotFoundException when the item
having the given course_id is not present in the cart
"""
cart_items = OrderItem.objects.filter(order=self).select_subclasses()
found_items = []
for item in cart_items:
if getattr(item, 'course_id', None):
if item.course_id == course_id:
found_items.append(item)
if not found_items:
raise ItemNotFoundInCartException
return found_items
class OrderItem(TimeStampedModel):
"""
This is the basic interface for order items.
Order items are line items that fill up the shopping carts and orders.
Each implementation of OrderItem should provide its own purchased_callback as
a method.
"""
class Meta(object):
app_label = "shoppingcart"
base_manager_name = 'objects'
objects = InheritanceManager()
order = models.ForeignKey(Order, db_index=True, on_delete=models.CASCADE)
# this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
# this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
qty = models.IntegerField(default=1)
unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
line_desc = models.CharField(default="Misc. Item", max_length=1024)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
fulfilled_time = models.DateTimeField(null=True, db_index=True)
refund_requested_time = models.DateTimeField(null=True, db_index=True)
service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
# general purpose field, not user-visible. Used for reporting
report_comments = models.TextField(default="")
@property
def line_cost(self):
""" Return the total cost of this OrderItem """
return self.qty * self.unit_cost
@line_cost.setter
def line_cost(self, value):
"""
Django requires there be a setter for this, but it is not
necessary for the way we currently use it. Raising errors
here will cause a lot of issues and these should not be
mutable after construction, so for now we just eat this.
"""
pass
@classmethod
def add_to_order(cls, order, *args, **kwargs):
"""
A suggested convenience function for subclasses.
NOTE: This does not add anything to the cart. That is left up to the
subclasses to implement for themselves
"""
# this is a validation step to verify that the currency of the item we
# are adding is the same as the currency of the order we are adding it
# to
currency = kwargs.get('currency', 'usd')
if order.currency != currency and order.orderitem_set.exists():
raise InvalidCartItem(_("Trying to add a different currency into the cart"))
@transaction.atomic
def purchase_item(self):
"""
This is basically a wrapper around purchased_callback that handles
modifying the OrderItem itself
"""
self.purchased_callback()
self.status = 'purchased'
self.fulfilled_time = datetime.now(pytz.utc)
self.save()
def start_purchase(self):
"""
Start the purchase process. This will set the order item status to "paying",
at which point it should no longer be modified.
"""
self.status = 'paying'
self.save()
def purchased_callback(self):
"""
This is called on each inventory item in the shopping cart when the
purchase goes through.
"""
raise NotImplementedError
def generate_receipt_instructions(self):
"""
This is called on each item in a purchased order to generate receipt instructions.
        This should return a set of receipt instructions as HTML strings.
        The default implementation returns an empty set
"""
return self.pk_with_subclass, set([])
@property
def pk_with_subclass(self):
"""
Returns a named tuple that annotates the pk of this instance with its class, to fully represent
a pk of a subclass (inclusive) of OrderItem
"""
return OrderItemSubclassPK(type(self), self.pk)
@property
def is_discounted(self):
"""
        Returns True if a discount coupon has been applied to the OrderItem, and False otherwise.
        Earlier, OrderItems were stored with an empty list_price if no discount had been applied.
        We now consider an item non-discounted if list_price is None or list_price == unit_cost;
        conversely, an item is discounted when list_price is set and differs from unit_cost.
        This works with both new and old records.
"""
return self.list_price and self.list_price != self.unit_cost
def get_list_price(self):
"""
Returns the unit_cost if no discount has been applied, or the list_price if it is defined.
"""
return self.list_price if self.list_price else self.unit_cost
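    # Hedged example (illustrative values): with list_price=Decimal('100.00')
    # and unit_cost=Decimal('80.00'), is_discounted is True and
    # get_list_price() returns Decimal('100.00'); with list_price=None,
    # get_list_price() falls back to unit_cost and is_discounted is falsy.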
@property
def single_item_receipt_template(self):
"""
The template that should be used when there's only one item in the order
"""
return 'shoppingcart/receipt.html'
@property
def single_item_receipt_context(self):
"""
Extra variables needed to render the template specified in
`single_item_receipt_template`
"""
return {}
def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
"""
Individual instructions for this order item.
Currently, only used for emails.
"""
return ''
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
This can be overridden by the subclasses of OrderItem
"""
course_key = getattr(self, 'course_id', None)
if course_key:
course = get_course_by_id(course_key, depth=0)
return course.display_name
else:
            raise Exception(
                "Not Implemented. OrderItems that are not course-specific should have"
                " an overridden pdf_receipt_display_name property"
)
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
The default implementation returns defaults for most attributes. When no name or
category is specified by the implementation, the string 'N/A' is placed for the
name and category. This should be handled appropriately by all implementations.
Returns
A dictionary containing analytics data for this OrderItem.
"""
return {
'id': self.id,
'sku': type(self).__name__,
'name': 'N/A',
'price': str(self.unit_cost),
'quantity': self.qty,
'category': 'N/A',
}
def retire(self):
"""
Called by the `retire` method defined in the `Order` class. Retires
an order item if its (and its order's) status was erroneously not
updated to "purchased" after the order was processed.
"""
self.status = ORDER_STATUS_MAP[self.status]
self.save()
class Invoice(TimeStampedModel):
"""
This table capture all the information needed to support "invoicing"
which is when a user wants to purchase Registration Codes,
but will not do so via a Credit Card transaction.
"""
class Meta(object):
app_label = "shoppingcart"
company_name = models.CharField(max_length=255, db_index=True)
company_contact_name = models.CharField(max_length=255)
company_contact_email = models.CharField(max_length=255)
recipient_name = models.CharField(max_length=255)
recipient_email = models.CharField(max_length=255)
address_line_1 = models.CharField(max_length=255)
address_line_2 = models.CharField(max_length=255, null=True, blank=True)
address_line_3 = models.CharField(max_length=255, null=True, blank=True)
city = models.CharField(max_length=255, null=True)
state = models.CharField(max_length=255, null=True)
zip = models.CharField(max_length=15, null=True)
country = models.CharField(max_length=64, null=True)
# This field has been deprecated.
# The total amount can now be calculated as the sum
# of each invoice item associated with the invoice.
# For backwards compatibility, this field is maintained
# and written to during invoice creation.
total_amount = models.FloatField()
# This field has been deprecated in order to support
# invoices for items that are not course-related.
# Although this field is still maintained for backwards
# compatibility, you should use CourseRegistrationCodeInvoiceItem
# to look up the course ID for purchased redeem codes.
course_id = CourseKeyField(max_length=255, db_index=True)
internal_reference = models.CharField(
max_length=255,
null=True,
blank=True,
help_text=ugettext_lazy("Internal reference code for this invoice.")
)
customer_reference_number = models.CharField(
max_length=63,
null=True,
blank=True,
help_text=ugettext_lazy("Customer's reference code for this invoice.")
)
is_valid = models.BooleanField(default=True)
@classmethod
def get_invoice_total_amount_for_course(cls, course_key):
"""
        Returns the total invoice amount generated by the course.
"""
result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount'))
total = result.get('total', 0)
return total if total else 0
def generate_pdf_invoice(self, course, course_price, quantity, sale_price):
"""
Generates the pdf invoice for the given course
and returns the pdf_buffer.
"""
discount_per_item = float(course_price) - sale_price / quantity
list_price = course_price - discount_per_item
items_data = [{
'item_description': course.display_name,
'quantity': quantity,
'list_price': list_price,
'discount': discount_per_item,
'item_total': quantity * list_price
}]
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id),
date=datetime.now(pytz.utc),
is_invoice=True,
total_cost=float(self.total_amount),
payment_received=0,
balance=float(self.total_amount)
).generate_pdf(pdf_buffer)
return pdf_buffer
def snapshot(self):
"""Create a snapshot of the invoice.
A snapshot is a JSON-serializable representation
of the invoice's state, including its line items
and associated transactions (payments/refunds).
This is useful for saving the history of changes
to the invoice.
Returns:
dict
"""
return {
'internal_reference': self.internal_reference,
'customer_reference': self.customer_reference_number,
'is_valid': self.is_valid,
'contact_info': {
'company_name': self.company_name,
'company_contact_name': self.company_contact_name,
'company_contact_email': self.company_contact_email,
'recipient_name': self.recipient_name,
'recipient_email': self.recipient_email,
'address_line_1': self.address_line_1,
'address_line_2': self.address_line_2,
'address_line_3': self.address_line_3,
'city': self.city,
'state': self.state,
'zip': self.zip,
'country': self.country,
},
'items': [
item.snapshot()
for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
],
'transactions': [
trans.snapshot()
for trans in InvoiceTransaction.objects.filter(invoice=self)
],
}
def __unicode__(self):
label = (
unicode(self.internal_reference)
if self.internal_reference
else u"No label"
)
created = (
self.created.strftime("%Y-%m-%d")
if self.created
else u"No date"
)
return u"{label} ({date_created})".format(
label=label, date_created=created
)
INVOICE_TRANSACTION_STATUSES = (
# A payment/refund is in process, but money has not yet been transferred
('started', 'started'),
# A payment/refund has completed successfully
# This should be set ONLY once money has been successfully exchanged.
('completed', 'completed'),
# A payment/refund was promised, but was cancelled before
# money had been transferred. An example would be
# cancelling a refund check before the recipient has
# a chance to deposit it.
('cancelled', 'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
"""Record payment and refund information for invoices.
There are two expected use cases:
1) We send an invoice to someone, and they send us a check.
We then manually create an invoice transaction to represent
the payment.
2) We send an invoice to someone, and they pay us. Later, we
need to issue a refund for the payment. We manually
create a transaction with a negative amount to represent
the refund.
"""
class Meta(object):
app_label = "shoppingcart"
invoice = models.ForeignKey(Invoice, on_delete=models.CASCADE)
amount = models.DecimalField(
default=0.0, decimal_places=2, max_digits=30,
help_text=ugettext_lazy(
"The amount of the transaction. Use positive amounts for payments"
" and negative amounts for refunds."
)
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
comments = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy("Optional: provide additional information for this transaction")
)
status = models.CharField(
max_length=32,
default='started',
choices=INVOICE_TRANSACTION_STATUSES,
help_text=ugettext_lazy(
"The status of the payment or refund. "
"'started' means that payment is expected, but money has not yet been transferred. "
"'completed' means that the payment or refund was received. "
"'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
)
)
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user', on_delete=models.CASCADE)
@classmethod
def get_invoice_transaction(cls, invoice_id):
"""
        Returns the InvoiceTransaction object for the given invoice_id if one is found,
        else returns None
"""
try:
return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded'))
except InvoiceTransaction.DoesNotExist:
return None
@classmethod
def get_total_amount_of_paid_course_invoices(cls, course_key):
"""
returns the total amount of the paid invoices.
"""
result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
total=Sum(
'amount',
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
total = result.get('total', 0)
return total if total else 0
def snapshot(self):
"""Create a snapshot of the invoice transaction.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'amount': unicode(self.amount),
'currency': self.currency,
'comments': self.comments,
'status': self.status,
'created_by': self.created_by.username,
'last_modified_by': self.last_modified_by.username
}
class InvoiceItem(TimeStampedModel):
"""
This is the basic interface for invoice items.
Each invoice item represents a "line" in the invoice.
For example, in an invoice for course registration codes,
there might be an invoice item representing 10 registration
codes for the DemoX course.
"""
class Meta(object):
app_label = "shoppingcart"
base_manager_name = 'objects'
objects = InheritanceManager()
invoice = models.ForeignKey(Invoice, db_index=True, on_delete=models.CASCADE)
qty = models.IntegerField(
default=1,
help_text=ugettext_lazy("The number of items sold.")
)
unit_price = models.DecimalField(
default=0.0,
decimal_places=2,
max_digits=30,
help_text=ugettext_lazy("The price per item sold, including discounts.")
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
def snapshot(self):
"""Create a snapshot of the invoice item.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'qty': self.qty,
'unit_price': unicode(self.unit_price),
'currency': self.currency
}
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
"""
This is an invoice item that represents a payment for
a course registration.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
def snapshot(self):
"""Create a snapshot of the invoice item.
This is the same as a snapshot for other invoice items,
with the addition of a `course_id` field.
Returns:
dict
"""
snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
snapshot['course_id'] = unicode(self.course_id)
return snapshot
class InvoiceHistory(models.Model):
"""History of changes to invoices.
This table stores snapshots of invoice state,
including the associated line items and transactions
(payments/refunds).
Entries in the table are created, but never deleted
or modified.
We use Django signals to save history entries on change
events. These signals are fired within a database
transaction, so the history record is created only
if the invoice change is successfully persisted.
"""
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
invoice = models.ForeignKey(Invoice, on_delete=models.CASCADE)
# JSON-serialized representation of the current state
# of the invoice, including its line items and
# transactions (payments/refunds).
snapshot = models.TextField(blank=True)
@classmethod
def save_invoice_snapshot(cls, invoice):
"""Save a snapshot of the invoice's current state.
Arguments:
invoice (Invoice): The invoice to save.
"""
cls.objects.create(
invoice=invoice,
snapshot=json.dumps(invoice.snapshot())
)
@staticmethod
def snapshot_receiver(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Signal receiver that saves a snapshot of an invoice.
Arguments:
sender: Not used, but required by Django signals.
instance (Invoice, InvoiceItem, or InvoiceTransaction)
"""
if isinstance(instance, Invoice):
InvoiceHistory.save_invoice_snapshot(instance)
elif hasattr(instance, 'invoice'):
InvoiceHistory.save_invoice_snapshot(instance.invoice)
class Meta(object):
get_latest_by = "timestamp"
app_label = "shoppingcart"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
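# Hedged behaviour sketch (illustrative): because of the receivers above,
#
#     InvoiceItem.objects.create(invoice=invoice, qty=2, unit_price=10)
#
# also appends an InvoiceHistory row whose snapshot JSON already contains
# the new line item, with no explicit bookkeeping by the caller.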
class CourseRegistrationCode(models.Model):
"""
This table contains registration codes
With registration code, a user can register for a course for free
"""
class Meta(object):
app_label = "shoppingcart"
code = models.CharField(max_length=32, db_index=True, unique=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created_by = models.ForeignKey(User, related_name='created_by_user', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order", on_delete=models.CASCADE)
mode_slug = models.CharField(max_length=100, null=True)
is_valid = models.BooleanField(default=True)
# For backwards compatibility, we maintain the FK to "invoice"
# In the future, we will remove this in favor of the FK
# to "invoice_item" (which can be used to look up the invoice).
invoice = models.ForeignKey(Invoice, null=True, on_delete=models.CASCADE)
invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True, on_delete=models.CASCADE)
@classmethod
def order_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via bulk purchase scenario.
"""
return cls.objects.filter(order__isnull=False, course_id=course_id)
@classmethod
def invoice_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via invoice.
"""
return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
"""
This model contains the registration-code redemption info
"""
class Meta(object):
app_label = "shoppingcart"
order = models.ForeignKey(Order, db_index=True, null=True, on_delete=models.CASCADE)
registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True, on_delete=models.CASCADE)
redeemed_by = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
redeemed_at = models.DateTimeField(auto_now_add=True, null=True)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True, on_delete=models.CASCADE)
@classmethod
def registration_code_used_for_enrollment(cls, course_enrollment):
"""
        Returns the RegistrationCodeRedemption object if a registration code
        has been used during the course enrollment, else returns None.
"""
# theoretically there could be more than one (e.g. someone self-unenrolls
# then re-enrolls with a different regcode)
reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
if reg_codes:
# return the first one. In all normal use cases of registration codes
# the user will only have one
return reg_codes[0]
return None
@classmethod
def is_registration_code_redeemed(cls, course_reg_code):
"""
Checks the existence of the registration code
in the RegistrationCodeRedemption
"""
return cls.objects.filter(registration_code__code=course_reg_code).exists()
@classmethod
def get_registration_code_redemption(cls, code, course_id):
"""
Returns the registration code redemption object if found else returns None.
"""
try:
code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
except cls.DoesNotExist:
code_redemption = None
return code_redemption
@classmethod
def create_invoice_generated_registration_redemption(cls, course_reg_code, user): # pylint: disable=invalid-name
"""
This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
and thus the order_id is missing.
"""
code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
code_redemption.save()
return code_redemption
class Coupon(models.Model):
"""
This table contains coupon codes
    A user can get a discount on a course by providing a coupon code
"""
class Meta(object):
app_label = "shoppingcart"
code = models.CharField(max_length=32, db_index=True)
description = models.CharField(max_length=255, null=True, blank=True)
course_id = CourseKeyField(max_length=255)
percentage_discount = models.IntegerField(default=0)
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
expiration_date = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
@property
def display_expiry_date(self):
"""
return the coupon expiration date in the readable format
"""
return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None
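    # Hedged example (illustrative date): with expiration_date set to
    # midnight UTC on 2017-06-02, the displayed (inclusive) expiry is
    # "June 01, 2017".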
class CouponRedemption(models.Model):
"""
    This table contains coupon redemption info
"""
class Meta(object):
app_label = "shoppingcart"
order = models.ForeignKey(Order, db_index=True, on_delete=models.CASCADE)
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
coupon = models.ForeignKey(Coupon, db_index=True, on_delete=models.CASCADE)
@classmethod
def remove_code_redemption_from_item(cls, item, user):
"""
If an item removed from shopping cart then we will remove
the corresponding redemption info of coupon code
"""
order_item_course_id = item.course_id
try:
# Try to remove redemption information of coupon code, If exist.
coupon_redemption = cls.objects.get(
user=user,
coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
order=item.order_id
)
coupon_redemption.delete()
log.info(
u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
coupon_redemption.coupon.code,
user,
str(item.id),
)
except CouponRedemption.DoesNotExist:
log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))
@classmethod
def remove_coupon_redemption_from_cart(cls, user, cart):
"""
        This method deletes the coupon redemption entries for the given cart
"""
coupon_redemption = cls.objects.filter(user=user, order=cart)
if coupon_redemption:
coupon_redemption.delete()
log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)
@classmethod
def get_discount_price(cls, percentage_discount, value):
"""
        Return the discounted price after applying the coupon's percentage discount
"""
discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
return value - discount
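    # Hedged example (illustrative numbers): a 25% coupon on a
    # Decimal('100.00') item gives
    #
    #     get_discount_price(25, Decimal('100.00')) == Decimal('75.00')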
@classmethod
def add_coupon_redemption(cls, coupon, order, cart_items):
"""
        Add coupon redemption info to the CouponRedemption model
"""
is_redemption_applied = False
coupon_redemptions = cls.objects.filter(order=order, user=order.user)
for coupon_redemption in coupon_redemptions:
if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
                log.exception(
                    u"Coupon redemption already exists for user '%s' against order id '%s'",
order.user.username,
order.id,
)
raise MultipleCouponsNotAllowedException
for item in cart_items:
if item.course_id:
if item.course_id == coupon.course_id:
coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
coupon_redemption.save()
discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
item.list_price = item.unit_cost
item.unit_cost = discount_price
item.save()
log.info(
u"Discount generated for user %s against order id '%s'",
order.user.username,
order.id,
)
is_redemption_applied = True
return is_redemption_applied
return is_redemption_applied
@classmethod
def get_top_discount_codes_used(cls, course_id):
"""
Returns the top discount codes used.
QuerySet = [
{
'coupon__percentage_discount': 22,
'coupon__code': '12',
'coupon__used_count': '2',
},
{
...
}
]
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
'coupon__code', 'coupon__percentage_discount'
).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')
@classmethod
def get_total_coupon_code_purchases(cls, course_id):
"""
returns total seats purchases using coupon codes
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
"""
This is an inventory item for paying for a course registration
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True, on_delete=models.CASCADE)
@classmethod
def get_self_purchased_seat_count(cls, course_key, status='purchased'):
"""
        Returns the count of paid course items, filtered by course_id and status.
"""
return cls.objects.filter(course_id=course_key, status=status).count()
@classmethod
def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
"""
        Returns the PaidCourseRegistration object if the user has paid for
        the course enrollment, else returns None
"""
try:
return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
status='purchased').latest('id')
except PaidCourseRegistration.DoesNotExist:
return None
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum(
F('qty') * F('unit_cost'),
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
cost=None, currency=None):
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning(
u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
order.user.email,
course_id,
order.id,
)
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_MODE
course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
item, __ = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
item.status = order.status
item.mode = course_mode.slug
item.qty = 1
item.unit_cost = cost
item.list_price = cost
item.line_desc = _(u'Registration for Course: {course_name}').format(
course_name=course.display_name_with_default_escaped)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
CourseEnrollment.send_signal_full(EnrollStatusChange.paid_start,
user=order.user, mode=item.mode, course_id=course_id,
cost=cost, currency=currency)
return item
def purchased_callback(self):
"""
When purchased, this should enroll the user in the course. We are assuming that
course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
would in fact be quite silly since there's a clear back door.
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
# enroll in course and link to the enrollment_id
self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
self.save()
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost))
self.course_enrollment.send_signal(EnrollStatusChange.paid_complete,
cost=self.line_cost, currency=self.currency)
def generate_receipt_instructions(self):
"""
Generates instructions when the user has purchased a PaidCourseRegistration.
Basically tells the user to visit the dashboard to see their new classes
"""
notification = _(
u"Please visit your {link_start}dashboard{link_end} "
u"to see your new course."
).format(
link_start=u'<a href="{url}">'.format(url=reverse('dashboard')),
link_end=u'</a>',
)
return self.pk_with_subclass, set([notification])
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
except PaidCourseRegistrationAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the Order Item is associated with a course, additional fields will be populated with
course information. If there is a mode associated, the mode data is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(PaidCourseRegistration, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class CourseRegCodeItem(OrderItem):
"""
This is an inventory item for paying for
generating course registration codes
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
@classmethod
def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
"""
        Returns the sum of bulk-purchased seats.
"""
total = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))
if result['total'] is not None:
total = result['total']
return total
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum(
F('qty') * F('unit_cost'),
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
cost=None, currency=None):
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
            log.warning("User {} tried to add CourseRegCodeItem for course {}, already in cart id {}"
.format(order.user.email, course_id, order.id))
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_SHOPPINGCART_MODE
course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id) # pylint: disable=unused-variable
item.status = order.status
item.mode = course_mode.slug
item.unit_cost = cost
item.list_price = cost
item.qty = qty
item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
course_name=course.display_name_with_default_escaped)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
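    # A minimal usage sketch (assumes a user with a cart and a valid
    # CourseKey; names are illustrative):
    #   cart = Order.get_cart_for_user(user)
    #   item = CourseRegCodeItem.add_to_order(cart, course_key, qty=10)
    # cost and currency default to the course mode's min_price and currency.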
def purchased_callback(self):
"""
The purchase is completed, this OrderItem type will generate Registration Codes that will
be redeemed by users
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
total_registration_codes = int(self.qty)
# we need to import here because of a circular dependency
# we should ultimately refactor code to have save_registration_code in this models.py
# file, but there's also a shared dependency on a random string generator which
# is in another PR (for another feature)
from lms.djangoapps.instructor.views.api import save_registration_code
for i in range(total_registration_codes): # pylint: disable=unused-variable
save_registration_code(self.user, self.course_id, self.mode, order=self.order)
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost))
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
except CourseRegCodeItemAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the OrderItem is associated with a course, additional fields will be populated with
course information. If a mode is available, it will be included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CourseRegCodeItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class CourseRegCodeItemAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates reports for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
return u"{} : {}".format(text_type(self.course_id), self.annotation)
class PaidCourseRegistrationAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates reports for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
return u"{} : {}".format(text_type(self.course_id), self.annotation)
class CertificateItem(OrderItem):
"""
This is an inventory item for purchasing certificates
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
course_enrollment = models.ForeignKey(CourseEnrollment, on_delete=models.CASCADE)
mode = models.SlugField()
@receiver(UNENROLL_DONE)
def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs): # pylint: disable=no-self-argument,unused-argument
"""
        When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
        occurred for a verified certificate purchase that is still within the refund deadline. If so, it actually
        performs the refund.
Returns the refunded certificate on a successful refund; else, it returns nothing.
"""
# Only refund verified cert unenrollments that are within bounds of the expiration date
if skip_refund or (not course_enrollment.refundable()):
return
target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
try:
target_cert = target_certs[0]
except IndexError:
log.warning(
u"Matching CertificateItem not found while trying to refund. User %s, Course %s",
course_enrollment.user,
course_enrollment.course_id,
)
return
target_cert.status = 'refunded'
target_cert.refund_requested_time = datetime.now(pytz.utc)
target_cert.save()
target_cert.order.refund()
order_number = target_cert.order_id
# send billing an email so they can handle refunding
subject = _("[Refund] User-Requested Refund")
message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
user_email=course_enrollment.user.email,
order_number=order_number)
to_email = [settings.PAYMENT_SUPPORT_EMAIL]
from_email = configuration_helpers.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
try:
send_mail(subject, message, from_email, to_email, fail_silently=False)
except Exception as exception: # pylint: disable=broad-except
err_str = ('Failed sending email to billing to request a refund for verified certificate'
' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
log.error(err_str.format(
user=course_enrollment.user,
course=course_enrollment.course_id,
ce_id=course_enrollment.id,
order=order_number,
exception=exception,
))
return target_cert
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
"""
Add a CertificateItem to an order
Returns the CertificateItem object after saving
`order` - an order that this item should be added to, generally the cart order
`course_id` - the course that we would like to purchase as a CertificateItem
`cost` - the amount the user will be paying for this CertificateItem
`mode` - the course mode that this certificate is going to be issued for
This item also creates a new enrollment if none exists for this user and this course.
Example Usage:
cart = Order.get_cart_for_user(user)
CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
"""
super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
# do some validation on the enrollment mode
valid_modes = CourseMode.modes_for_course_dict(course_id)
if mode in valid_modes:
mode_info = valid_modes[mode]
else:
msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
log.error(msg)
raise InvalidCartItem(
_(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
)
item, _created = cls.objects.get_or_create(
order=order,
user=order.user,
course_id=course_id,
course_enrollment=course_enrollment,
mode=mode,
)
item.status = order.status
item.qty = 1
item.unit_cost = cost
item.list_price = cost
course_name = modulestore().get_course(course_id).display_name
# Translators: In this particular case, mode_name refers to a
# particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
# by which a user could enroll in the given course.
item.line_desc = _("{mode_name} for course {course}").format(
mode_name=mode_info.name,
course=course_name
)
item.currency = currency
order.currency = currency
order.save()
item.save()
# signal course added to cart
course_enrollment.send_signal(EnrollStatusChange.paid_start, cost=cost, currency=currency)
return item
def purchased_callback(self):
"""
When purchase goes through, activate and update the course enrollment for the correct mode
"""
self.course_enrollment.change_mode(self.mode)
self.course_enrollment.activate()
self.course_enrollment.send_signal(EnrollStatusChange.upgrade_complete,
cost=self.unit_cost, currency=self.currency)
def additional_instruction_text(self):
verification_reminder = ""
refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 14 days after the course "
"start date. ")
is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment()
is_professional_mode_verified = self.course_enrollment.is_professional_enrollment()
if is_enrollment_mode_verified:
domain = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)})
verification_url = "http://{domain}{path}".format(domain=domain, path=path)
verification_reminder = _(
"If you haven't verified your identity yet, please start the verification process ({verification_url})."
).format(verification_url=verification_url)
if is_professional_mode_verified:
refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 2 days after the "
"course start date. ")
refund_reminder = _(
"{refund_reminder_msg}"
"To receive your refund, contact {billing_email}. "
"Please include your order number in your email. "
"Please do NOT include your credit card information."
).format(
refund_reminder_msg=refund_reminder_msg,
billing_email=settings.PAYMENT_SUPPORT_EMAIL
)
# Need this to be unicode in case the reminder strings
# have been translated and contain non-ASCII unicode
return u"{verification_reminder} {refund_reminder}".format(
verification_reminder=verification_reminder,
refund_reminder=refund_reminder
)
@classmethod
def verified_certificates_count(cls, course_id, status):
"""Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
return use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
# TODO combine these three methods into one
@classmethod
def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
"""
Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
Sample usages:
- status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
- status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
etc
"""
query = use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
if query is None:
return Decimal(0.00)
else:
return query
@classmethod
def verified_certificates_contributing_more_than_minimum(cls, course_id):
return use_read_replica_if_available(
CertificateItem.objects.filter(
course_id=course_id,
mode='verified',
status='purchased',
unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the CertificateItem is associated with a course, additional fields will be populated with
course information. If there is a mode associated with the certificate, it is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CertificateItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class DonationConfiguration(ConfigurationModel):
"""Configure whether donations are enabled on the site."""
class Meta(ConfigurationModel.Meta):
app_label = "shoppingcart"
class Donation(OrderItem):
"""A donation made by a user.
Donations can be made for a specific course or to the organization as a whole.
Users can choose the donation amount.
"""
class Meta(object):
app_label = "shoppingcart"
# Types of donations
DONATION_TYPES = (
("general", "A general donation"),
("course", "A donation to a particular course")
)
# The type of donation
donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)
# If a donation is made for a specific course, then store the course ID here.
# If the donation is made to the organization as a whole,
# set this field to CourseKeyField.Empty
course_id = CourseKeyField(max_length=255, db_index=True)
@classmethod
@transaction.atomic
def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
"""Add a donation to an order.
Args:
order (Order): The order to add this donation to.
donation_amount (Decimal): The amount the user is donating.
Keyword Args:
course_id (CourseKey): If provided, associate this donation with a particular course.
            currency (str): The currency used for the donation.
Raises:
InvalidCartItem: The provided course ID is not valid.
Returns:
Donation
"""
# This will validate the currency but won't actually add the item to the order.
super(Donation, cls).add_to_order(order, currency=currency)
# Create a line item description, including the name of the course
# if this is a per-course donation.
# This will raise an exception if the course can't be found.
description = cls._line_item_description(course_id=course_id)
params = {
"order": order,
"user": order.user,
"status": order.status,
"qty": 1,
"unit_cost": donation_amount,
"currency": currency,
"line_desc": description
}
if course_id is not None:
params["course_id"] = course_id
params["donation_type"] = "course"
else:
params["donation_type"] = "general"
return cls.objects.create(**params)
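    # A minimal usage sketch (assumes an existing cart; values illustrative):
    #   cart = Order.get_cart_for_user(user)
    #   Donation.add_to_order(cart, Decimal('25.00'))                 # general
    #   Donation.add_to_order(cart, Decimal('25.00'), course_id=key)  # course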
def purchased_callback(self):
"""Donations do not need to be fulfilled, so this method does nothing."""
pass
def generate_receipt_instructions(self):
"""Provide information about tax-deductible donations in the receipt.
Returns:
tuple of (Donation, unicode)
"""
return self.pk_with_subclass, set([self._tax_deduction_msg()])
def additional_instruction_text(self, **kwargs):
"""Provide information about tax-deductible donations in the confirmation email.
Returns:
unicode
"""
return self._tax_deduction_msg()
def _tax_deduction_msg(self):
"""Return the translated version of the tax deduction message.
Returns:
unicode
"""
return _(
u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
u"This receipt was prepared to support charitable contributions for tax purposes. "
u"We confirm that neither goods nor services were provided in exchange for this gift."
).format(platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME))
@classmethod
def _line_item_description(cls, course_id=None):
"""Create a line-item description for the donation.
Includes the course display name if provided.
Keyword Arguments:
course_id (CourseKey)
Raises:
CourseDoesNotExistException: The course ID is not valid.
Returns:
unicode
"""
# If a course ID is provided, include the display name of the course
# in the line item description.
if course_id is not None:
course = modulestore().get_course(course_id)
if course is None:
msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
log.error(msg)
raise CourseDoesNotExistException(
_(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
)
return _(u"Donation for {course}").format(course=course.display_name)
# The donation is for the organization as a whole, not a specific course
else:
return _(u"Donation for {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
)
@property
def single_item_receipt_context(self):
return {
'receipt_has_donation_item': True,
}
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the donation is associated with a course, additional fields will be populated with
course information. When no name or category is specified by the implementation, the
platform name is used as a default value for required event fields, to declare that
the Order is specific to the platform, rather than a specific product name or category.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(Donation, self).analytics_data()
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
else:
data['name'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
data['category'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
return data
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
"""
return self._line_item_description(course_id=self.course_id)
|
ahmedaljazzar/edx-platform
|
lms/djangoapps/shoppingcart/models.py
|
Python
|
agpl-3.0
| 91,693
|
[
"VisIt"
] |
9329338406d526f5ed2c64a3daab1a3566159f0708c935a849a84492ba139eea
|
# -*- coding: utf-8 -*-
#
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Dict
from typing import List
from typing import Union
from ORCA.scripts.BaseScript import cBaseScript
from ORCA.utils.Path import cPath
from ORCA.utils.FileName import cFileName
from ORCA.utils.Tar import cTarFile
from ORCA.utils.Platform import OS_GetSystemTmpPath
from ORCA.utils.Platform import OS_GetSystemUserPath
from ORCA.ui.ShowErrorPopUp import ShowErrorPopUp
import ORCA.Globals as Globals
'''
<root>
<repositorymanager>
<entry>
<name>Create TV Logos Script (internal)</name>
<description language='English'>Helper script to create TV Logos (internal)</description>
<description language='German'>Hilfs - Skript zum Erstellen der TV Logos (internal)</description>
<author>Carsten Thielepape</author>
<version>5.0.4</version>
<minorcaversion>5.0.4</minorcaversion>
<skip>1</skip>
<sources>
<source>
<local>$var(APPLICATIONPATH)/scripts/helper/helper_createtvlogos</local>
<sourcefile>$var(REPOSITORYWWWPATH)/scripts/helper_createtvlogos.zip</sourcefile>
<targetpath>scripts/helper</targetpath>
</source>
</sources>
<skipfiles>
</skipfiles>
</entry>
</repositorymanager>
</root>
'''
class cScript(cBaseScript):
"""
WikiDoc:Doc
WikiDoc:Context:Scripts
WikiDoc:Page:Scripts-CreateTvLogos
WikiDoc:TOCTitle:Helper Script to create TV logos
= Create TV Scripts =
    This is an internal helper script to create the TV Logos
WikiDoc:End
"""
def __init__(self):
super().__init__()
self.uType:str = u'HELPERS'
self.dServices:Dict = {}
self.dLogos:Dict = {}
self.uIniFileLocation:str = u'none'
def Init(self,uObjectName:str,oFnScript:Union[cFileName,None]=None) -> None:
"""
Init function for the script
:param str uObjectName: The name of the script (to be passed to all scripts)
:param cFileName oFnScript: The file of the script (to be passed to all scripts)
"""
super().Init(uObjectName= uObjectName, oFnObject=oFnScript)
def RunScript(self, *args:List, **kwargs:Dict) -> Union[Dict,None]:
""" Main Entry point, parses the cmd_type and calls the relevant functions """
try:
if kwargs.get("caller",None)=="settings":
self.CreateLogoSources(**kwargs)
return None
except Exception as e:
self.ShowError(uMsg="Can''t run TV Helper script, invalid parameter",uParConfigName=self.uConfigName,oException=e)
return {"ret":1}
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def CreateLogoSources(self,**kwargs) -> None:
aSourceIcons:List[str]
dFolderTarget:Dict[str,str] = {}
oFnDest:cFileName
oFnSource:cFileName
oFnTarFile:cFileName
oPathDest:cPath
oPathDestRef:cPath
oPathDestSub:cPath
uFileCore:str
uFile:str
uFnSource:str
uPathSource:str
uSubFolder:str
oPathSources:cPath
oPathSourcesDebug:cPath
aFolder:List[str]
aFiles:List[str]
oPathSources = cPath("$var(RESOURCEPATH)/tvlogos-src")
oPathSourcesDebug = cPath("c:/tvlogos-src")
if oPathSourcesDebug.Exists():
oPathSources=oPathSourcesDebug
oPathSourcesDebug = OS_GetSystemUserPath() + "tvlogos-src"
if oPathSourcesDebug.Exists():
oPathSources=oPathSourcesDebug
aFiles = oPathSources.GetFileList(bSubDirs=False,bFullPath=True)
if aFiles:
oFnSource = cFileName(oPathSources)+"srp.index.txt"
if not oFnSource.Exists():
ShowErrorPopUp(uMessage="srp.index.txt is missing in source folder!")
return
oPathSourcesDebug = OS_GetSystemTmpPath()+"tvlogos-src"
oPathSourcesDebug.Delete()
oPathSourcesDebug.Create()
oFnSource.Copy(oNewFile=oPathSourcesDebug)
for uFnXY in aFiles:
if uFnXY.endswith(".tar.xz") and "snp-full" in uFnXY and "190x102." in uFnXY:
oTarFile = cTarFile().ImportFullPath(uFnFullName=uFnXY)
self.ShowDebug(uMsg="Untar: %s to %s" % (oTarFile.string,oPathSourcesDebug.string))
oTarFile.UnTar(oPath=oPathSourcesDebug)
oPathSources = oPathSourcesDebug
aFolder = oPathSources.GetFolderList(bFullPath=True)
for uPathSource in aFolder:
dFolderTarget.clear()
uFileCore = uPathSource[uPathSource.rfind("102.")+4:uPathSource.find("_")]
oPathDest = Globals.oPathTVLogos+uFileCore
oPathDest.Create()
self.ShowDebug(uMsg="Create Picons for: %s" % (oPathDest.string))
oFnSource = cFileName(oPathSources)+"srp.index.txt"
if not oFnSource.Exists():
ShowErrorPopUp(uMessage="srp.index.txt is missing in source folder!")
return
oFnSource.Copy(oNewFile=oPathDest)
oPathDest=oPathDest+"picons"
oPathDest.Create()
oPathDestRef=oPathDest+"references"
oPathDestRef.Create()
aSourceIcons = cPath(uPathSource).GetFileList(bFullPath=False,bSubDirs=False)
for uFnSource in aSourceIcons:
oFnSource = cFileName(cPath(uPathSource)) + uFnSource
if uFnSource.startswith("1_"):
oFnSource.Copy(oNewFile=oPathDestRef)
else:
uSubFolder=uFnSource.upper()[:2]
uSubFolder=uSubFolder.replace(".","")
if uSubFolder[0].isnumeric():
uSubFolder="0-9"
oPathDestSub=oPathDest+uSubFolder
                    if uSubFolder not in dFolderTarget:
dFolderTarget[uSubFolder] = uSubFolder
oPathDestSub.Create()
oFnSource.Copy(oNewFile=oPathDestSub)
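    # Resulting layout (illustrative), where <set> is taken from the source
    # folder name after "102." and Globals.oPathTVLogos is the target root:
    #   <root>/<set>/srp.index.txt
    #   <root>/<set>/picons/references/1_*        (reference icons)
    #   <root>/<set>/picons/<A-Z|0-9>/<icon>      (grouped by first letters)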
|
thica/ORCA-Remote
|
src/scripts/helper/helper_createtvlogos/script.py
|
Python
|
gpl-3.0
| 7,307
|
[
"ORCA"
] |
b0874d988801cfbf08770146addd2b05c19812dbc753cfdf4433de233462097a
|
# -*- coding: utf-8 -*-
# This file is part of MyPaint.
# Copyright (C) 2012-2015 by Andrew Chadwick <a.t.chadwick@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Hue/Relative chroma/Luma adjuster widgets, with an editable gamut mask.
"""
import math
from copy import deepcopy
from random import random
import re
import os.path
import gui.gtk2compat as gtk2compat
import gtk
from gtk import gdk
import cairo
from bases import CachedBgDrawingArea
from adjbases import ColorManager
from adjbases import ColorAdjuster
from adjbases import ColorAdjusterWidget
from adjbases import HueSaturationWheelMixin
from adjbases import HueSaturationWheelAdjuster
from sliders import HCYLumaSlider
from combined import CombinedAdjusterPage
from lib.color import *
import gui.uicolor
from util import *
from lib.palette import Palette
import lib.alg as geom
from paletteview import palette_load_via_dialog
from paletteview import palette_save_via_dialog
from lib.gettext import gettext as _
from lib.gettext import C_
PREFS_MASK_KEY = "colors.hcywheel.mask.gamuts"
PREFS_ACTIVE_KEY = "colors.hcywheel.mask.active"
MASK_EDITOR_HELP_URI = C_(
"Online help pages",
u"https://github.com/mypaint/mypaint/wiki/"
u"v1.2-HCY-Wheel-and-Gamut-Mask-Editor"
)
class MaskableWheelMixin(object):
"""Provides wheel widgets with maskable areas.
For use with implementations of `HueSaturationWheelAdjusterMixin`.
Concrete implementations can be masked so that they ignore clicks outside
certain color areas. If the mask is active, clicks inside the mask
shapes are treated as normal, but clicks outside them are remapped to a
point on the nearest edge of the nearest shape. This can be useful for
artists who wish to plan the color gamut of their artwork in advance.
http://gurneyjourney.blogspot.com/2011/09/part-1-gamut-masking-method.html
http://gurneyjourney.blogspot.com/2008/01/color-wheel-masking-part-1.html
"""
# Class-level variables: drawing constants etc.
min_shape_size = 0.15 #: Smallest useful shape: fraction of radius
# Instance variables (defaults / documentation)
__mask = None
    mask_toggle = None #: gtk.ToggleAction controlling whether the mask is used
mask_observers = None #: List of no-argument mask change observer callbacks
def __init__(self):
"""Instantiate instance vars and bind actions.
"""
self.__mask = []
self.mask_observers = []
action_name = "wheel%s_masked" % (id(self),)
self.mask_toggle = gtk.ToggleAction(
action_name,
C_(
"Color Wheels: activity toggle: action title",
u"Gamut Mask Active",
),
C_(
"Color Wheels: activity toggle: action tooltip",
u"Limit your palette for specific moods using a gamut mask.",
),
None
)
self.mask_toggle.connect("toggled", self.__mask_toggled_cb)
def __mask_toggled_cb(self, action):
active = action.get_active()
prefs = self.get_prefs()
prefs[PREFS_ACTIVE_KEY] = active
self.queue_draw()
def set_color_manager(self, manager):
"""Sets the color manager, and reads an initial mask from prefs.
Extends `ColorAdjuster`'s implementation.
"""
ColorAdjuster.set_color_manager(self, manager)
prefs = self.get_prefs()
mask_flat = prefs.get(PREFS_MASK_KEY, None)
mask_active = prefs.get(PREFS_ACTIVE_KEY, False)
if mask_flat is not None:
self.set_mask(self._unflatten_mask(mask_flat))
self.mask_toggle.set_active(mask_active)
@staticmethod
def _flatten_mask(mask):
flat_mask = []
for shape_colors in mask:
shape_flat = [c.to_hex_str() for c in shape_colors]
flat_mask.append(shape_flat)
return flat_mask
@staticmethod
def _unflatten_mask(flat_mask):
mask = []
for shape_flat in flat_mask:
shape_colors = [RGBColor.new_from_hex_str(s) for s in shape_flat]
mask.append(shape_colors)
return mask
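    # For illustration, a flattened mask is a prefs/JSON-friendly list of
    # shapes, each a list of hex color strings (values hypothetical):
    #   [['#ff0000', '#00ff00', '#0000ff'], ['#404040', '#808080', '#c0c0c0']]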
def set_mask_from_palette(self, pal):
"""Sets the mask from a palette.
Any `palette.Palette` can be loaded into the wheel widget, and color
names are used for distinguishing mask shapes. If a color name
matches the pattern "``mask #<decimal-int>``", it will be associated
with the shape having the ID ``<decimal-int>``.
"""
if pal is None:
return
mask_id_re = re.compile(r'\bmask\s*#?\s*(\d+)\b')
mask_shapes = {}
for i in xrange(len(pal)):
color = pal.get_color(i)
if color is None:
continue
shape_id = 0
color_name = pal.get_color_name(i)
if color_name is not None:
mask_id_match = mask_id_re.search(color_name)
if mask_id_match:
shape_id = int(mask_id_match.group(1))
if shape_id not in mask_shapes:
mask_shapes[shape_id] = []
mask_shapes[shape_id].append(color)
mask_list = []
shape_ids = mask_shapes.keys()
shape_ids.sort()
for shape_id in shape_ids:
mask_list.append(mask_shapes[shape_id])
self.set_mask(mask_list)
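    # For illustration (names hypothetical): palette entries called
    # "sky mask #1" and "sea mask#1" are grouped into shape 1, "accent mask 2"
    # into shape 2, and any name without a match falls back to shape 0.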
def set_mask(self, mask):
"""Sets the mask (a list of lists of `UIColor`s).
"""
prefs = self.get_prefs()
if mask is None:
self.__mask = None
self.mask_toggle.set_active(False)
prefs[PREFS_MASK_KEY] = None
else:
self.mask_toggle.set_active(True)
self.__mask = mask
prefs[PREFS_MASK_KEY] = self._flatten_mask(mask)
for func in self.mask_observers:
func()
self.queue_draw()
def get_mask(self):
"""Returns the current mask.
"""
return self.__mask
def get_mask_voids(self):
"""Returns the current mask as a list of lists of (x, y) pairs.
"""
voids = []
if not self.__mask:
return voids
for shape in self.__mask:
if len(shape) >= 3:
void = self.colors_to_mask_void(shape)
voids.append(void)
return voids
def colors_to_mask_void(self, colors):
"""Converts a set of colors to a mask void (convex hull).
Mask voids are the convex hulls of the (x, y) positions for the
colors making up the mask, so mask shapes with fewer than 3 colors
are returned as the empty list.
"""
points = []
if len(colors) < 3:
return points
for col in colors:
points.append(self.get_pos_for_color(col))
return geom.convex_hull(points)
def get_color_at_position(self, x, y, ignore_mask=False):
"""Converts an `x`, `y` position to a color.
        Ordinarily, this implementation uses any active mask to limit the
colors which can be clicked on. Set `ignore_mask` to disable this
added behaviour.
"""
sup = HueSaturationWheelMixin
if ignore_mask or not self.mask_toggle.get_active():
return sup.get_color_at_position(self, x, y)
voids = self.get_mask_voids()
if not voids:
return sup.get_color_at_position(self, x, y)
isects = []
for vi, void in enumerate(voids):
# If we're inside a void, use the unchanged value
if geom.point_in_convex_poly((x, y), void):
return sup.get_color_at_position(self, x, y)
# If outside, find the nearest point on the nearest void's edge
for p1, p2 in geom.pairwise(void):
isect = geom.nearest_point_in_segment(p1, p2, (x, y))
if isect is not None:
d = math.sqrt((isect[0]-x)**2 + (isect[1]-y)**2)
isects.append((d, isect))
# Above doesn't include segment ends, so add those
d = math.sqrt((p1[0]-x)**2 + (p1[1]-y)**2)
isects.append((d, p1))
# Determine the closest point.
if isects:
isects.sort()
x, y = isects[0][1]
return sup.get_color_at_position(self, x, y)
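    # In short: with the mask active, a click outside every void is snapped to
    # the closest candidate gathered above (nearest points on the edges plus
    # the segment start points), so the returned color always lies on or
    # inside a mask shape.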
@staticmethod
def _get_void_size(void):
"""Size metric for a mask void (list of x,y points; convex hull)
"""
area = geom.poly_area(void)
return math.sqrt(area)
def _get_mask_fg(self):
"""Returns the mask edge drawing color as an rgb triple"""
state = self.get_state_flags()
style = self.get_style_context()
c = style.get_color(state)
return gui.uicolor.from_gdk_rgba(c).get_rgb()
def _get_mask_bg(self):
"""Returns the mask area drawing color as an rgb triple."""
state = self.get_state_flags()
style = self.get_style_context()
c = style.get_background_color(state)
return gui.uicolor.from_gdk_rgba(c).get_rgb()
def draw_mask(self, cr, wd, ht):
"""Draws the mask, if enabled and if it has any usable voids.
For the sake of the editor subclass, this doesn't draw any voids
which are smaller than `self.min_shape_size` times the wheel radius.
"""
if not self.mask_toggle.get_active():
return
if self.__mask is None or self.__mask == []:
return
cr.save()
radius = self.get_radius(wd=wd, ht=ht)
cx, cy = self.get_center(wd=wd, ht=ht)
cr.arc(cx, cy, radius+self.BORDER_WIDTH, 0, 2*math.pi)
cr.clip()
bg_rgb = self._get_mask_bg()
fg_rgb = self._get_mask_fg()
cr.push_group()
cr.set_operator(cairo.OPERATOR_OVER)
cr.set_source_rgb(*bg_rgb)
cr.rectangle(0, 0, wd, ht)
cr.fill()
voids = []
min_size = radius * self.min_shape_size
for void in self.get_mask_voids():
if len(void) < 3:
continue
size = self._get_void_size(void)
if size >= min_size:
voids.append(void)
cr.set_source_rgb(*fg_rgb)
for void in voids:
cr.new_sub_path()
cr.move_to(*void[0])
for x, y in void[1:]:
cr.line_to(x, y)
cr.close_path()
cr.set_line_width(2.0)
cr.stroke_preserve()
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.set_source_rgba(1, 1, 1, 0)
cr.fill()
cr.set_operator(cairo.OPERATOR_OVER)
cr.pop_group_to_source()
cr.paint_with_alpha(0.666)
cr.restore()
def paint_foreground_cb(self, cr, wd, ht):
"""Paints the foreground items: mask, then marker.
"""
self.draw_mask(cr, wd, ht)
HueSaturationWheelMixin.paint_foreground_cb(self, cr, wd, ht)
class HCYHueChromaWheelMixin(object):
"""Mixin for wheel-style adjusters to display the H+C from the HCY model.
For use with implementations of `HueSaturationWheelAdjusterMixin`; make
sure this mixin comes before it in the MRO.
"""
def get_normalized_polar_pos_for_color(self, col):
col = HCYColor(color=col)
return col.c, col.h
def color_at_normalized_polar_pos(self, r, theta):
col = HCYColor(color=self.get_managed_color())
col.h = theta
col.c = r
return col
class HCYHueChromaWheel (MaskableWheelMixin,
HCYHueChromaWheelMixin,
HueSaturationWheelAdjuster):
"""Circular mapping of the H and C terms of the HCY model."""
STATIC_TOOLTIP_TEXT = C_(
"HCY Color Wheel: tooltip",
u"HCY hue and chroma.",
)
def __init__(self):
"""Instantiate, binding events.
"""
MaskableWheelMixin.__init__(self)
HueSaturationWheelAdjuster.__init__(self)
self.connect("scroll-event", self.__scroll_cb)
self.add_events(gdk.SCROLL_MASK)
def __scroll_cb(self, widget, event):
# Scrolling controls luma.
d = self.SCROLL_DELTA
if event.direction in (gdk.SCROLL_DOWN, gdk.SCROLL_LEFT):
d *= -1
col = HCYColor(color=self.get_managed_color())
y = clamp(col.y+d, 0.0, 1.0)
if col.y != y:
col.y = y
self.set_managed_color(col)
return True
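    # Scrolling up/right raises luma by SCROLL_DELTA and down/left lowers it;
    # the result is clamped to [0, 1] and the managed color is only updated
    # when the luma actually changes.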
class HCYMaskEditorWheel (HCYHueChromaWheel):
"""HCY wheel specialized for mask editing."""
## Instance vars
__last_cursor = None # previously set cursor (determines some actions)
# Objects which are active or being manipulated
__tmp_new_ctrlpoint = None # new control-point color
__active_ctrlpoint = None # active point in active_void
__active_shape = None # list of colors or None
# Drag state
__drag_func = None
__drag_start_pos = None
## Class-level constants and variables
# Specialized cursors for different actions
__add_cursor = gdk.Cursor(gdk.PLUS)
__move_cursor = gdk.Cursor(gdk.FLEUR)
__move_point_cursor = gdk.Cursor(gdk.CROSSHAIR)
__rotate_cursor = gdk.Cursor(gdk.EXCHANGE)
# Constrain the range of allowable lumas
__MAX_LUMA = 0.75
__MIN_LUMA = 0.25
# Drawing constraints and activity proximities
__ctrlpoint_radius = 2.5
__ctrlpoint_grab_radius = 10
__max_num_shapes = 6 # how many shapes are allowed
    # Tooltip text. Is there a better way of explaining this? It obscures the
    # editor quite a lot.
STATIC_TOOLTIP_TEXT = C_(
"HCY Mask Editor Wheel: tooltip",
u"Gamut mask editor. Click in the middle to create "
u"or manipulate shapes, or rotate the mask using "
u"the edges of the disc.",
)
def __init__(self):
"""Instantiate, and connect the editor events.
"""
HCYHueChromaWheel.__init__(self)
self.connect("button-press-event", self.__button_press_cb)
self.connect("button-release-event", self.__button_release_cb)
self.connect("motion-notify-event", self.__motion_cb)
self.connect("leave-notify-event", self.__leave_cb)
self.add_events(gdk.POINTER_MOTION_MASK | gdk.LEAVE_NOTIFY_MASK)
def __leave_cb(self, widget, event):
# Reset the active objects when the pointer leaves.
if self.__drag_func is not None:
return
self.__active_shape = None
self.__active_ctrlpoint = None
self.__tmp_new_ctrlpoint = None
self.queue_draw()
self.__set_cursor(None)
def __set_cursor(self, cursor):
# Sets the window cursor, retaining a record.
if cursor != self.__last_cursor:
self.get_window().set_cursor(cursor)
self.__last_cursor = cursor
def __update_active_objects(self, x, y):
# Decides what a click or a drag at (x, y) would do, and updates the
# mouse cursor and draw state to match.
assert self.__drag_func is None
self.__active_shape = None
self.__active_ctrlpoint = None
self.__tmp_new_ctrlpoint = None
self.queue_draw() # yes, always
# Possible mask void manipulations
mask = self.get_mask()
for mask_idx in xrange(len(mask)):
colors = mask[mask_idx]
if len(colors) < 3:
continue
# If the pointer is near an existing control point, clicking and
# dragging will move it.
void = []
for col_idx in xrange(len(colors)):
col = colors[col_idx]
px, py = self.get_pos_for_color(col)
dp = math.sqrt((x-px)**2 + (y-py)**2)
if dp <= self.__ctrlpoint_grab_radius:
mask.remove(colors)
mask.insert(0, colors)
self.__active_shape = colors
self.__active_ctrlpoint = col_idx
self.__set_cursor(None)
return
void.append((px, py))
# If within a certain distance of an edge, dragging will create and
# then move a new control point.
void = geom.convex_hull(void)
for p1, p2 in geom.pairwise(void):
isect = geom.nearest_point_in_segment(p1, p2, (x, y))
if isect is not None:
ix, iy = isect
di = math.sqrt((ix-x)**2 + (iy-y)**2)
if di <= self.__ctrlpoint_grab_radius:
newcol = self.get_color_at_position(ix, iy)
self.__tmp_new_ctrlpoint = newcol
mask.remove(colors)
mask.insert(0, colors)
self.__active_shape = colors
self.__set_cursor(None)
return
# If the mouse is within a mask void, then dragging would move that
# shape around within the mask.
if geom.point_in_convex_poly((x, y), void):
mask.remove(colors)
mask.insert(0, colors)
self.__active_shape = colors
self.__set_cursor(None)
return
# Away from shapes, clicks and drags manipulate the entire mask: adding
# cutout voids to it, or rotating the whole mask around its central
# axis.
alloc = self.get_allocation()
cx, cy = self.get_center(alloc=alloc)
radius = self.get_radius(alloc=alloc)
dx, dy = x-cx, y-cy
r = math.sqrt(dx**2 + dy**2)
if r < radius*(1.0-self.min_shape_size):
if len(mask) < self.__max_num_shapes:
d = self.__dist_to_nearest_shape(x, y)
minsize = radius * self.min_shape_size
if d is None or d > minsize:
# Clicking will result in a new void
self.__set_cursor(self.__add_cursor)
else:
# Click-drag to rotate the entire mask
self.__set_cursor(self.__rotate_cursor)
def __drag_active_shape(self, px, py):
# Updates the position of the active shape during drags.
sup = HCYHueChromaWheel
x0, y0 = self.__drag_start_pos
dx = px - x0
dy = py - y0
self.__active_shape[:] = []
for col in self.__active_shape_predrag:
cx, cy = self.get_pos_for_color(col)
cx += dx
cy += dy
col2 = sup.get_color_at_position(self, cx, cy, ignore_mask=True)
self.__active_shape.append(col2)
def __drag_active_ctrlpoint(self, px, py):
# Moves the highlighted control point during drags.
sup = HCYHueChromaWheel
x0, y0 = self.__drag_start_pos
dx = px - x0
dy = py - y0
col = self.__active_ctrlpoint_predrag
cx, cy = self.get_pos_for_color(col)
cx += dx
cy += dy
col = sup.get_color_at_position(self, cx, cy, ignore_mask=True)
self.__active_shape[self.__active_ctrlpoint] = col
def __rotate_mask(self, px, py):
# Rotates the entire mask around the grey axis during drags.
cx, cy = self.get_center()
x0, y0 = self.__drag_start_pos
theta0 = math.atan2(x0-cx, y0-cy)
theta = math.atan2(px-cx, py-cy)
dntheta = (theta0 - theta) / (2*math.pi)
while dntheta <= 0:
dntheta += 1.0
if self.__mask_predrag is None:
self.__mask_predrag = []
for shape in self.get_mask():
shape_hcy = [HCYColor(color=c) for c in shape]
self.__mask_predrag.append(shape_hcy)
mgr = self.get_color_manager()
newmask = []
for shape in self.__mask_predrag:
shape_rot = []
for col in shape:
col_r = HCYColor(color=col)
h = mgr.distort_hue(col_r.h)
h += dntheta
h %= 1.0
col_r.h = mgr.undistort_hue(h)
shape_rot.append(col_r)
newmask.append(shape_rot)
self.set_mask(newmask)
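    # The rotation above works in display-hue space: dntheta is the drag angle
    # as a fraction of a full turn, normalized into (0, 1]; each hue is mapped
    # with distort_hue(), shifted modulo 1.0, then mapped back with
    # undistort_hue() so the mask rotates with the wheel the user sees.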
def __button_press_cb(self, widget, event):
# Begins drags.
if self.__drag_func is None:
self.__update_active_objects(event.x, event.y)
self.__drag_start_pos = event.x, event.y
if self.__tmp_new_ctrlpoint is not None:
self.__active_ctrlpoint = len(self.__active_shape)
self.__active_shape.append(self.__tmp_new_ctrlpoint)
self.__tmp_new_ctrlpoint = None
if self.__active_ctrlpoint is not None:
self.__active_shape_predrag = self.__active_shape[:]
ctrlpt = self.__active_shape[self.__active_ctrlpoint]
self.__active_ctrlpoint_predrag = ctrlpt
self.__drag_func = self.__drag_active_ctrlpoint
self.__set_cursor(self.__move_point_cursor)
elif self.__active_shape is not None:
self.__active_shape_predrag = self.__active_shape[:]
self.__drag_func = self.__drag_active_shape
self.__set_cursor(self.__move_cursor)
elif self.__last_cursor is self.__rotate_cursor:
self.__mask_predrag = None
self.__drag_func = self.__rotate_mask
def __button_release_cb(self, widget, event):
# Ends the current drag & cleans up, or handle other clicks.
if self.__drag_func is None:
# Clicking when not in a drag adds a new shape
if self.__last_cursor is self.__add_cursor:
self.__add_void(event.x, event.y)
else:
# Cleanup when dragging ends
self.__drag_func = None
self.__drag_start_pos = None
self.__cleanup_mask()
self.__update_active_objects(event.x, event.y)
def __motion_cb(self, widget, event):
# Fire the current drag function if one's active.
if self.__drag_func is not None:
self.__drag_func(event.x, event.y)
self.queue_draw()
else:
self.__update_active_objects(event.x, event.y)
def __cleanup_mask(self):
mask = self.get_mask()
# Drop points from all shapes which are not part of the convex hulls.
for shape in mask:
if len(shape) <= 3:
continue
points = [self.get_pos_for_color(c) for c in shape]
edge_points = geom.convex_hull(points)
for col, point in zip(shape, points):
if point in edge_points:
continue
shape.remove(col)
# Drop shapes smaller than the minimum size.
newmask = []
min_size = self.get_radius() * self.min_shape_size
for shape in mask:
points = [self.get_pos_for_color(c) for c in shape]
void = geom.convex_hull(points)
size = self._get_void_size(void)
if size >= min_size:
newmask.append(shape)
mask = newmask
# Drop shapes whose points entirely lie within other shapes
newmask = []
maskvoids = [(shape, geom.convex_hull([self.get_pos_for_color(c)
for c in shape]))
for shape in mask]
for shape1, void1 in maskvoids:
shape1_subsumed = True
for p1 in void1:
p1_subsumed = False
for shape2, void2 in maskvoids:
if shape1 is shape2:
continue
if geom.point_in_convex_poly(p1, void2):
p1_subsumed = True
break
if not p1_subsumed:
shape1_subsumed = False
break
if not shape1_subsumed:
newmask.append(shape1)
mask = newmask
self.set_mask(mask)
self.queue_draw()
def __dist_to_nearest_shape(self, x, y):
# Distance from `x`, `y` to the nearest edge or vertex of any shape.
dists = []
for hull in self.get_mask_voids():
# cx, cy = geom.poly_centroid(hull)
for p1, p2 in geom.pairwise(hull):
np = geom.nearest_point_in_segment(p1, p2, (x, y))
if np is not None:
nx, ny = np
d = math.sqrt((x-nx)**2 + (y-ny)**2)
dists.append(d)
# Segment end too
d = math.sqrt((p1[0]-x)**2 + (p1[1]-y)**2)
dists.append(d)
if not dists:
return None
dists.sort()
return dists[0]
def __add_void(self, x, y):
# Adds a new shape into the empty space centred at `x`, `y`.
self.queue_draw()
# Pick a nice size for the new shape, taking care not to
# overlap any other shapes, at least initially.
alloc = self.get_allocation()
cx, cy = self.get_center(alloc=alloc)
radius = self.get_radius(alloc=alloc)
dx, dy = x-cx, y-cy
r = math.sqrt(dx**2 + dy**2)
d = self.__dist_to_nearest_shape(x, y)
if d is None:
d = radius
size = min((radius - r), d) * 0.95
minsize = radius * self.min_shape_size
if size < minsize:
return
# Create a regular polygon with one of its edges facing the
# middle of the wheel.
shape = []
nsides = 3 + len(self.get_mask())
psi = math.atan2(dy, dx) + (math.pi/nsides)
psi += math.pi
for i in xrange(nsides):
theta = 2.0 * math.pi * float(i)/nsides
theta += psi
px = int(x + size*math.cos(theta))
py = int(y + size*math.sin(theta))
col = self.get_color_at_position(px, py, ignore_mask=True)
shape.append(col)
mask = self.get_mask()
mask.append(shape)
self.set_mask(mask)
def draw_mask_control_points(self, cr, wd, ht):
# Draw active and inactive control points on the active shape.
if self.__active_shape is None:
return
cr.save()
active_rgb = 1, 1, 1
normal_rgb = 0, 0, 0
delete_rgb = 1, 0, 0
cr.set_line_width(1.0)
void = self.colors_to_mask_void(self.__active_shape)
# Highlight the objects that would be directly or indirectly affected
# if the shape were dragged, and how.
min_size = self.get_radius(wd=wd, ht=ht) * self.min_shape_size
void_rgb = normal_rgb
if self._get_void_size(void) < min_size:
# Shape will be deleted
void_rgb = delete_rgb
elif ((self.__active_ctrlpoint is None) and
(self.__tmp_new_ctrlpoint is None)):
# The entire shape would be moved
void_rgb = active_rgb
# Outline the current shape
cr.set_source_rgb(*void_rgb)
for p_idx, p in enumerate(void):
if p_idx == 0:
cr.move_to(*p)
else:
cr.line_to(*p)
cr.close_path()
cr.stroke()
# Control points
colors = self.__active_shape
for col_idx, col in enumerate(colors):
px, py = self.get_pos_for_color(col)
if (px, py) not in void:
# not in convex hull (is it worth doing this fragile test?)
continue
point_rgb = void_rgb
if col_idx == self.__active_ctrlpoint:
point_rgb = active_rgb
cr.set_source_rgb(*point_rgb)
cr.arc(px, py, self.__ctrlpoint_radius, 0, 2*math.pi)
cr.fill()
if self.__tmp_new_ctrlpoint:
px, py = self.get_pos_for_color(self.__tmp_new_ctrlpoint)
cr.set_source_rgb(*active_rgb)
cr.arc(px, py, self.__ctrlpoint_radius, 0, 2*math.pi)
cr.fill()
# Centroid
cr.set_source_rgb(*void_rgb)
cx, cy = geom.poly_centroid(void)
cr.save()
cr.set_line_cap(cairo.LINE_CAP_SQUARE)
cr.set_line_width(0.5)
cr.translate(int(cx)+0.5, int(cy)+0.5)
cr.move_to(-2, 0)
cr.line_to(2, 0)
cr.stroke()
cr.move_to(0, -2)
cr.line_to(0, 2)
cr.stroke()
cr.restore()
def paint_foreground_cb(self, cr, wd, ht):
"""Foreground drawing override.
"""
self.draw_mask(cr, wd, ht)
self.draw_mask_control_points(cr, wd, ht)
def get_managed_color(self):
"""Override, with a limited range or returned luma.
"""
col = super(HCYMaskEditorWheel, self).get_managed_color()
col = HCYColor(color=col)
col.y = clamp(col.y, self.__MIN_LUMA, self.__MAX_LUMA)
return col
def set_managed_color(self, color):
"""Override, limiting the luma range.
"""
col = HCYColor(color=color)
col.y = clamp(col.y, self.__MIN_LUMA, self.__MAX_LUMA)
super(HCYMaskEditorWheel, self).set_managed_color(col)
class HCYMaskPreview (MaskableWheelMixin,
HCYHueChromaWheelMixin,
HueSaturationWheelAdjuster):
"""Mask preview widget; not scrollable.
These widgets can be used with `paletteview.palette_load_via_dialog()` as
preview widgets during mask selection.
"""
def __init__(self):
MaskableWheelMixin.__init__(self)
HueSaturationWheelAdjuster.__init__(self)
self.set_app_paintable(True)
self.set_has_window(False)
self.set_mask([])
self.mask_toggle.set_active(True)
self.set_size_request(64, 64)
def render_background_cb(self, cr, wd, ht):
sup = HueSaturationWheelAdjuster
sup.render_background_cb(self, cr, wd=wd, ht=ht)
self.draw_mask(cr, wd=wd, ht=ht)
def paint_foreground_cb(self, cr, wd, ht):
pass
def get_background_validity(self):
return deepcopy(self.get_mask())
def set_palette(self, palette):
# Compatibility with palette_load_via_dialog()
self.set_mask_from_palette(palette)
class HCYMaskTemplateDialog (gtk.Dialog):
"""Dialog for choosing a mask from a small set of templates.
http://gurneyjourney.blogspot.co.uk/2008/02/shapes-of-color-schemes.html
"""
@property
def __templates(self):
Y = 0.5
H = 1-0.05
# Reusable shapes...
atmos_triad = [
(H, 0.95, Y),
((H+0.275) % 1, 0.55, Y),
((1+H-0.275) % 1, 0.55, Y)
]
def __coffin(h):
# Hexagonal coffin shape with the foot end at the centre
# of the wheel.
shape = []
shape.append(((h + 0.25) % 1, 0.03, Y))
shape.append(((h + 1 - 0.25) % 1, 0.03, Y))
shape.append(((h + 0.01) % 1, 0.95, Y))
shape.append(((h + 1 - 0.01) % 1, 0.95, Y))
shape.append(((h + 0.04) % 1, 0.70, Y))
shape.append(((h + 1 - 0.04) % 1, 0.70, Y))
return shape
def __complement_blob(h):
# Small pentagonal blob at the given hue, used for an organic-
# looking dab of a complementary hue.
shape = []
shape.append(((h+0.015) % 1, 0.94, Y))
shape.append(((h+0.985) % 1, 0.94, Y))
shape.append(((h+0.035) % 1, 0.71, Y))
shape.append(((h+0.965) % 1, 0.71, Y))
shape.append(((h) % 1, 0.54, Y))
return shape
templates = []
templates.append((
C_(
"HCY Gamut Mask template name",
u"Atmospheric Triad",
),
C_(
"HCY Gamut Mask template description",
"Moody and subjective, defined by one dominant primary "
"and two primaries which are less intense.",
),
[deepcopy(atmos_triad)]
))
templates.append((
C_(
"HCY Gamut Mask template name",
u"Shifted Triad",
),
C_(
"HCY Gamut Mask template description",
u"Weighted more strongly towards the dominant color.",
),
[
[(H, 0.95, Y),
((H+0.35) % 1, 0.4, Y),
((1+H-0.35) % 1, 0.4, Y)]
]
))
templates.append((
C_(
"HCY Gamut Mask template name",
u"Complementary",
),
C_(
"HCY Gamut Mask template description",
u"Contrasting opposites, "
u"balanced by having central neutrals "
u"between them on the color wheel.",
),
[
[((H+0.005) % 1, 0.9, Y),
((H+0.995) % 1, 0.9, Y),
((H+0.250) % 1, 0.1, Y),
((H+0.750) % 1, 0.1, Y),
((H+0.505) % 1, 0.9, Y),
((H+0.495) % 1, 0.9, Y)]
]
))
templates.append((
C_(
"HCY Gamut Mask template name",
u"Mood and Accent",
),
C_(
"HCY Gamut Mask template description",
u"One main range of colors, "
u"with a complementary accent for "
u"variation and highlights.",
),
[deepcopy(atmos_triad), __complement_blob(H+0.5)]
))
templates.append((
C_(
"HCY Gamut Mask template name",
u"Split Complementary",
),
C_(
"HCY Gamut Mask template description",
u"Two analogous colors and a complement to them, "
u"with no secondary colors between them.",
),
[__coffin(H+0.5), __coffin(1+H-0.1), __coffin(H+0.1)]
))
return templates
def __init__(self, parent, target):
gtk.Dialog.__init__(
self,
C_(
u"HCY Gamut Mask new-from-template dialog: window title",
"New Gamut Mask from Template",
),
parent,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT),
)
self.set_position(gtk.WIN_POS_MOUSE)
target_mgr = target.get_color_manager()
prefs_ro = deepcopy(target_mgr.get_prefs())
datapath = target_mgr.get_data_path()
mgr = ColorManager(prefs=prefs_ro, datapath=datapath)
mgr.set_wheel_type(target_mgr.get_wheel_type())
self.target = target
for name, desc, mask_shapes_float in self.__templates:
mask = []
for mask_shape_float in mask_shapes_float:
shape = []
for h, c, y in mask_shape_float:
h = mgr.undistort_hue(h)
shape.append(HCYColor(h, c, y))
mask.append(shape)
label = gtk.Label()
label.set_markup("<b>%s</b>\n\n%s" % (name, desc))
label.set_size_request(375, -1)
label.set_line_wrap(True)
label.set_alignment(0, 0.5)
preview = HCYMaskPreview()
preview.set_color_manager(mgr)
preview.set_mask(mask)
preview_frame = gtk.AspectFrame(obey_child=True)
preview_frame.add(preview)
preview_frame.set_shadow_type(gtk.SHADOW_NONE)
hbox = gtk.HBox()
hbox.set_spacing(6)
hbox.pack_start(preview_frame, False, False)
hbox.pack_start(label, True, True)
button = gtk.Button()
button.add(hbox)
button.set_relief(gtk.RELIEF_NONE)
button.connect("clicked", self.__button_clicked_cb, mask)
self.vbox.pack_start(button, True, True)
self.connect("response", self.__response_cb)
self.connect("show", self.__show_cb)
for w in self.vbox:
w.show_all()
ref_color = target.get_managed_color()
mgr.set_color(ref_color)
def __button_clicked_cb(self, widget, mask):
self.target.set_mask(mask)
self.hide()
def __show_cb(self, widget, *a):
self.vbox.show_all()
def __response_cb(self, widget, response_id):
self.hide()
return True
class HCYMaskPropertiesDialog (gtk.Dialog):
"""Dialog for choosing, editing, or enabling/disabling masks.
"""
def __init__(self, parent, target):
gtk.Dialog.__init__(
self,
C_(
"HCY Gamut Mask Editor dialog: window title",
u"Gamut Mask Editor",
),
parent,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(
gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT,
),
)
self.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
self.target = target
ed = HCYMaskEditorWheel()
target_mgr = target.get_color_manager()
prefs_ro = deepcopy(target_mgr.get_prefs())
datapath = target_mgr.get_data_path()
ed_mgr = ColorManager(prefs=prefs_ro, datapath=datapath)
ed.set_color_manager(ed_mgr)
self.editor = ed
ed.set_size_request(300, 300)
ed.mask_toggle.set_active(True)
self.mask_toggle_ctrl = gtk.CheckButton(
C_(
"HCY Gamut Mask Editor dialog: mask-is-active checkbox",
u"Active",
),
use_underline=False,
)
self.mask_toggle_ctrl.set_tooltip_text(ed.mask_toggle.get_tooltip())
ed.mask_observers.append(self.__mask_changed_cb)
hbox = gtk.HBox()
hbox.set_spacing(3)
# Sidebar buttonbox
# On the right and packed to the top. This places its secondary
# control, a mask toggle button, next to the "OK" button so it's less
# likely to be missed.
bbox = gtk.VButtonBox()
new_btn = self.__new_button = gtk.Button(stock=gtk.STOCK_NEW)
load_btn = self.__load_button = gtk.Button(stock=gtk.STOCK_OPEN)
save_btn = self.__save_button = gtk.Button(stock=gtk.STOCK_SAVE)
clear_btn = self.__clear_button = gtk.Button(stock=gtk.STOCK_CLEAR)
help_btn = self.__help_button = gtk.LinkButton.new_with_label(
uri = MASK_EDITOR_HELP_URI,
label = C_(
"HCY Mask Editor: action button labels",
u"Help…",
),
)
new_btn.set_tooltip_text(C_(
"HCY Mask Editor: action button tooltips",
u"Create mask from template."),
)
load_btn.set_tooltip_text(C_(
"HCY Mask Editor: action button tooltips",
u"Load mask from a GIMP palette file."),
)
save_btn.set_tooltip_text(C_(
"HCY Mask Editor: action button tooltips",
u"Save mask to a GIMP palette file."),
)
clear_btn.set_tooltip_text(C_(
"HCY Mask Editor: action button tooltips",
u"Erase the mask."),
)
help_btn.set_tooltip_text(C_(
"HCY Mask Editor: action button tooltips",
u"Open the online help for this dialog in a web browser."),
)
new_btn.connect("clicked", self.__new_clicked)
save_btn.connect("clicked", self.__save_clicked)
load_btn.connect("clicked", self.__load_clicked)
clear_btn.connect("clicked", self.__clear_clicked)
bbox.pack_start(new_btn)
bbox.pack_start(load_btn)
bbox.pack_start(save_btn)
bbox.pack_start(clear_btn)
action_area = self.get_action_area()
if isinstance(action_area, gtk.ButtonBox):
action_area.pack_start(help_btn)
action_area.set_child_secondary(help_btn, True)
action_area.set_child_non_homogeneous(help_btn, True)
bbox.pack_start(self.mask_toggle_ctrl)
bbox.set_child_secondary(self.mask_toggle_ctrl, True)
else:
bbox.pack_start(self.mask_toggle_ctrl)
bbox.pack_start(help_btn)
bbox.set_child_secondary(help_btn, True)
bbox.set_layout(gtk.BUTTONBOX_START)
hbox.pack_start(ed, True, True)
hbox.pack_start(bbox, False, False)
hbox.set_border_width(9)
self.vbox.pack_start(hbox, True, True)
self.connect("response", self.__response_cb)
self.connect("show", self.__show_cb)
for w in self.vbox:
w.show_all()
def __mask_changed_cb(self):
mask = self.editor.get_mask()
empty = mask == []
self.__save_button.set_sensitive(not empty)
self.__clear_button.set_sensitive(not empty)
def __new_clicked(self, widget):
dialog = HCYMaskTemplateDialog(self, self.editor)
dialog.run()
def __save_clicked(self, button):
pal = Palette()
mask = self.editor.get_mask()
for i, shape in enumerate(mask):
for j, col in enumerate(shape):
col_name = "mask#%d primary#%d" % (i, j) # NOT localised
pal.append(col, col_name)
preview = HCYMaskPreview()
preview.set_size_request(128, 128)
target_mgr = self.target.get_color_manager()
prefs_ro = deepcopy(target_mgr.get_prefs())
datapath = target_mgr.get_data_path()
mgr = ColorManager(prefs=prefs_ro, datapath=datapath)
preview.set_color_manager(mgr)
preview.set_managed_color(self.editor.get_managed_color())
palette_save_via_dialog(
pal,
title = C_(
"HCY Gamut Mask load dialog: window title",
u"Save Mask as a GIMP Palette"
),
parent = self,
preview = preview,
)
def __load_clicked(self, button):
preview = HCYMaskPreview()
preview.set_size_request(128, 128)
target_mgr = self.target.get_color_manager()
prefs_ro = deepcopy(target_mgr.get_prefs())
datapath = target_mgr.get_data_path()
mgr = ColorManager(prefs=prefs_ro, datapath=datapath)
preview.set_color_manager(mgr)
preview.set_managed_color(self.editor.get_managed_color())
dialog_title = C_(
"HCY Gamut Mask load dialog: window title",
u"Load Mask from a GIMP Palette",
)
pal = palette_load_via_dialog(title=dialog_title, parent=self,
preview=preview)
if pal is None:
return
self.editor.set_mask_from_palette(pal)
def __clear_clicked(self, widget):
self.editor.set_mask([])
def __show_cb(self, widget, *a):
# When the dialog is shown, clone the target adjuster's mask for
# editing. Assume the user wants to turn on the mask if there
# is no mask on the target already (reduce the number of mouse clicks)
active = True
if self.target.get_mask():
active = self.target.mask_toggle.get_active()
self.mask_toggle_ctrl.set_active(active)
mask = deepcopy(self.target.get_mask())
self.editor.set_mask(mask)
# The wheel type may have changed elsewhere
editor_mgr = self.editor.get_color_manager()
wheel_type = self.target.get_color_manager().get_wheel_type()
editor_mgr.set_wheel_type(wheel_type)
# Clone the target's luma too,
# but not too bright, not too dark
col = HCYColor(color=self.target.get_managed_color())
self.editor.set_managed_color(col)
# Necessary for the content to be displayed
self.vbox.show_all()
def __response_cb(self, widget, response_id):
if response_id == gtk.RESPONSE_ACCEPT:
self.target.set_mask(self.editor.get_mask())
mask_active = self.mask_toggle_ctrl.get_active()
self.target.mask_toggle.set_active(mask_active)
self.hide()
class HCYAdjusterPage (CombinedAdjusterPage):
"""Combined HCY adjuster.
"""
def __init__(self):
y_adj = HCYLumaSlider()
y_adj.vertical = True
hc_adj = HCYHueChromaWheel()
table = gtk.Table(rows=2, columns=2)
xopts = gtk.FILL | gtk.EXPAND
yopts = gtk.FILL | gtk.EXPAND
table.attach(y_adj, 0, 1, 0, 1, gtk.FILL, yopts, 3, 3)
table.attach(hc_adj, 1, 2, 0, 2, xopts, yopts, 3, 3)
self.__y_adj = y_adj
self.__hc_adj = hc_adj
self.__table = table
self.__mask_dialog = None
@classmethod
def get_properties_description(cls):
return C_(
"HCY Wheel color adjuster page: properties tooltip.",
u"Set gamut mask.",
)
def show_properties(self):
if self.__mask_dialog is None:
toplevel = self.__hc_adj.get_toplevel()
dia = HCYMaskPropertiesDialog(toplevel, self.__hc_adj)
self.__mask_dialog = dia
self.__mask_dialog.run()
@classmethod
def get_page_icon_name(cls):
return 'mypaint-tool-hcywheel'
@classmethod
def get_page_title(cls):
return C_(
"HCY Wheel color adjuster page: title for tooltips etc.",
u"HCY Wheel",
)
@classmethod
def get_page_description(cls):
return C_(
"HCY Wheel color adjuster page: description for tooltips etc.",
u"Set the color using cylindrical hue/chroma/luma space. "
u"The circular slices are equiluminant.",
)
def get_page_widget(self):
frame = gtk.AspectFrame(obey_child=True)
frame.set_shadow_type(gtk.SHADOW_NONE)
frame.add(self.__table)
return frame
def set_color_manager(self, manager):
ColorAdjuster.set_color_manager(self, manager)
self.__y_adj.set_property("color-manager", manager)
self.__hc_adj.set_property("color-manager", manager)
if __name__ == '__main__':
import os
import sys
from adjbases import ColorManager
mgr = ColorManager(prefs={}, datapath='.')
mgr.set_color(HSVColor(0.0, 0.0, 0.55))
if len(sys.argv) > 1:
# Generate icons
wheel = HCYHueChromaWheel()
wheel.set_color_manager(mgr)
icon_name = HCYAdjusterPage.get_page_icon_name()
for dir_name in sys.argv[1:]:
wheel.save_icon_tree(dir_name, icon_name)
else:
# Interactive test
page = HCYAdjusterPage()
page.set_color_manager(mgr)
window = gtk.Window()
window.add(page.get_page_widget())
window.set_title(os.path.basename(sys.argv[0]))
window.set_border_width(6)
window.connect("destroy", lambda *a: gtk.main_quit())
window.show_all()
gtk.main()
|
dothiko/mypaint
|
gui/colors/hcywheel.py
|
Python
|
gpl-2.0
| 47,368
|
[
"FLEUR"
] |
8105aa69420574efbdd7a114d496c5500b6f4e38fc89b0555b73ed3656baa5d4
|
# coding: utf-8
# In[1]:
import numpy as np
from scipy.fftpack import fft #, ifft
import scipy.optimize as opt
from sys import exit
# Wave functions definitions
# -------------------------------------------------------------------------------------
# In[2]:
def gaussian(x,n,x0,s0,w,v): # Gaussian wf in K3
# x = grid points
# n = number of grid points
# x0 = Gaussian position
# s0 = switch for a node: =1 imprint node, =0 no node
# w = Gaussian width
# v = wavepacket speed
#
print('Initial state: Gaussian')
fx = np.exp(1j*v*x)*np.pi**0.25*np.exp(-0.5*((x-x0)/w)**2); # define the Gaussian in R3
if (s0):
fx *= x # Equivalent to a first excited state in a harmonic trap
return fft(fx)/n # FFT to K3
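# Note (added): scipy's forward fft is unnormalised, so dividing by n here
# (and in the other state builders below) returns discrete Fourier
# coefficients whose magnitudes do not grow with the grid size.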
# In[ ]:
def thomas_fermi(x,n,x0,s0,gN,Ve): # Thomas-Fermi (with/out node) ansatz for harmonic oscillator in K3
# It assumes the harmonic oscillator frequency as 1,
# although this function should be updated for generic frequency and intended for generic potentials
# x = grid points
# n = number of grid points
# Ve = external-potential vector
# gN = interaction strength times number of particles
# x0 = node position
# s0 = switch for imprinting a (hyperbolic tangent) node: =1 imprint node, =0 no node
#
print('Initial state: Thomas Fermi (TF) ansatz')
R=(1.5*gN)**(1.0/3.0)
muTF=0.5*R**2
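    # Derivation note (added): with hbar = m = omega = 1 these follow from
    # normalising the 1D TF density n(x) = (muTF - x**2/2)/gN over [-R, R]:
    # the integral gives 2*R**3/(3*gN) = 1, hence R = (1.5*gN)**(1/3) and
    # muTF = R**2/2 at the edge of the cloud.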
print(' TF chemical potential = ', muTF,', TF radius =',R)
fx = (muTF-Ve)/gN # fx is now the TF density (including the negative values)
for i in range(n):
if ( fx[i] > 0.0 ):
fx[i] =fx[i]**0.5
else:
fx[i] = 0.0
if ( s0 ): # imprint a node
        fx *= np.tanh((x-x0)*(muTF-Ve)**0.5) # the local healing length scales as 1/sqrt(mu-V(x)) (hbar = m = 1)
return fft(fx)/n # FFT to K3
# In[1]:
def dark_soliton(x,n,x0,gn,v): # tanh(x/\xi) wf
# x = grid points
# n = number of grid points
# x0 = soliton position
# gn = (positive) interaction energy or chemical potential
# v = soliton speed
#
if (gn <= 0):
exit(" error: interaction must be positive...")
print('Initial state: dark soliton: hyperbolic tangent')
c = (gn)**0.5 # speed of sound
xi = 1.0/c
print(' healing length = ',xi)
b = v/c
print(' velocity / speed of sound = ',b )
d= (1-b**2)**0.5 # velocity dependent width
fx =1j*b+d*np.tanh(d*(x-x0)/xi) # (complex) wave function in R3
return fft(fx)/n # FFT to K3
# In[ ]:
def bright_soliton(x,n,x0,gn,v): # 1/cosh(x/\xi) wf
# x = grid points
# n = number of grid points
# x0 = soliton position
# gn = (negative) interaction energy or double of the chemical potential
# v = soliton speed
#
if (gn >= 0):
exit(" error: interaction must be negative...")
print('Initial state: bright soliton: hyperbolic secant')
xi = 1.0/(abs(gn))**0.5
print('healing length=',xi)
print(' velocity = ',v )
fx = np.exp(1j*v*x)/np.cosh((x-x0)/xi) # wave function in R3
return fft(fx)/n # FFT to K3
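# In[ ]:
# Minimal usage sketch (added, not part of the original module). The grid
# size, box length and parameter values below are illustrative assumptions
# only; each builder returns the state already transformed to K3.
if __name__ == '__main__':
    n = 512                                  # number of grid points (assumed)
    L = 40.0                                 # box length (assumed)
    x = np.linspace(-0.5*L, 0.5*L, n, endpoint=False)
    psi_k = gaussian(x, n, x0=0.0, s0=0, w=1.0, v=0.0)   # Gaussian packet
    phi_k = dark_soliton(x, n, x0=0.0, gn=1.0, v=0.1)    # grey soliton
    print('k-space arrays:', psi_k.shape, phi_k.shape)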
|
brunojulia/ultracoldUB
|
Wavepackdisper/wave_functions.py
|
Python
|
gpl-3.0
| 3,154
|
[
"Gaussian"
] |
cb546186c3f79223d8332bbe27cbedc82f7941e978394094e055f18f84eb961b
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import os
import pytest
import time
from django.contrib.auth.models import Group, Permission
from shuup.admin.module_registry import get_modules
from shuup.admin.utils.permissions import (
get_default_model_permissions,
get_permission_object_from_string,
set_permissions_for_group,
)
from shuup.core.models import Category, Product, Shop, ShopProduct
from shuup.testing.browser_utils import (
click_element,
initialize_admin_browser_test,
wait_until_appeared,
wait_until_condition,
)
from shuup.testing.factories import (
create_random_user,
get_default_product_type,
get_default_sales_unit,
get_default_shop,
get_default_tax_class,
)
from shuup.utils.django_compat import reverse
pytestmark = pytest.mark.skipif(os.environ.get("SHUUP_BROWSER_TESTS", "0") != "1", reason="No browser tests run.")
@pytest.mark.django_db
def test_quick_add(browser, admin_user, live_server, settings):
shop = get_default_shop()
get_default_product_type()
get_default_sales_unit()
get_default_tax_class()
initialize_admin_browser_test(browser, live_server, settings)
url = reverse("shuup_admin:shop_product.new")
browser.visit("%s%s" % (live_server, url))
sku = "testsku"
name = "Some product name"
price_value = 10
short_description = "short but gold"
browser.fill("base-sku", sku)
browser.fill("base-name__en", name)
browser.fill("base-short_description__en", short_description)
browser.fill("shop%s-default_price_value" % shop.pk, price_value)
wait_until_appeared(browser, "#id_shop%d-primary_category ~ .quick-add-btn a.btn" % shop.id)
click_element(browser, "#id_shop%d-primary_category ~ .quick-add-btn a.btn" % shop.id)
wait_until_appeared(browser, "#create-object-iframe")
with browser.get_iframe("create-object-iframe") as iframe:
assert Category.objects.count() == 0
wait_until_appeared(iframe, "input[name='base-name__en']")
iframe.fill("base-name__en", "Test Category")
        time.sleep(3) # Let's just wait here for the iframe to open fully (for Chrome and headless)
wait_until_appeared(iframe, "button[form='category_form']")
click_element(browser, "button[form='category_form']")
wait_until_condition(browser, condition=lambda x: Category.objects.count() == 1, timeout=20)
assert Category.objects.first().name == "Test Category"
    # click the edit button
click_element(browser, "#id_shop%d-primary_category ~ .edit-object-btn a.btn" % shop.id)
with browser.get_iframe("create-object-iframe") as iframe:
wait_until_appeared(iframe, "input[name='base-name__en']")
new_cat_name = "Changed Name"
iframe.fill("base-name__en", new_cat_name)
        time.sleep(3) # Let's just wait here for the iframe to open fully (for Chrome and headless)
wait_until_appeared(iframe, "button[form='category_form']")
click_element(iframe, "button[form='category_form']")
wait_until_condition(browser, condition=lambda x: Category.objects.first().name == new_cat_name, timeout=20)
click_element(browser, "button[form='product_form']")
wait_until_appeared(browser, "div[class='message success']")
@pytest.mark.django_db
def test_edit_button_no_permission(browser, admin_user, live_server, settings):
shop = get_default_shop()
manager_group = Group.objects.create(name="Managers")
manager = create_random_user("en", is_staff=True)
manager.username = "manager"
manager.set_password("password")
manager.save()
manager.groups.add(manager_group)
shop.staff_members.add(manager)
# add permissions for Product admin module
manager_permissions = set(["dashboard", "Products", "shop_product.new"])
set_permissions_for_group(manager_group, manager_permissions)
get_default_product_type()
get_default_sales_unit()
get_default_tax_class()
initialize_admin_browser_test(browser, live_server, settings, username=manager.username)
url = reverse("shuup_admin:shop_product.new")
browser.visit("%s%s" % (live_server, url))
sku = "testsku"
name = "Some product name"
price_value = 10
short_description = "short but gold"
browser.fill("base-sku", sku)
browser.fill("base-name__en", name)
browser.fill("base-short_description__en", short_description)
browser.fill("shop%s-default_price_value" % shop.pk, price_value)
wait_until_appeared(browser, "#id_shop%d-primary_category ~ .quick-add-btn a.btn" % shop.id)
click_element(browser, "#id_shop%d-primary_category ~ .quick-add-btn a.btn" % shop.id)
wait_until_appeared(browser, "#create-object-iframe")
# no permission to add category
with browser.get_iframe("create-object-iframe") as iframe:
error = "Can't view this page. You do not have the required permissions: category.new"
wait_until_condition(iframe, condition=lambda x: x.is_text_present(error))
# close iframe
click_element(browser, "#create-object-overlay a.close-btn")
# add permission to add category
manager_permissions.add("category.new")
manager_permissions.add("category.edit")
set_permissions_for_group(manager_group, manager_permissions)
# click to add category again
click_element(browser, "#id_shop%d-primary_category ~ .quick-add-btn a.btn" % shop.id)
wait_until_appeared(browser, "#create-object-iframe")
# add the category
with browser.get_iframe("create-object-iframe") as iframe:
assert Category.objects.count() == 0
wait_until_appeared(iframe, "input[name='base-name__en']")
iframe.fill("base-name__en", "Test Category")
        time.sleep(3) # Let's just wait here for the iframe to open fully (for Chrome and headless)
wait_until_appeared(iframe, "button[form='category_form']")
click_element(browser, "button[form='category_form']")
wait_until_condition(browser, condition=lambda x: Category.objects.count() == 1, timeout=20)
assert Category.objects.first().name == "Test Category"
    # remove the edit category permission
manager_permissions.remove("category.edit")
set_permissions_for_group(manager_group, manager_permissions)
    # click the edit button
click_element(browser, "#id_shop%d-primary_category ~ .edit-object-btn a.btn" % shop.id)
# no permission to edit category
with browser.get_iframe("create-object-iframe") as iframe:
error = "Can't view this page. You do not have the required permission(s): `category.edit`."
wait_until_condition(iframe, condition=lambda x: x.is_text_present(error))
# close iframe
click_element(browser, "#create-object-overlay a.close-btn")
manager_permissions.add("category.edit")
set_permissions_for_group(manager_group, manager_permissions)
click_element(browser, "#id_shop%d-primary_category ~ .edit-object-btn a.btn" % shop.id)
wait_until_appeared(browser, "#create-object-iframe")
new_cat_name = "Changed Name"
with browser.get_iframe("create-object-iframe") as iframe:
wait_until_appeared(iframe, "input[name='base-name__en']")
iframe.fill("base-name__en", new_cat_name)
        time.sleep(3) # Let's just wait here for the iframe to open fully (for Chrome and headless)
wait_until_appeared(iframe, "button[form='category_form']")
click_element(browser, "button[form='category_form']")
wait_until_condition(browser, condition=lambda x: Category.objects.first().name == new_cat_name, timeout=20)
|
shoopio/shoop
|
shuup_tests/browser/admin/test_quick_add_edit_button.py
|
Python
|
agpl-3.0
| 7,834
|
[
"VisIt"
] |
348a6bd86b680a25490db66f0a9baf102a091cd86831142e529cfe2dea482cd5
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Image pipeline
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
var = vtk.vtkImageVariance3D()
var.SetInputConnection(reader.GetOutputPort())
var.SetKernelSize(3,3,1)
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(var.GetOutputPort())
viewer.SetZSlice(22)
viewer.SetColorWindow(3000)
viewer.SetColorLevel(1000)
# viewer.DebugOn()
viewer.Render()
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Imaging/Core/Testing/Python/TestVariance3D.py
|
Python
|
bsd-3-clause
| 758
|
[
"VTK"
] |
11ba4fb3b87dbe477930d8a5bd0782f90f8668fe84592eb1ce35c7d94496c560
|
#!/usr/bin/env python3
# ver 0.1 - make codes on 3/29/2018
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='calculation properties (scalar) distribution using com')
## args
parser.add_argument('-icell', '--icell', default='unit_cell.npy', nargs='?',
help='input unit cell dimension file')
parser.add_argument('-icom', '--icom', default='pol.com.npy', nargs='?',
help='input COM file')
parser.add_argument('-iprop', '--iprop', default='pol.ree.npy', nargs='?',
help='input property (scalar) file')
parser.add_argument('-nmol', '--nmol', nargs='?', type=int,
help='# molecules')
parser.add_argument('-axis', '--axis', default=2, nargs='?', type=int,
help='axis for distribution')
parser.add_argument('-nbin', '--nbin', nargs='?', type=int,
                    help='#bins for distribution on a given axis (should match the nbins used when convolution alignment was done)')
parser.add_argument('-o', '--output', default='pol.ree', nargs='?',
help='output prefix filename for property (.1d)')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))
## import modules
import sys
sys.path.append('/home/htjung/Utility/python/')
import hjung
from hjung import *
import numpy as np
from numpy import linalg as LA
import MDAnalysis as mda
from MDAnalysis.analysis import distances
from scipy.spatial.distance import euclidean
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
## timer
start_proc, start_prof = hjung.time.init()
args.output = args.output + '.1d'
ocom = args.icom.replace('.npy','')+'.1d'
## read files
unitcell = hjung.io.read_simple(args.icell,0,-1)
n_frames = len(unitcell)
com = hjung.io.read_simple(args.icom,0,-1)
if n_frames != int(len(com)/args.nmol/3):
raise ValueError("may be wrong n_frames of com data")
else:
com = com.reshape(n_frames,args.nmol,3)
prop_scalar = hjung.io.read_simple(args.iprop,0,-1)
if n_frames != len(prop_scalar):
raise ValueError("may be wrong n_frames of prop_scalar data")
else:
prop_scalar = prop_scalar.reshape(-1,args.nmol)
# calc. com histograms
unit_cells_1d = unitcell[:,args.axis]
com_1d = com[:,:,args.axis]
com_hist_1d_t, bin_1d_t = hjung.analyze.histo_t_1d_nbin(com_1d, unit_cells_1d, args.nbin)
np.savetxt(ocom, com_hist_1d_t,
header='com distribution with {} frames and {} bins'.format(n_frames,args.nbin) , fmt='%f', comments='# ')
prop_histo_1d_t = hjung.analyze.histo_xy_t_1d_wbin(com_1d, prop_scalar, bin_1d_t)
# save raw com histogram data file
np.save(ocom, com_hist_1d_t)
print(" saved com hist files")
np.savetxt(args.output, prop_histo_1d_t,
header='property distribution with {} frames and {} bins'.format(n_frames,args.nbin), fmt='%f', comments='# ')
np.save(args.output, prop_histo_1d_t)
print(" saved prop_histo_1d_t files")
## timer
hjung.time.end_print(start_proc, start_prof)
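## example invocation (added note; the file names follow the argparse
## defaults above, while -nmol and -nbin are illustrative values):
## python com_scalar_1d.py -icell unit_cell.npy -icom pol.com.npy \
##     -iprop pol.ree.npy -nmol 100 -axis 2 -nbin 50 -o pol.ree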
|
jht0664/Utility_python_gromacs
|
python/com_scalar_1d.py
|
Python
|
mit
| 3,114
|
[
"MDAnalysis"
] |
678c2662b680df1ad8c5deac6cb3d7843de4f7b526f828544d7e304791b53de6
|
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2020 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from pysces.version import __version__
__doc__ = '''network and internet oriented utilities'''
from time import strftime
from getpass import getuser
class PyscesHTML:
"""PySCeS HTML formatting class: contains some basic html elements that can be used in generated reports."""
__version__ = __version__
def HTML_header(self, File):
"""
HTML_header(File)
Write an HTML page header to file (use with HTML_footer)
Arguments:
=========
File: an open, writable Python file object
"""
header = '\n'
header += '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
header += '<html>\n'
header += '<head>\n'
header += (
'<title>PySCeS data generated at '
+ strftime("%H:%M:%S (%Z)")
+ '</title>\n'
)
header += (
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\n'
)
header += '</head>\n'
header += '<body>\n\n'
header += '<h4><a href="http://pysces.sourceforge.net">PySCeS</a></h4>\n\n'
File.write(header)
File.write('<!-- PySCeS data generated at ' + strftime("%H:%M:%S") + '-->\n\n')
return File
def HTML_footer(self, File):
"""
HTML_footer(File)
Write an HTML page footer to file (use with HTML_header)
Arguments:
=========
File: an open, writable Python file object
"""
File.write(
'\n<p><a href="http://pysces.sourceforge.net"><font size="3">PySCeS '
+ __version__
+ '</font></a><font size="2"> output\n generated at '
+ strftime("%H:%M:%S")
+ ' by <i>'
)
try:
File.write(getuser())
except:
File.write('PySCeS')
        File.write('</i></font></p>\n')
File.write('</body>\n')
File.write('</html>\n\n')
return File
def par(self, str, File=None, align='l', txtout=0):
"""
par(str,File=None,align='l',txtout=0)
        Format <p> text and write it to a file (or string)
        Arguments:
        =========
        str: the string of text to be written
        File [default=None]: an open, writable, Python file object
        align [default='l']: HTML alignment attribute ('l','c','r')
        txtout [default=0]: if 1, return the formatted HTML string instead of writing to File
"""
if not txtout:
            assert hasattr(File, 'write'), 'The 2nd argument needs to be an open file'
if align == 'l':
align = 'left'
elif align == 'r':
            align = 'right'
elif align == 'c':
align = 'center'
else:
align = ''
strout = '\n<p align="' + align + '">'
cntr = 0
max_str_len = 75
seeker_active = 0
for x in range(len(str)):
cntr += 1
strout += str[x]
if seeker_active:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
cntr = max_str_len
seeker_active = 0
if cntr >= max_str_len:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
strout += '\n '
else:
seeker_active = 1
cntr = 0
strout += '\n</p>\n'
if txtout:
return strout
else:
File.write(strout)
del str
del strout
def h1(self, str, File=None, align='l', txtout=0):
"""
h1(str,File=None,align='l',txtout=0)
Format <h1> text and write it to a file (or string)
Arguments:
=========
str: the string of text to be written
File [default=None]: an open, writable, Python file object
align [default='l']: HTML alignment attribute ('l','c','r')
        txtout [default=0]: if 1, return the formatted HTML string instead of writing to File
"""
if not txtout:
            assert hasattr(File, 'write'), 'The 2nd argument needs to be an open file'
if align == 'l':
align = 'left'
elif align == 'r':
            align = 'right'
elif align == 'c':
align = 'center'
else:
align = ''
strout = '\n<h1 align="' + align + '">'
cntr = 0
max_str_len = 75
seeker_active = 0
for x in range(len(str)):
cntr += 1
strout += str[x]
if seeker_active:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
cntr = max_str_len
seeker_active = 0
if cntr >= max_str_len:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
strout += '\n '
else:
seeker_active = 1
cntr = 0
strout += '\n</h1>\n'
if txtout:
return strout
else:
File.write(strout)
del str
del strout
def h2(self, str, File=None, align='l', txtout=0):
"""
h2(str,File=None,align='l',txtout=0)
Format <h2> text and write it to a file (or string)
Arguments:
=========
str: the string of text to be written
File [default=None]: an open, writable, Python file object
align [default='l']: HTML alignment attribute ('l','c','r')
        txtout [default=0]: if 1, return the formatted HTML string instead of writing to File
"""
if not txtout:
            assert hasattr(File, 'write'), 'The 2nd argument needs to be an open file'
if align == 'l':
align = 'left'
elif align == 'r':
            align = 'right'
elif align == 'c':
align = 'center'
else:
align = ''
strout = '\n<h2 align="' + align + '">'
cntr = 0
max_str_len = 75
seeker_active = 0
for x in range(len(str)):
cntr += 1
strout += str[x]
if seeker_active:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
cntr = max_str_len
seeker_active = 0
if cntr >= max_str_len:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
strout += '\n '
else:
seeker_active = 1
cntr = 0
strout += '\n</h2>\n'
if txtout:
return strout
else:
File.write(strout)
del str
del strout
def h3(self, str, File=None, align='l', txtout=0):
"""
h3(str,File=None,align='l',txtout=0)
Format <h3> text and write it to a file (or string)
Arguments:
=========
str: the string of text to be written
File [default=None]: an open, writable, Python file object
align [default='l']: HTML alignment attribute ('l','c','r')
        txtout [default=0]: if 1, return the formatted HTML string instead of writing to File
"""
if not txtout:
            assert hasattr(File, 'write'), 'The 2nd argument needs to be an open file'
if align == 'l':
align = 'left'
elif align == 'r':
            align = 'right'
elif align == 'c':
align = 'center'
else:
align = ''
strout = '\n<h3 align="' + align + '">'
cntr = 0
max_str_len = 75
seeker_active = 0
for x in range(len(str)):
cntr += 1
strout += str[x]
if seeker_active:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
cntr = max_str_len
seeker_active = 0
if cntr >= max_str_len:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
strout += '\n'
else:
seeker_active = 1
cntr = 0
strout += '\n</h3>\n'
if txtout:
return strout
else:
File.write(strout)
del str
del strout
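# Minimal usage sketch (added, not part of the original class): with
# txtout=1 the formatting methods return the HTML string instead of
# writing to a file, so no open file handle is needed, e.g.
#
# html = PyscesHTML()
# print(html.h1('PySCeS report', txtout=1))
# print(html.par('Some body text for the report.', align='c', txtout=1))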
import email
import email.utils
import mimetypes
import smtplib
from email.mime.text import MIMEText
from time import sleep, strftime
from getpass import getuser
import os
class PyscesSMTP:
"""A purely experimental class that extends PySCeS with SMTP mailer capabilities. Initialise with
sender address and local mail server name."""
__smtp_active = 0
def __init__(self, fromadd, server):
self.server = server
try:
self.userstr = getuser()
except:
self.userstr = 'PySCeS '
self.msgintro = ''
self.fromhead = self.userstr + ' <' + fromadd + '>'
self.signature = (
3 * '\n'
+ '---\nSent using PySCeS 0.2.2 (http://pysces.sourceforge.net/)\n '
)
# auto-open connection now closed
# self.SMTPOpen()
def GenericMail(self, toadd, msgtxt, subj='PySCeS generated email'):
"""
GenericMail( toadd, msgtxt, subj='PySCeS generated email')
Generate and send a text (non-mime) email message
Arguments:
=========
toadd: recipient address
msgtxt: the message body as a string
subj [default='PySCeS generated email']: message subject line
"""
assert type(msgtxt) == str, '\nMessage text must be a string'
assert self.__smtp_active, 'SMTP Server not active\n'
msgtxt = self.msgintro + msgtxt
msgtxt += self.signature
outer = MIMEText(msgtxt)
outer['Subject'] = subj
outer['To'] = toadd
outer['From'] = self.fromhead
        outer['Date'] = email.utils.formatdate(localtime=True)
outer.epilogue = ' '
if self.CheckGo():
try:
self.__SMTPserver.sendmail(self.fromhead, toadd, outer.as_string())
            except smtplib.SMTPServerDisconnected as e:
print(e)
self.SMTPOpen()
self.__SMTPserver.sendmail(self.fromhead, toadd, outer.as_string())
sleep(0.2)
else:
print('\nEmail send aborted')
def CheckGo(self):
"""
CheckGo()
Do you want to continue yes or no?
Returns 1 or 0
Arguments:
None
"""
GO = 1
while GO:
resp = input('\nDo you want to continue (yes/no): ')
if resp.lower() == 'yes':
print('OK.')
GO = 0
return 1
elif resp.lower() == 'no':
print('Skipped.')
GO = 0
return 0
else:
print('\nyes to continue, no to exit')
## def GenericMailHTML(self, toadd, msgtxt, htmltxt, subj='PySCeS generated email'):
## """
## GenericMailHTML( toadd, msgtxt, htmltxt, subj='PySCeS generated email')
##
## Generate a mime-compliant HTML email message
##
## Arguments:
## =========
## toadd: recipient address
## msgtxt: text only message string
## htmltxt: html formatted message string
## subj [default='PySCeS generated email']: the subject line
##
## """
## assert type(msgtxt) == str, '\nMessage text must be a string'
## assert self.__smtp_active, 'SMTP Server not active\n'
## # Create the enclosing (outer) message
## outer = email.MIMEMultipart.MIMEMultipart()
## outer['Subject'] = subj
## outer['To'] = toadd
## outer['From'] = self.fromhead
## outer['Date'] = email.Utils.formatdate(localtime='true')
## outer.preamble = ' \n'
## outer.epilogue = '\n---\nGenerated by PySCeS 0.2.2\n '
##
## msgtxt += self.signature
## msg = email.MIMEText.MIMEText(msgtxt)
## msg.add_header('Content-Disposition', 'inline')
## outer.attach(msg)
##
## self.__SMTPserver.sendmail(self.fromhead,toadd,outer.as_string())
##
## ctype='text/plain'
## maintype, subtype = ctype.split('/', 1)
## fp = open(infile, 'r')
## att = email.MIMEBase.MIMEBase(maintype, subtype)
## att.set_payload(fp.read())
## fp.close()
## # Encode the payload using Base64
## #email.Encoders.encode_base64(att)
## # Set the filename parameter
## att.add_header('Content-Disposition', 'attachment', filename=infile)
## outer.attach(att)
##
## SMTPserver.sendmail(fromhead,toadd,outer.as_string())
##
## sleep(0.2) #seconds
def SMTPOpen(self):
"""
SMTPOpen()
Start client and connect to an SMTP server
Arguments:
None
"""
self.__SMTPserver = smtplib.SMTP(self.server)
self.__smtp_active = 1
print('\nSMTP server connection opened\n')
def SMTPClose(self):
"""
SMTPClose()
Close connection to SMTP server
Arguments:
None
"""
self.__SMTPserver.close()
self.__smtp_active = 0
print('\nSMTP server connection closed\n')
if __name__ == '__main__':
replyTo = 'bgoli@sun.ac.za'
server = 'mail.sun.ac.za'
print('Reply to:', replyTo)
print('SMTP server:', server)
smtp = PyscesSMTP(replyTo, server)
smtp.GenericMail(
'bgoli@sun.ac.za',
'This test message created: ' + strftime("%a, %d %b %Y %H:%M:%S"),
)
# smtp.GenericMail('jr@sun.ac.za','This test message created: '+ strftime("%a, %d %b %Y %H:%M:%S"))
smtp.SMTPClose()
|
bgoli/pysces
|
pysces/PyscesWeb.py
|
Python
|
bsd-3-clause
| 14,492
|
[
"PySCeS"
] |
aa2cd2cd95a245d81f9ac0b79d2b2e56c49f751576e04e559e39cc63d50b90a5
|
#!/bin/env python
# -*- coding: utf-8 -*-
from Sire.IO import *
from Sire.System import *
from Sire.Mol import *
from Sire.MM import *
from Sire.FF import *
from Sire.CAS import *
from Sire.Maths import *
from Sire.Analysis import *
from Sire.System import *
from Sire.Base import *
from Sire.Units import *
import Sire.Config
import Sire.Stream
from Sire.Tools.AmberLoader import *
from Sire.Tools import Parameter, resolveParameters
import os
import shutil
import copy
import sys
wsrc_tools_dir = "%s/Tools/WSRC" % Sire.Config.share_directory
####################################################
# ALL OF THE GLOBAL USER-AVAILABLE WSRC PARAMETERS #
####################################################
cutoff_method = Parameter("cutoff method", "shift electrostatics",
"""Method used to apply the non-bonded electrostatic cutoff.""")
rf_dielectric = Parameter("reaction field dielectric", 78.3,
"""Dielectric constant to use if the reaction field cutoff method is used.""")
coul_cutoff = Parameter("coulomb cutoff", 15*angstrom,
"""Coulomb cutoff length""")
lj_cutoff = Parameter("LJ cutoff", 15*angstrom,
"""Lennard Jones cutoff length""")
grid_spacing = Parameter("grid spacing", 1.0*angstrom,
"""Grid spacing used for the grid-based forcefields""")
grid_buffer = Parameter("grid buffer", 2*angstrom,
"""Buffer around the grid used to prevent recalculation
in the grid-based forcefields.""")
disable_grid = Parameter("disable grid", False, """Whether or not to disable use of the grid""")
fast_sim = Parameter("fast simulation", False,
"""Switch on options that simplify the calculation so that it is significantly
sped up. The calculation has much lower accuracy, but could be useful
as a way to quickly get residue values, or to equilibrate a longer calculation""")
use_oldff = Parameter("use old forcefields", False, """For debugging, use the old forcefields rather than the
new forcefields""")
temperature = Parameter("temperature", 25*celsius, """Simulation temperature""")
random_seed = Parameter("random seed", None, """Random number seed. Set this if you
want to have reproducible simulations.""")
identity_atoms = Parameter("identity atoms", None,
"""The list of atom names in the ligand on which to place
identity points. If this is not set, then the identity atoms
will be generated automatically.""")
use_fixed_points = Parameter("fixed points", False,
"""Whether or not to use fixed identity points based on looking at
the overlap with the atoms""")
use_water_points = Parameter("water points", False,
"""Whether or not to move the identity points to the oxygens of
the swap water molecules, and so keep them fixed in space during
the simulation""")
use_fixed_ligand = Parameter("fixed ligand", False,
"""Whether or not to completely fix the ligand during the simulation.""")
use_rot_trans_ligand = Parameter("ligand rot-trans", True,
"""Whether or not the ligand is free to translate and rotate.""")
use_reflect_volume = Parameter("reflect volume", False,
"""Use the reflection volume instead of the identity constraint to hold
the swap water cluster in place.""")
reflect_volume_radius = Parameter("reflect volume radius", 1.75*angstrom,
"""The radius of the reflection volume, used only if the reflection volume
is used to hold the swap water cluster in place.""")
reflect_volume_buffer = Parameter("reflect volume buffer", 0*angstrom,
"""The buffer beyond the reflection volume radius that is used when selecting
the water molecules that will be swapped. Swapped water molecules will be those
that are within 'reflect volume radius + reflect volume buffer' of any of
the heavy atoms of the swapped ligand.""")
n_equil_swap = Parameter("swap water nequilmoves", 5000,
"""The number of moves to equilibrate the swap water cluster before applying
the identity or reflection volume constraint.""")
alpha_scale = Parameter("alpha_scale", 1.0,
"""Amount by which to scale the alpha parameter. The lower the value,
the less softening with lambda, while the higher the value, the
more softening""")
delta_lambda = Parameter("delta_lambda", 0.001,
"""Value of delta lambda used in the finite difference thermodynamic
integration algorithm used to calculate the free energy""")
water_monitor_distance = Parameter("water monitor distance", 5.0*angstrom,
"""The distance up to which the free energy of water molecules
interacting with the ligand should be recorded.""")
waterbox_only = Parameter("waterbox only", False,
"""Whether or not to select water molecules only from the water box.""")
nrgmon_frequency = Parameter("energy monitor frequency", 1000,
"""The number of steps between each evaluation of the energy monitors.""")
save_all_nrgmons = Parameter("save energy monitors", False,
"""When debugging, you may want to switch on the saving of energy
monitors. Normally you shouldn't need to save these.""")
lambda_values = Parameter("lambda values", [ 0.005, 0.071, 0.137, 0.203, 0.269, 0.335, 0.401, 0.467, 0.533, 0.599, 0.665, 0.731, 0.797, 0.863, 0.929, 0.995 ],
"""The values of lambda to use in the RETI free energy simulation. Note that it is not a good idea
to use lambda values 0 or 1 as this will introduce discontinuities into the PMF.""")
nsubmoves = Parameter("nsubmoves", 50000,
"""The number of moves to perform between each RETI move.""")
ligand_name = Parameter("ligand name", None,
"""The name of the ligand. This should be the name of one of the residues
in the ligand, so that this program can find the correct molecule. If it is not set, then
the first non-protein, non-solvent molecule is used.""")
reflection_radius = Parameter("reflection radius", 15*angstrom,
"""The radius of the reflection sphere""")
ligand_reflection_radius = Parameter("ligand reflection radius", 1*angstrom,
"""The reflection radius of the ligand. This is used to constrain the ligand
to remain in the active site. This is needed to define the accessible volume
of the bound state.""")
protein_topfile = Parameter("protein topfile", "proteinbox.top",
"""Name of the topology file containing the solvated protein-ligand complex.""")
protein_crdfile = Parameter("protein crdfile", "proteinbox.crd",
"""Name of the coordinate file containing the coordinates of the
solvated protein-ligand complex.""")
protein_s3file = Parameter("protein s3file", "proteinbox.s3",
"""Name to use for the intermediate s3 file that will contain the
solvated protein-ligand complex after it has been loaded from the top/crd files.""")
water_topfile = Parameter("water topfile", "%s/waterbox.top" % wsrc_tools_dir,
"""Name of the topology file containing the water box.""")
water_crdfile = Parameter("water crdfile", "%s/waterbox.crd" % wsrc_tools_dir,
"""Name of the coordinate file containing the coordinates of the water box.""")
water_s3file = Parameter("water s3file", "waterbox.s3",
"""Name to use for the intermediate s3 file that will contain the
water box after it has been loaded from the top/crd files.""")
outdir = Parameter("output directory", "output",
"""Name of the directory in which to place all of the output files.""")
restart_file = Parameter("restart file", "wsrc_restart.s3",
"""Name of the restart file to use to save progress during the simulation.""")
sysmoves_file = Parameter("sysmoves file", "wsrc_sysmoves.s3",
"""Name of the file to save the initial WSRC pre-simulation system.""")
nequilmoves = Parameter("nequilmoves", 50000,
"""Number of equilibration moves to perform before setting up the free energy simulation.""")
nmoves = Parameter("nmoves", 1000, """Number of RETI moves to perform during the simulation.""")
move_backbone = Parameter("move backbone", True,
"""Whether or not to move the protein backbone.""")
coul_power = Parameter("coulomb power", 0,
"""The soft-core coulomb power parameter""")
shift_delta = Parameter("shift delta", 1.2,
"""The soft-core shift delta parameter""")
soften_water = Parameter("soften water", 1.1,
"""The amount by which to scale the water-water electrostatic interactions in
the swap-water cluster between lambda=0 and lambda=1. This helps keep the cluster
together as it is swapped between the two boxes.""")
lj_buffer = Parameter("LJ buffer", 0.005,
"""To prevent end-point effects, the scale factor for the LJ interactions cannot fully
move between 0 and 1, as configurations sampled at 0 or 1 will be invalid for other states.
To overcome this problem, the LJ lambda scale is moved from "LJ buffer" to "1 - LJ buffer",
               e.g. for the default buffer of 0.005, the LJ lambda scale is from 0.005 to 0.995.""")
uncharge_ligand = Parameter("uncharge ligand", False,
"""Whether or not to uncharge the ligand (and swap water cluster) before
swapping them. They are then recharged at the end of the swap.""")
uncharge_ligand_max = Parameter("uncharge ligand max", 0.5,
"""The maximum amount to uncharge the ligand (and swap water cluster) before
swapping them. A value of 1.0 will not uncharge them at all, while a value of
0.0 will uncharge them completely. The default value is 0.5, which will 50%
uncharge the ligand and swap water cluster before swapping.""")
uncharge_lambda_values = Parameter("uncharge lambda values", [0.0, 0.1, 0.25, 0.45, 0.55, 0.75, 0.9, 1.0],
"""Lambda values to use when uncharging (and then recharging) the ligand. These will be
added onto the swapped lambda values to give a new range squashed between 0 and 1.""")
save_pdb = Parameter("save pdb", True,
"""Whether or not to write a PDB of the system after each iteration.""")
save_all_pdbs = Parameter("save all pdbs", False,
"""Whether or not to write all of the PDBs. If not, only PDBs at the two
end points of the simulation will be written.""")
pdb_frequency = Parameter("pdb frequency", 100,
"""The frequency (number of iterations between) saving PDBs""")
binwidth = Parameter("free energy bin width", 1 * kcal_per_mol,
"""The size of the bins used in the histogram of energies collected
as part of creating the free energy average, in multiples of delta lambda""")
restart_frequency = Parameter("restart frequency", 10,
"""The frequency (number of iterations between) saving the restart file for the simulation.""")
####################################################
def getOverlapWaters(ligand, waters, radius=2*angstrom):
overlaps = []
space = Cartesian()
# get the coordinates of all heavy atoms
coords = []
for atom in ligand.atoms():
try:
if atom.property("element").nProtons() >= 6:
coords.append( atom.property("coordinates") )
except:
if atom.property("mass").value() >= 12:
coords.append( atom.property("coordinates") )
coords = CoordGroup(coords)
for molnum in waters.molNums():
water = waters[molnum][0].molecule()
oxygen = None
for atom in water.atoms():
if atom.property("element").nProtons() == 8:
oxygen = atom
break
if oxygen is None:
oxygen = water.atoms()[0]
mindist = space.minimumDistance( CoordGroup([oxygen.property("coordinates")]), coords)
if mindist < radius.value():
overlaps.append(oxygen.molecule())
return overlaps
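# Note (added): getOverlapWaters flags any water whose oxygen sits within
# `radius` of a heavy ligand atom (>= 6 protons, or >= 12 amu as a fallback),
# using a plain Cartesian (non-periodic) minimum-distance test.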
def getIdentityPoints(ligand):
atoms = ligand.atoms()
have_point = {}
for atom in atoms:
# skip small atoms
try:
if atom.property("element").nProtons() >= 6:
have_point[str(atom.name().value())] = True
else:
have_point[str(atom.name().value())] = False
except:
try:
if atom.property("mass").value() >= 12:
have_point[str(atom.name().value())] = True
else:
have_point[str(atom.name().value())] = False
except:
print("Atom %s has neither a mass or element. Cannot add an identity point." % str(atom))
have_point[str(atom.name().value())] = False
if ligand.hasProperty("connectivity"):
connectivity = ligand.property("connectivity")
else:
connectivity = Connectivity(ligand)
have_point_keys = list(have_point.keys())
have_point_keys.sort()
for key in list(have_point_keys):
if have_point[key]:
# if this is bonded to 3+ atoms that also have
# identity points, then get rid of this point
atom = ligand.atom( AtomName(key) )
bonded = connectivity.connectionsTo(atom.name())
if len(bonded) >=3:
n = 0
for b in bonded:
if have_point[ str(ligand.atom(b).name().value()) ]:
n += 1
if n >= 3:
print("Skipping %s as it is bonded to 3 or more points..." % atom.name())
have_point[key] = False
identity_points = []
k2 = []
# skip every 8th point
iskip = 0
have_point_keys = list(have_point.keys())
have_point_keys.sort()
for key in list(have_point_keys):
if have_point[key]:
iskip += 1
if iskip == 8:
iskip = 0
else:
k2.append(key)
identity_points.append( ligand.atom( AtomName(key) ) )
k2.sort()
print("Using %d identity points: %s" % (len(k2), str(k2)))
return identity_points
def getMinimumDistance(mol0, mol1):
space = Cartesian()
return space.minimumDistance(CoordGroup(mol0.molecule().property("coordinates").array()), \
CoordGroup(mol1.molecule().property("coordinates").array()))
def setCLJProperties(forcefield):
if cutoff_method.val.find("shift electrostatics") != -1:
forcefield.setShiftElectrostatics(True)
elif cutoff_method.val.find("reaction field") != -1:
forcefield.setUseReactionField(True)
forcefield.setReactionFieldDielectric(rf_dielectric.val)
else:
print("Cannot interpret the cutoff method from \"%s\"" % cutoff_method.val, file=sys.stderr)
forcefield.setSpace(Cartesian())
if fast_sim.val:
forcefield.setSwitchingFunction( HarmonicSwitchingFunction(7.5 * angstrom, 7.5 * angstrom,
7.5 * angstrom, 7.5 * angstrom) )
else:
forcefield.setSwitchingFunction( HarmonicSwitchingFunction(coul_cutoff.val,coul_cutoff.val,
lj_cutoff.val,lj_cutoff.val) )
return forcefield
def setFakeGridProperties(forcefield):
if fast_sim.val:
forcefield.setSwitchingFunction( HarmonicSwitchingFunction(7.5*angstrom, 7.5*angstrom,
7.5*angstrom, 7.5*angstrom) )
else:
forcefield.setSwitchingFunction( HarmonicSwitchingFunction(coul_cutoff.val,coul_cutoff.val,
lj_cutoff.val,lj_cutoff.val) )
forcefield.setSpace(Cartesian())
return forcefield
def setGridProperties(forcefield, extra_buffer=0*angstrom):
forcefield.setGridSpacing(grid_spacing.val)
forcefield.setBuffer(grid_buffer.val + extra_buffer)
if fast_sim.val:
forcefield.setLJCutoff(7.5*angstrom)
forcefield.setCoulombCutoff(7.5*angstrom)
else:
forcefield.setLJCutoff(lj_cutoff.val)
forcefield.setCoulombCutoff(coul_cutoff.val)
return forcefield
def setSoftCoreProperties(forcefield):
forcefield.setCoulombPower(coul_power.val)
forcefield.setShiftDelta(shift_delta.val)
return forcefield
def setCLJFuncProperties(cljfunc):
cljfunc.setSpace(Cartesian())
if fast_sim.val:
cljfunc.setCoulombCutoff(7.5*angstrom)
cljfunc.setLJCutoff(7.5*angstrom)
else:
cljfunc.setCoulombCutoff(coul_cutoff.val)
cljfunc.setLJCutoff(lj_cutoff.val)
cljfunc.setArithmeticCombiningRules( True )
return cljfunc
def getInterCLJFunction():
if cutoff_method.val.find("shift electrostatics") != -1:
cljfunc = CLJShiftFunction()
elif cutoff_method.val.find("reaction field") != -1:
cljfunc = CLJRFFunction()
cljfunc.setDielectric(rf_dielectric.val)
else:
print("Cannot interpret the cutoff method from \"%s\"" % cutoff_method.val, file=sys.stderr)
return setCLJFuncProperties(cljfunc)
def getSoftInterCLJFunction():
if cutoff_method.val.find("shift electrostatics") != -1:
cljfunc = CLJSoftShiftFunction()
elif cutoff_method.val.find("reaction field") != -1:
cljfunc = CLJSoftRFFunction()
cljfunc.setDielectric(rf_dielectric.val)
else:
print("Cannot interpret the cutoff method from \"%s\"" % cutoff_method.val, file=sys.stderr)
cljfunc.setAlpha(0.0)
cljfunc.setShiftDelta(shift_delta.val)
cljfunc.setCoulombPower(coul_power.val)
return setCLJFuncProperties(cljfunc)
def getIntraCLJFunction():
if cutoff_method.val.find("shift electrostatics") != -1:
cljfunc = CLJIntraShiftFunction()
elif cutoff_method.val.find("reaction field") != -1:
cljfunc = CLJIntraRFFunction()
cljfunc.setDielectric(rf_dielectric.val)
else:
print("Cannot interpret the cutoff method from \"%s\"" % cutoff_method.val, file=sys.stderr)
return setCLJFuncProperties(cljfunc)
def getSoftIntraCLJFunction():
if cutoff_method.val.find("shift electrostatics") != -1:
cljfunc = CLJSoftIntraShiftFunction()
elif cutoff_method.val.find("reaction field") != -1:
cljfunc = CLJSoftIntraRFFunction()
cljfunc.setDielectric(rf_dielectric.val)
else:
print("Cannot interpret the cutoff method from \"%s\"" % cutoff_method.val, file=sys.stderr)
cljfunc.setAlpha(0.0)
cljfunc.setShiftDelta(shift_delta.val)
cljfunc.setCoulombPower(coul_power.val)
return setCLJFuncProperties(cljfunc)
def setNewGridProperties(forcefield, extra_buffer=0*angstrom):
if disable_grid.val:
forcefield.disableGrid()
else:
forcefield.enableGrid()
forcefield.setGridSpacing(grid_spacing.val)
forcefield.setGridBuffer(grid_buffer.val + extra_buffer)
return forcefield
def getAtomNearCOG( molecule ):
"""Find the atom that is closest to the center of geometry of the passed molecule"""
mol_centre = molecule.evaluate().center()
mindist = 99999.0
for x in range(0, molecule.nAtoms()):
atom = molecule.atoms()[x]
at_coords = atom.property('coordinates')
dist = Vector().distance2(at_coords, mol_centre)
if dist < mindist:
mindist = dist
nearest_atom = atom
return nearest_atom
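# Note (added): Vector().distance2 above is the squared distance, so the
# comparison in getAtomNearCOG is effectively in angstrom^2; that is fine
# because squaring preserves the ordering of non-negative distances.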
def createWSRCMoves(system):
# pull out all of the molecule groups for the mobile parts of the system
mobile_solvent = system[MGName("mobile_solvent")]
mobile_sidechains = system[MGName("mobile_sidechains")]
mobile_backbones = system[MGName("mobile_backbones")]
mobile_solutes = system[MGName("mobile_solutes")]
mobile_ligand = system[MGName("mobile_ligand")]
mobile_swap = system[MGName("mobile_swap_water")]
print("Creating the Monte Carlo moves to sample the WSRC system...")
# create the global set of moves that will be applied to
# the system
moves = WeightedMoves()
if fast_sim.val:
# we will only move the water
max_water_translation = 0.15 * angstroms
max_water_rotation = 15 * degrees
if mobile_swap.nViews() > 0:
rb_moves = RigidBodyMC(mobile_swap)
rb_moves.setMaximumTranslation(max_water_translation)
rb_moves.setMaximumRotation(max_water_rotation)
if use_reflect_volume.val:
rb_moves.setReflectionVolume( mobile_ligand[MolIdx(0)], reflect_volume_radius.val )
moves.add(rb_moves, 4 * mobile_swap.nViews())
if mobile_solvent.nViews() > 0:
rb_moves = RigidBodyMC(mobile_solvent)
rb_moves.setMaximumTranslation(max_water_translation)
rb_moves.setMaximumRotation(max_water_rotation)
if system.containsProperty("reflection sphere radius"):
reflection_radius = float(str(system.property("reflection sphere radius"))) * angstroms
reflection_center = system.property("reflection center").toVector()[0]
rb_moves.setReflectionSphere(reflection_center, reflection_radius)
moves.add(rb_moves, 4 * mobile_solvent.nViews())
moves.setTemperature(temperature.val)
seed = random_seed.val
if seed is None:
seed = RanGenerator().randInt(100000,1000000)
print("Using generated random number seed %d" % seed)
else:
print("Using supplied random number seed %d" % seed)
moves.setGenerator( RanGenerator(seed) )
return moves
# we are performing a normal simulation, moving everything
# create zmatrix moves to move the protein sidechains
if mobile_sidechains.nViews() > 0:
sc_moves = ZMatMove(mobile_sidechains)
moves.add( sc_moves, mobile_sidechains.nViews() )
if mobile_backbones.nViews() > 0 and move_backbone.val:
bb_moves = RigidBodyMC(mobile_backbones)
bb_moves.setCenterOfRotation( GetCOGPoint( AtomName("CA", CaseInsensitive),
AtomName("N", CaseInsensitive) ) )
bb_moves.setMaximumTranslation(0.030*angstrom)
bb_moves.setMaximumRotation(1.0*degrees)
moves.add( bb_moves, mobile_backbones.nViews() )
if not use_fixed_ligand.val:
if mobile_ligand.nViews() > 0:
scale_moves = 10
# get the amount to translate and rotate from the ligand's flexibility object
flex = mobile_ligand.moleculeAt(0)[0].molecule().property("flexibility")
if use_rot_trans_ligand.val:
if (flex.translation().value() != 0 or flex.rotation().value() != 0):
rb_moves = RigidBodyMC(mobile_ligand)
rb_moves.setMaximumTranslation(flex.translation())
rb_moves.setMaximumRotation(flex.rotation())
rb_moves.setCenterOfRotation(GetCOMPoint())
# the ligand is not allowed to move away from its original position,
# as we don't want to sample "unbound" states
                if ligand_reflection_radius.val is not None:
rb_moves.setReflectionSphere(mobile_ligand.moleculeAt(0)[0].molecule().evaluate().centerOfMass(),
ligand_reflection_radius.val)
scale_moves = scale_moves / 2
moves.add( rb_moves, scale_moves * mobile_ligand.nViews() )
intra_moves = InternalMove(mobile_ligand)
intra_moves.setCenterOfMolecule(GetCOMPoint())
moves.add( intra_moves, scale_moves * mobile_ligand.nViews() )
if mobile_solutes.nViews() > 0:
rb_moves = RigidBodyMC(mobile_solutes)
if system.containsProperty("average solute translation delta"):
translation_delta = float(str(system.property("average solute translation delta")))
else:
translation_delta = 0
if system.containsProperty("average solute rotation delta"):
rotation_delta = float(str(system.property("average solute rotation delta")))
else:
rotation_delta = 0
if translation_delta > 0 and rotation_delta > 0:
rb_moves.setMaximumTranslation(translation_delta * angstroms)
rb_moves.setMaximumRotation(rotation_delta * degrees)
if system.containsProperty("reflection sphere radius"):
reflection_radius = float(str(system.property("reflection sphere radius"))) * angstroms
reflection_center = system.property("reflection center").toVector()[0]
rb_moves.setReflectionSphere(reflection_center, reflection_radius)
moves.add(rb_moves, 4 * mobile_solutes.nViews())
intra_moves = InternalMove(mobile_solutes)
moves.add(intra_moves, 4 * mobile_solutes.nViews())
max_water_translation = 0.15 * angstroms
max_water_rotation = 15 * degrees
if mobile_swap.nViews() > 0:
rb_moves = RigidBodyMC(mobile_swap)
rb_moves.setMaximumTranslation(max_water_translation)
rb_moves.setMaximumRotation(max_water_rotation)
if use_reflect_volume.val:
rb_moves.setReflectionVolume( mobile_ligand[MolIdx(0)], reflect_volume_radius.val )
moves.add(rb_moves, 4 * mobile_swap.nViews())
if mobile_solvent.nViews() > 0:
rb_moves = RigidBodyMC(mobile_solvent)
rb_moves.setMaximumTranslation(max_water_translation)
rb_moves.setMaximumRotation(max_water_rotation)
if system.containsProperty("reflection sphere radius"):
reflection_radius = float(str(system.property("reflection sphere radius"))) * angstroms
reflection_center = system.property("reflection center").toVector()[0]
rb_moves.setReflectionSphere(reflection_center, reflection_radius)
moves.add(rb_moves, 4 * mobile_solvent.nViews())
moves.setTemperature(temperature.val)
seed = random_seed.val
if seed is None:
seed = RanGenerator().randInt(100000,1000000)
print("Using generated random number seed %d" % seed)
else:
print("Using supplied random number seed %d" % seed)
moves.setGenerator( RanGenerator(seed) )
return moves
def renumberMolecules(molgroup):
newgroup = MoleculeGroup(molgroup.name().value())
for molnum in molgroup.molNums():
mol = molgroup[molnum][0]
newmol = mol.molecule().edit().renumber().commit()
newgroup.add( ViewsOfMol(newmol,mol.selections()) )
return newgroup
def pruneLambda(lamvals):
"""Half the passed number of lambda values"""
n = len(lamvals)
if n % 2 == 0:
mid = int(n/2)
return lamvals[0:mid:2] + lamvals[mid+1::2]
else:
return lamvals[0::2]
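# Worked example (added note): pruneLambda([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
# takes indices 0 and 2 from the first half and index 4 from the second,
# returning [0.0, 0.4, 0.8]; an odd-length list keeps every other value.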
def getLambdaValues():
"""Return the lambda values to use for the simulation. Lambda scale from 0 to 1
and will include the discharging and charging steps. The values are set such
that, if no charging is used, then return lambda_values.val (the lambda values
set by the user). If discharging / charging is used, then discharging is from
lambda = 0-0.25, swapping from 0.25-0.75 and recharging from 0.75-1.0"""
if uncharge_ligand.val:
lamvals = []
charge_lams = copy.deepcopy( uncharge_lambda_values.val )
charge_lams.sort()
swap_lams = copy.deepcopy( lambda_values.val )
swap_lams.sort()
for lam in charge_lams:
if lam >= 0.0 and lam <= 1.0:
lamvals.append( 0.25 * lam )
for lam in swap_lams:
if lam >= 0.0 and lam <= 1.0:
lamvals.append( 0.25 + (0.5*lam) )
charge_lams.reverse()
for lam in charge_lams:
if lam >= 0.0 and lam <= 1.0:
lamvals.append( 0.75 + (0.25*(1.0-lam)) )
if fast_sim.val:
lamvals = pruneLambda(lamvals)
return lamvals
else:
swap_lams = copy.deepcopy( lambda_values.val )
swap_lams.sort()
lamvals = []
for lam in swap_lams:
if lam >= 0.0 and lam <= 1.0:
lamvals.append(lam)
if fast_sim.val:
lamvals = pruneLambda(lamvals)
return lamvals
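# Worked example (added note): with the default "uncharge lambda values"
# [0.0, 0.1, 0.25, 0.45, 0.55, 0.75, 0.9, 1.0], uncharging is squashed into
# lambda = 0-0.25, the user's swap values are mapped onto 0.25-0.75, and the
# reversed uncharge values recharge over 0.75-1.0, giving one monotonic
# schedule on [0, 1].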
def printEnergies(nrgs, FILE):
"""This function prints all of the energies in 'nrgs' to the file 'FILE'"""
keys = list(nrgs.keys())
keys.sort()
for key in keys:
FILE.write("%s == %s kcal mol-1\n" % (key, nrgs[key]))
def mergeSystems(protein_system, water_system, ligand_mol):
print("Merging the protein box and water box to create the WSRC system...")
system = System("WSRC system")
if protein_system.containsProperty("reflection center"):
prot_reflection_center = protein_system.property("reflection center").toVector()[0]
prot_reflection_radius = float(str(protein_system.property("reflection sphere radius")))
wat_reflection_center = water_system.property("reflection center").toVector()[0]
wat_reflection_radius = float(str(water_system.property("reflection sphere radius")))
if prot_reflection_center != wat_reflection_center or \
prot_reflection_radius != wat_reflection_radius:
print("Disagreement of the reflection sphere in the protein and water boxes!")
print("Protein: %s and %s Water: %s and %s" % \
(prot_reflection_center,prot_reflection_radius,
wat_reflection_center,wat_reflection_radius))
sys.exit(-1)
system.setProperty("reflection center", AtomCoords(CoordGroup(1,prot_reflection_center)))
system.setProperty("reflection sphere radius", VariantProperty(prot_reflection_radius))
if protein_system.containsProperty("average solute translation delta"):
system.setProperty("average solute translation delta", \
protein_system.property("average solute translation delta"))
if protein_system.containsProperty("average solute rotation delta"):
system.setProperty("average solute rotation delta", \
protein_system.property("average solute rotation delta"))
# create a molecule group for the ligand
ligand_group = MoleculeGroup("ligand")
ligand_group.add(ligand_mol)
bound_leg = MoleculeGroup("bound_leg")
free_leg = MoleculeGroup("free_leg")
bound_leg.add(ligand_mol)
free_leg.add(ligand_mol)
# pull out the groups that we want from the two systems
# create a group to hold all of the mobile water molecules in the free leg
mobile_free_water_group = MoleculeGroup("mobile_free")
water_mol = None
if MGName("mobile_solvents") in water_system.mgNames():
mols = water_system[MGName("mobile_solvents")].molecules()
for molnum in mols.molNums():
water_mol = mols[molnum][0].molecule().edit().renumber().commit()
for j in range(0,water_mol.nResidues()):
water_mol = water_mol.residue( ResIdx(j) ).edit() \
.setProperty( PDB.parameters().pdbResidueName(), "FWT" ) \
.commit().molecule()
mobile_free_water_group.add(water_mol)
# create a group to hold all of the fixed water molecules in the free leg
fixed_free_water_group = MoleculeGroup("fixed_free")
if MGName("fixed_molecules") in water_system.mgNames():
mols = water_system[MGName("fixed_molecules")].molecules()
for molnum in mols.molNums():
fixed_free_water_group.add( mols[molnum][0].molecule().edit().renumber().commit() )
# create a group to hold all of the fixed molecules in the bound leg
fixed_bound_group = MoleculeGroup("fixed_bound")
if MGName("fixed_molecules") in protein_system.mgNames():
fixed_bound_group.add( protein_system[ MGName("fixed_molecules") ] )
if save_pdb.val:
# write a PDB of the fixed atoms in the bound and free legs
if not os.path.exists(outdir.val):
os.makedirs(outdir.val)
PDB().write(fixed_bound_group, "%s/bound_fixed.pdb" % outdir.val)
PDB().write(fixed_free_water_group, "%s/free_fixed.pdb" % outdir.val)
# create a group to hold all of the mobile solute molecules in the bound leg
mobile_bound_solutes_group = MoleculeGroup("mobile_bound_solutes")
if MGName("mobile_solutes") in protein_system.mgNames():
mobile_bound_solutes_group.add( protein_system[MGName("mobile_solutes")] )
mobile_bound_solutes_group.remove(ligand_mol)
if mobile_bound_solutes_group.nMolecules() > 0:
bound_leg.add(mobile_bound_solutes_group)
# create a group to hold all of the mobile solvent molecules in the bound leg
mobile_bound_solvents_group = MoleculeGroup("mobile_bound_solvents")
mobile_bound_water_group = MoleculeGroup("mobile_bound_water")
if MGName("mobile_solvents") in protein_system.mgNames():
mols = protein_system[MGName("mobile_solvents")]
for molnum in mols.molNums():
solvent_mol = mols[molnum][0].molecule()
try:
# this is a water molecule if we can swap the coordinates with the
                # water molecule from the water box
water_mol.edit().setProperty("coordinates", \
solvent_mol.property("coordinates"))
for j in range(0,solvent_mol.nResidues()):
solvent_mol = solvent_mol.residue( ResIdx(j) ).edit() \
.setProperty( PDB.parameters().pdbResidueName(), "BWT" ) \
.commit().molecule()
mobile_bound_solvents_group.add(solvent_mol)
mobile_bound_water_group.add(solvent_mol)
except:
                # the coordinate swap failed, so this solvent molecule is not
                # compatible with the water model used in the water box
mobile_bound_solvents_group.add(solvent_mol)
print("The number of bound leg mobile solvent molecules is %d." % mobile_bound_solvents_group.nMolecules())
print("The number of these which are compatible water molecules is %d." % mobile_bound_water_group.nMolecules())
# create the groups to hold all of the protein molecules. We will use "extract" to
# pull out only those protein atoms that are in the mobile region
bound_protein_intra_group = MoleculeGroup("bound_protein_intra_group")
mobile_bound_proteins_group = MoleculeGroup("mobile_bound_proteins")
mobile_bound_protein_sidechains_group = MoleculeGroup("mobile_bound_protein_sidechains")
mobile_bound_protein_backbones_group = MoleculeGroup("mobile_bound_protein_backbones")
if MGName("protein_sidechains") in protein_system.mgNames() or \
MGName("protein_backbones") in protein_system.mgNames():
all_proteins = Molecules()
try:
protein_sidechains = protein_system[MGName("protein_sidechains")]
all_proteins.add(protein_sidechains.molecules())
except:
protein_sidechains = MoleculeGroup()
try:
protein_backbones = protein_system[MGName("protein_backbones")]
all_proteins.add(protein_backbones.molecules())
except:
protein_backbones = MoleculeGroup()
try:
boundary_molecules = protein_system[MGName("boundary_molecules")]
all_proteins.add(boundary_molecules.molecules())
except:
boundary_molecules = MoleculeGroup()
for molnum in all_proteins.molNums():
protein_mol = Molecule.join(all_proteins[molnum])
if protein_mol.selectedAll():
bound_protein_intra_group.add(protein_mol)
bound_leg.add(protein_mol)
mobile_protein = []
if protein_sidechains.contains(molnum):
sidechains = protein_sidechains[molnum]
for sidechain in sidechains:
mobile_bound_protein_sidechains_group.add( sidechain )
mobile_protein += sidechains
if protein_backbones.contains(molnum):
backbones = protein_backbones[molnum]
for backbone in backbones:
mobile_bound_protein_backbones_group.add( backbone )
mobile_protein += backbones
if len(mobile_protein) > 0:
mobile_bound_proteins_group.add( Molecule.join(mobile_protein) )
else:
# only some of the atoms have been selected. We will extract
# the mobile atoms and will then update all of the other selections
print("Extracting the mobile atoms of protein %s" % protein_mol.molecule())
new_protein_mol = protein_mol.extract()
print("Extracted %d mobile atoms from %d total atoms..." % \
(new_protein_mol.nAtoms(), protein_mol.molecule().nAtoms()))
bound_protein_intra_group.add(new_protein_mol)
bound_leg.add( new_protein_mol )
mobile_protein_view = new_protein_mol.selection()
mobile_protein_view = mobile_protein_view.selectNone()
if protein_sidechains.contains(molnum):
sidechains = protein_sidechains[molnum]
for sidechain in sidechains:
view = new_protein_mol.selection()
view = view.selectNone()
for atomid in sidechain.selection().selectedAtoms():
atom = protein_mol.atom(atomid)
resatomid = ResAtomID( atom.residue().number(), atom.name() )
view = view.select( resatomid )
mobile_protein_view = mobile_protein_view.select( resatomid )
if view.nSelected() > 0:
mobile_bound_protein_sidechains_group.add( PartialMolecule(new_protein_mol, view) )
if protein_backbones.contains(molnum):
backbones = protein_backbones[molnum]
for backbone in backbones:
view = new_protein_mol.selection()
view = view.selectNone()
for atomid in backbone.selection().selectedAtoms():
atom = protein_mol.atom(atomid)
resatomid = ResAtomID( atom.residue().number(), atom.name() )
view = view.select( resatomid )
mobile_protein_view = mobile_protein_view.select( resatomid )
if view.nSelected() > 0:
mobile_bound_protein_backbones_group.add( PartialMolecule(new_protein_mol, view) )
print("Number of moved protein sidechain residues = %s" % mobile_bound_protein_sidechains_group.nViews())
print("Number of moved protein backbone residues = %s" % mobile_bound_protein_backbones_group.nViews())
if mobile_protein_view.nSelected() > 0:
mobile_bound_proteins_group.add( PartialMolecule(new_protein_mol, mobile_protein_view) )
# finished adding in all of the protein groups
use_identity_constraint = not use_reflect_volume.val
if use_identity_constraint:
# get the identity points for the ligand
print("\nObtaining the identity points...")
if identity_atoms.val is None:
print("Auto-identifying the identity atoms...")
identity_points = getIdentityPoints(ligand_mol)
else:
identity_points = []
for identity_atom in identity_atoms.val:
identity_points.append( ligand_mol.atom( AtomName(identity_atom) ) )
print("Using identity points:")
print(identity_points)
if use_fixed_points.val:
print("\nUsing fixed identity points...")
fixed_points = []
for point in identity_points:
fixed_points.append( point.property("coordinates") )
identity_points = fixed_points
print(identity_points)
print("\nIdentifying the swap-water cluster...")
swap_water_group = MoleculeGroup("swap water")
mobile_free_water_group = IdentityConstraint.constrain( mobile_free_water_group, identity_points )
# Rename the residues of the swap solvent so that they are easy
# to find in the output PDBs. Also remove them from the group as they
# are moved to the swap water group
for i in range(0,len(identity_points)):
            # always grab the first molecule, as the swap water molecules are being removed
swap_water_mol = mobile_free_water_group.moleculeAt(0)
mobile_free_water_group.remove(swap_water_mol.number())
for j in range(0,swap_water_mol.nResidues()):
swap_water_mol = swap_water_mol.residue( ResIdx(j) ).edit() \
.setProperty( PDB.parameters().pdbResidueName(), "SWP" ) \
.commit().molecule()
swap_water_group.add(swap_water_mol)
print("found %d molecules that are now part of the swap water cluster" % swap_water_group.nMolecules())
tmp = MoleculeGroup("tmp")
tmp.add(ligand_mol)
tmp.add(swap_water_group)
PDB().write(tmp, "swapcluster00.pdb")
# now equilibrate the swap water cluster, if requested
if n_equil_swap.val:
move = RigidBodyMC(swap_water_group)
move.setMaximumTranslation(0.15*angstrom)
move.setMaximumRotation(15*degrees)
# use the same random number seed so that the swap water cluster is reproducible
move.setGenerator( RanGenerator(4039251) )
equil_system = System()
ff = InterFF("interff")
ff.setCLJFunction( getInterCLJFunction() )
ff = setNewGridProperties(ff)
ff.add(swap_water_group)
fixed_mols = bound_leg.molecules()
fixed_mols.remove(ligand_mol.number())
ff.addFixedAtoms(fixed_mols)
equil_system.add(swap_water_group)
equil_system.add(ff)
n = n_equil_swap.val
print("Equilibrating the swap water cluster (moves = %d)" % n)
if n > 10:
for i in range(1,11):
move.move(equil_system, int(n / 10), False)
else:
move.move(equil_system, n, False)
swap_water_group = equil_system[ swap_water_group.name() ]
tmp.update(swap_water_group)
PDB().write(tmp, "swapcluster01.pdb")
print("Complete. Equilibrated water molecules in file 'swapcluster01.pdb'")
if use_water_points.val:
# use identity points that are fixed in space based on the current position
# of the center of each molecule of the swap water cluster. This prevents the swap cluster
# from changing shape too much during the calculation
print("\nUsing identity points based on fixed positions in space from the swap water cluster...")
fixed_points = []
for i in range(0,swap_water_group.nMolecules()):
sw = swap_water_group.molecule(MolIdx(i))[0].molecule()
fixed_points.append( VectorPoint(sw.evaluate().centerOfMass()) )
print("Using fixed identity points %s" % fixed_points)
identity_points = fixed_points
else:
# we will be using the reflection volume to get the swap water cluster
swap_water_group = MoleculeGroup("swap water")
move = RigidBodyMC(swap_water_group)
move.setMaximumTranslation(0.15*angstrom)
move.setMaximumRotation(15*degrees)
move.setReflectionVolume(ligand_mol, reflect_volume_radius.val)
# find the swap water cluster by looking at overlapping waters
swap_waters = move.extract(mobile_free_water_group.molecules(), reflect_volume_buffer.val)
print("Swap water cluster based on the %d water molecules overlapping with the ligand." % swap_waters.nMolecules())
for molnum in swap_waters.molNums():
swap_water = swap_waters[molnum][0].molecule()
for j in range(0,swap_water.nResidues()):
swap_water = swap_water.residue( ResIdx(j) ).edit() \
.setProperty( PDB.parameters().pdbResidueName(), "SWP" ) \
.commit().molecule()
swap_water_group.add( swap_water )
mobile_free_water_group.remove(swap_water.number())
# now equilibrate the swap cluster, if requested
if n_equil_reflect.val:
equil_system = System()
ff = InterCLJFF("ff")
ff.add(swap_water_group)
liggroup = MoleculeGroup("liggroup")
liggroup.add(ligand_mol)
equil_system.add(ff)
equil_system.add(swap_water_group)
equil_system.add(liggroup)
PDB().write(equil_system.molecules(), "swapcluster00.pdb")
n = n_equil_reflect.val
if n > 10:
for i in range(1,11):
move.move(equil_system, int(n / 10), False)
PDB().write(equil_system.molecules(), "swapcluster%02d.pdb" % i)
else:
move.move(equil_system, n, False)
PDB().write(equil_system.molecules(), "swapcluster01.pdb")
swap_water_group = equil_system[ swap_water_group.name() ]
bound_leg.add(swap_water_group)
bound_leg.add(mobile_bound_solvents_group)
free_leg.add(swap_water_group)
free_leg.add(mobile_free_water_group)
system.add(bound_leg)
system.add(free_leg)
# now add in the forcefields for the system...
print("Creating the forcefields for the WSRC system...")
# first, group together the molecules grouped above into convenient
# groups for the forcefields
# group holding just the ligand
ligand_mols = ligand_group.molecules()
# group holding just the swap water cluster
swap_water_mols = swap_water_group.molecules()
# group holding all of the mobile atoms in the bound leg
mobile_bound_mols = mobile_bound_solvents_group.molecules()
mobile_bound_mols.add( mobile_bound_solutes_group.molecules() )
mobile_bound_mols.add( bound_protein_intra_group.molecules() )
# group holding all of the mobile atoms in the bound leg, excluding the
# buffer atoms that are fixed, but bonded to mobile atoms
mobile_buffered_bound_mols = mobile_bound_solvents_group.molecules()
mobile_buffered_bound_mols.add( mobile_bound_solutes_group.molecules() )
mobile_buffered_bound_mols.add( mobile_bound_proteins_group.molecules() )
# group holding all of the mobile water molecules in the free leg
mobile_free_mols = mobile_free_water_group.molecules()
# group holding all of the fixed water molecules in the free leg
fixed_free_group = fixed_free_water_group
# group holding all of the protein molecules that need intramolecular terms calculated
bound_protein_intra_mols = bound_protein_intra_group.molecules()
    # group holding all of the solute molecules that need intramolecular terms calculated
bound_solute_intra_mols = mobile_bound_solutes_group.molecules()
###
### INTRA-ENERGY OF THE LIGAND AND CLUSTER
###
# intramolecular energy of the ligand
if use_oldff.val:
ligand_intraclj = IntraCLJFF("ligand:intraclj")
ligand_intraclj = setCLJProperties(ligand_intraclj)
ligand_intraclj.add(ligand_mols)
ligand_intraff = InternalFF("ligand:intra")
ligand_intraff.setUse14Calculation(False)
ligand_intraff.add(ligand_mols)
else:
print("Using the NEW PARALLEL FORCEFIELDS :-)")
# intramolecular energy of the ligands
ligand_intraclj = IntraFF("ligand:intraclj")
ligand_intraclj.setCLJFunction( getIntraCLJFunction() )
ligand_intraclj.add(ligand_mols)
ligand_intraff = InternalFF("ligand:intra")
ligand_intraff.setUse14Calculation(True)
ligand_intraff.add(ligand_mols)
# intramolecular energy of the swap water cluster
if use_oldff.val:
swap_interclj = InterSoftCLJFF("swap:interclj")
swap_interclj = setCLJProperties(swap_interclj)
swap_interclj = setSoftCoreProperties(swap_interclj)
swap_interclj.add(swap_water_mols)
else:
swap_interclj = InterFF("swap:interclj")
swap_interclj.setCLJFunction( getSoftInterCLJFunction() )
swap_interclj.setCLJFunction( "f", getSoftInterCLJFunction() )
swap_interclj.setCLJFunction( "b", getSoftInterCLJFunction() )
swap_interclj.setCLJFunction( "next", getSoftInterCLJFunction() )
swap_interclj.setCLJFunction( "prev", getSoftInterCLJFunction() )
swap_interclj.add(swap_water_mols)
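    # the "f", "b", "next" and "prev" copies of the soft CLJ function allow the
    # forcefield to report the swap cluster's energy at the forwards, backwards,
    # next-window and previous-window lambda states as well as the reference state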
###
### FORCEFIELDS INVOLVING THE LIGAND/CLUSTER BOUND LEG
###
# forcefield holding the energy between the ligand and the mobile atoms in the
# bound leg
if use_oldff.val:
bound_ligand_mobile = InterGroupSoftCLJFF("bound:ligand-mobile")
bound_ligand_mobile = setCLJProperties(bound_ligand_mobile)
bound_ligand_mobile = setSoftCoreProperties(bound_ligand_mobile)
bound_ligand_mobile.add(ligand_mols, MGIdx(0))
bound_ligand_mobile.add(mobile_bound_mols, MGIdx(1))
        # Whether or not to disable the grid and calculate all energies atomistically
        if disable_grid.val:
# we need to renumber all of the fixed molecules so that they don't clash
# with the mobile molecules
print("Renumbering fixed molecules...")
fixed_bound_group = renumberMolecules(fixed_bound_group)
fixed_free_group = renumberMolecules(fixed_free_group)
# forcefield holding the energy between the ligand and the fixed atoms in the bound leg
        if disable_grid.val:
bound_ligand_fixed = InterGroupCLJFF("bound:ligand-fixed")
bound_ligand_fixed = setCLJProperties(bound_ligand_fixed)
bound_ligand_fixed = setFakeGridProperties(bound_ligand_fixed)
bound_ligand_fixed.add(ligand_mols, MGIdx(0))
bound_ligand_fixed.add(fixed_bound_group, MGIdx(1))
else:
bound_ligand_fixed = GridFF2("bound:ligand-fixed")
bound_ligand_fixed = setCLJProperties(bound_ligand_fixed)
bound_ligand_fixed = setGridProperties(bound_ligand_fixed)
bound_ligand_fixed.add(ligand_mols, MGIdx(0))
bound_ligand_fixed.addFixedAtoms( fixed_bound_group )
else:
bound_ligand_mobile = InterGroupFF("bound:ligand-mobile")
bound_ligand_mobile.setCLJFunction( getSoftInterCLJFunction() )
bound_ligand_mobile.setCLJFunction( "f", getSoftInterCLJFunction() )
bound_ligand_mobile.setCLJFunction( "b", getSoftInterCLJFunction() )
bound_ligand_mobile.setCLJFunction( "next", getSoftInterCLJFunction() )
bound_ligand_mobile.setCLJFunction( "prev", getSoftInterCLJFunction() )
bound_ligand_mobile.add(ligand_mols, MGIdx(0))
bound_ligand_mobile.add(mobile_bound_mols, MGIdx(1))
        bound_ligand_fixed = InterGroupFF("bound:ligand-fixed")
bound_ligand_fixed.setCLJFunction( getInterCLJFunction() )
bound_ligand_fixed = setNewGridProperties(bound_ligand_fixed)
bound_ligand_fixed.add(ligand_mols, MGIdx(0))
bound_ligand_fixed.addFixedAtoms(fixed_bound_group.molecules())
# forcefield holding the energy between the swap water cluster and the mobile
# atoms in the bound leg
if use_oldff.val:
bound_swap_mobile = InterGroupSoftCLJFF("bound:swap-mobile")
bound_swap_mobile = setCLJProperties(bound_swap_mobile)
bound_swap_mobile = setSoftCoreProperties(bound_swap_mobile)
bound_swap_mobile.add(swap_water_mols, MGIdx(0))
bound_swap_mobile.add(mobile_bound_mols, MGIdx(1))
# forcefield holding the energy between the swap water cluster and the
# fixed atoms in the bound leg
        if disable_grid.val:
bound_swap_fixed = InterGroupCLJFF("bound:swap-fixed")
bound_swap_fixed = setCLJProperties(bound_swap_fixed)
bound_swap_fixed = setFakeGridProperties(bound_swap_fixed)
bound_swap_fixed.add(swap_water_mols, MGIdx(0))
bound_swap_fixed.add( fixed_bound_group, MGIdx(1) )
else:
bound_swap_fixed = GridFF2("bound:swap-fixed")
bound_swap_fixed = setCLJProperties(bound_swap_fixed)
# The swap water cluster is more mobile, so needs a bigger grid buffer
bound_swap_fixed = setGridProperties(bound_swap_fixed, 6*angstrom)
bound_swap_fixed.add(swap_water_mols, MGIdx(0))
bound_swap_fixed.addFixedAtoms(fixed_bound_group)
else:
bound_swap_mobile = InterGroupFF("bound:swap-mobile")
bound_swap_mobile.setCLJFunction( getSoftInterCLJFunction() )
bound_swap_mobile.setCLJFunction( "f", getSoftInterCLJFunction() )
bound_swap_mobile.setCLJFunction( "b", getSoftInterCLJFunction() )
bound_swap_mobile.setCLJFunction( "next", getSoftInterCLJFunction() )
bound_swap_mobile.setCLJFunction( "prev", getSoftInterCLJFunction() )
bound_swap_mobile.add(swap_water_mols, MGIdx(0))
bound_swap_mobile.add(mobile_bound_mols, MGIdx(1))
bound_swap_fixed = InterGroupFF("bound:swap-fixed")
bound_swap_fixed.setCLJFunction( getInterCLJFunction() )
bound_swap_fixed = setNewGridProperties(bound_swap_fixed)
bound_swap_fixed.add(swap_water_mols, MGIdx(0))
bound_swap_fixed.addFixedAtoms(fixed_bound_group.molecules())
###
### FORCEFIELDS INVOLVING THE LIGAND/CLUSTER FREE LEG
###
# forcefield holding the energy between the ligand and the mobile atoms
# in the free leg
if use_oldff.val:
free_ligand_mobile = InterGroupSoftCLJFF("free:ligand-mobile")
free_ligand_mobile = setCLJProperties(free_ligand_mobile)
free_ligand_mobile = setSoftCoreProperties(free_ligand_mobile)
free_ligand_mobile.add(ligand_mols, MGIdx(0))
free_ligand_mobile.add(mobile_free_mols, MGIdx(1))
# forcefield holding the energy between the ligand and the fixed atoms
# in the free leg
if disable_grid.val:
            free_ligand_fixed = InterGroupCLJFF("free:ligand-fixed")
free_ligand_fixed = setCLJProperties(free_ligand_fixed)
free_ligand_fixed = setFakeGridProperties(free_ligand_fixed)
free_ligand_fixed.add(ligand_mols, MGIdx(0))
free_ligand_fixed.add(fixed_free_group, MGIdx(1))
else:
free_ligand_fixed = GridFF2("free:ligand-fixed")
free_ligand_fixed = setCLJProperties(free_ligand_fixed)
free_ligand_fixed = setGridProperties(free_ligand_fixed)
free_ligand_fixed.add(ligand_mols, MGIdx(0))
free_ligand_fixed.addFixedAtoms(fixed_free_group)
else:
free_ligand_mobile = InterGroupFF("free:ligand-mobile")
free_ligand_mobile.setCLJFunction( getSoftInterCLJFunction() )
free_ligand_mobile.setCLJFunction( "f", getSoftInterCLJFunction() )
free_ligand_mobile.setCLJFunction( "b", getSoftInterCLJFunction() )
free_ligand_mobile.setCLJFunction( "next", getSoftInterCLJFunction() )
free_ligand_mobile.setCLJFunction( "prev", getSoftInterCLJFunction() )
free_ligand_mobile.add(ligand_mols, MGIdx(0))
free_ligand_mobile.add(mobile_free_mols, MGIdx(1))
free_ligand_fixed = InterGroupFF("free:ligand-fixed")
free_ligand_fixed.setCLJFunction( getInterCLJFunction() )
free_ligand_fixed = setNewGridProperties(free_ligand_fixed)
free_ligand_fixed.add(ligand_mols, MGIdx(0))
free_ligand_fixed.addFixedAtoms(fixed_free_group.molecules())
# forcefield holding the energy between the swap water cluster and the
# mobile atoms of the free leg
if use_oldff.val:
free_swap_mobile = InterGroupSoftCLJFF("free:swap-mobile")
free_swap_mobile = setCLJProperties(free_swap_mobile)
free_swap_mobile = setSoftCoreProperties(free_swap_mobile)
free_swap_mobile.add(swap_water_mols, MGIdx(0))
free_swap_mobile.add(mobile_free_mols, MGIdx(1))
# forcefield holding the energy between the swap water cluster and the
# fixed atoms in the free leg
if disable_grid.val:
free_swap_fixed = InterGroupCLJFF("free:swap-fixed")
free_swap_fixed = setCLJProperties(free_swap_fixed)
free_swap_fixed = setFakeGridProperties(free_swap_fixed)
free_swap_fixed.add(swap_water_mols, MGIdx(0))
free_swap_fixed.add(fixed_free_group, MGIdx(1))
else:
free_swap_fixed = GridFF2("free:swap-fixed")
free_swap_fixed = setCLJProperties(free_swap_fixed)
# The swap water cluster is more mobile so needs a bigger grid buffer
free_swap_fixed = setGridProperties(free_swap_fixed, 6*angstrom)
free_swap_fixed.add(swap_water_mols, MGIdx(0))
free_swap_fixed.addFixedAtoms(fixed_free_group)
else:
free_swap_mobile = InterGroupFF("free:swap-mobile")
free_swap_mobile.setCLJFunction( getSoftInterCLJFunction() )
free_swap_mobile.setCLJFunction( "f", getSoftInterCLJFunction() )
free_swap_mobile.setCLJFunction( "b", getSoftInterCLJFunction() )
free_swap_mobile.setCLJFunction( "next", getSoftInterCLJFunction() )
free_swap_mobile.setCLJFunction( "prev", getSoftInterCLJFunction() )
free_swap_mobile.add(swap_water_mols, MGIdx(0))
free_swap_mobile.add(mobile_free_mols, MGIdx(1))
free_swap_fixed = InterGroupFF("free:swap-fixed")
free_swap_fixed.setCLJFunction( getInterCLJFunction() )
free_swap_fixed = setNewGridProperties(free_swap_fixed)
free_swap_fixed.add(swap_water_mols, MGIdx(0))
free_swap_fixed.addFixedAtoms(fixed_free_group.molecules())
###
### FORCEFIELDS LOCAL ONLY TO THE BOUND LEG
###
bound_forcefields = []
if use_oldff.val:
# forcefield holding the energy between the bound leg mobile atoms and
# the bound leg fixed atoms
if disable_grid.val:
bound_mobile_fixed = InterGroupCLJFF("bound:mobile-fixed")
bound_mobile_fixed = setCLJProperties(bound_mobile_fixed)
bound_mobile_fixed = setFakeGridProperties(bound_mobile_fixed)
bound_mobile_fixed.add(mobile_buffered_bound_mols, MGIdx(0))
bound_mobile_fixed.add(fixed_bound_group, MGIdx(1))
bound_forcefields.append(bound_mobile_fixed)
else:
bound_mobile_fixed = GridFF2("bound:mobile-fixed")
bound_mobile_fixed = setCLJProperties(bound_mobile_fixed)
bound_mobile_fixed = setGridProperties(bound_mobile_fixed)
# we use mobile_buffered_bound_group as this group misses out atoms that are bonded
# to fixed atoms (thus preventing large energies caused by incorrect non-bonded calculations)
bound_mobile_fixed.add(mobile_buffered_bound_mols, MGIdx(0))
bound_mobile_fixed.addFixedAtoms(fixed_bound_group)
bound_forcefields.append(bound_mobile_fixed)
# forcefield holding the intermolecular energy between all bound molecules
bound_mobile_mobile = InterCLJFF("bound:mobile-mobile")
bound_mobile_mobile = setCLJProperties(bound_mobile_mobile)
bound_mobile_mobile.add(mobile_bound_mols)
bound_forcefields.append(bound_mobile_mobile)
else:
# forcefield holding the energy between
# the bound molecules and bound fixed atoms
bound_mobile_fixed = InterGroupFF("bound:mobile-fixed")
bound_mobile_fixed.setCLJFunction( getInterCLJFunction() )
bound_mobile_fixed = setNewGridProperties(bound_mobile_fixed)
# we use mobile_buffered_bound_group as this group misses out atoms that are bonded
# to fixed atoms (thus preventing large energies caused by incorrect non-bonded calculations)
bound_mobile_fixed.add(mobile_buffered_bound_mols, MGIdx(0))
bound_mobile_fixed.addFixedAtoms(fixed_bound_group.molecules())
bound_forcefields.append(bound_mobile_fixed)
# forcefield holding the energy between all bound mobile molecules
bound_mobile_mobile = InterFF("bound:mobile-mobile")
bound_mobile_mobile.setCLJFunction( getInterCLJFunction() )
bound_mobile_mobile.add(mobile_bound_mols)
bound_forcefields.append(bound_mobile_mobile)
# intramolecular energy of the protein
if bound_protein_intra_mols.nMolecules() > 0:
if use_oldff.val:
protein_intraclj = IntraCLJFF("bound:protein_intraclj")
protein_intraclj = setCLJProperties(protein_intraclj)
protein_intraff = InternalFF("bound:protein_intra")
protein_intraff.setUse14Calculation(False)
for molnum in bound_protein_intra_mols.molNums():
protein_mol = Molecule.join(bound_protein_intra_mols[molnum])
protein_intraclj.add(protein_mol)
protein_intraff.add(protein_mol)
bound_forcefields.append(protein_intraclj)
bound_forcefields.append(protein_intraff)
else:
protein_intraclj = IntraFF("bound:protein_intraclj")
protein_intraclj.setCLJFunction( getIntraCLJFunction() )
protein_intraff = InternalFF("bound:protein_intra")
protein_intraff.setUse14Calculation(True)
for molnum in bound_protein_intra_mols.molNums():
protein_mol = Molecule.join(bound_protein_intra_mols[molnum])
protein_intraclj.add(protein_mol)
protein_intraff.add(protein_mol)
bound_forcefields.append(protein_intraclj)
bound_forcefields.append(protein_intraff)
# intramolecular energy of any other solutes
if bound_solute_intra_mols.nMolecules() > 0:
if use_oldff.val:
solute_intraclj = IntraCLJFF("bound:solute_intraclj")
solute_intraclj = setCLJProperties(solute_intraclj)
solute_intraff = InternalFF("bound:solute_intra")
solute_intraff.setUse14Calculation(False)
for molnum in bound_solute_intra_mols.molNums():
solute_mol = Molecule.join(bound_solute_intra_mols[molnum])
solute_intraclj.add(solute_mol)
solute_intraff.add(solute_mol)
bound_forcefields.append(solute_intraclj)
bound_forcefields.append(solute_intraff)
else:
solute_intraclj = IntraFF("bound:solute_intraclj")
solute_intraclj.setCLJFunction( getIntraCLJFunction() )
solute_intraff = InternalFF("bound:solute_intra")
solute_intraff.setUse14Calculation(True)
for molnum in bound_solute_intra_mols.molNums():
solute_mol = Molecule.join(bound_solute_intra_mols[molnum])
solute_intraclj.add(solute_mol)
solute_intraff.add(solute_mol)
bound_forcefields.append(solute_intraclj)
bound_forcefields.append(solute_intraff)
###
### FORCEFIELDS LOCAL ONLY TO THE FREE LEG
###
free_forcefields = []
if use_oldff.val:
# forcefield holding the energy between the mobile free molecules and the
# fixed free molecules
        if disable_grid.val:
free_mobile_fixed = InterGroupCLJFF("free:mobile-fixed")
free_mobile_fixed = setCLJProperties(free_mobile_fixed)
free_mobile_fixed = setFakeGridProperties(free_mobile_fixed)
free_mobile_fixed.add(mobile_free_mols, MGIdx(0))
free_mobile_fixed.add(fixed_free_group, MGIdx(1))
free_forcefields.append(free_mobile_fixed)
else:
free_mobile_fixed = GridFF2("free:mobile-fixed")
free_mobile_fixed = setCLJProperties(free_mobile_fixed)
free_mobile_fixed = setGridProperties(free_mobile_fixed)
free_mobile_fixed.add(mobile_free_mols, MGIdx(0))
free_mobile_fixed.addFixedAtoms(fixed_free_group)
free_forcefields.append(free_mobile_fixed)
# forcefield holding the intermolecular energy between the mobile free molecules
free_mobile_mobile = InterCLJFF("free:mobile-mobile")
free_mobile_mobile = setCLJProperties(free_mobile_mobile)
free_mobile_mobile.add(mobile_free_mols)
free_forcefields.append(free_mobile_mobile)
else:
# forcefield holding the energy between the mobile free molecules, and their
# interaction with the fixed free molecules
free_mobile = InterFF("free:mobile")
free_mobile.setCLJFunction( getInterCLJFunction() )
free_mobile = setNewGridProperties( free_mobile )
free_mobile.add(mobile_free_mols)
free_mobile.addFixedAtoms(fixed_free_group.molecules())
free_forcefields.append(free_mobile)
###
### NOW ADD THE FORCEFIELDS TO THE SYSTEM
###
###
### SETTING THE FORCEFIELD EXPRESSIONS
###
ligand_int_nrg_sym = Symbol("E_{ligand:internal}")
ligand_int_nrg_f_sym = Symbol("E_{ligand:internal_{f}}")
ligand_int_nrg_b_sym = Symbol("E_{ligand:internal_{b}}")
ligand_int_nrg_next_sym = Symbol("E_{ligand:internal_{next}}")
ligand_int_nrg_prev_sym = Symbol("E_{ligand:internal_{prev}}")
ligand_bound_coul_nrg_sym = Symbol("E_{ligand:bound_coul}")
ligand_bound_lj_nrg_sym = Symbol("E_{ligand:bound_lj}")
ligand_bound_coul_nrg_f_sym = Symbol("E_{ligand:bound_coul{f}}")
ligand_bound_lj_nrg_f_sym = Symbol("E_{ligand:bound_lj{f}}")
ligand_bound_coul_nrg_b_sym = Symbol("E_{ligand:bound_coul{b}}")
ligand_bound_lj_nrg_b_sym = Symbol("E_{ligand:bound_lj{b}}")
ligand_bound_coul_nrg_next_sym = Symbol("E_{ligand:bound_coul{next}}")
ligand_bound_lj_nrg_next_sym = Symbol("E_{ligand:bound_lj{next}}")
ligand_bound_coul_nrg_prev_sym = Symbol("E_{ligand:bound_coul{prev}}")
ligand_bound_lj_nrg_prev_sym = Symbol("E_{ligand:bound_lj{prev}}")
ligand_free_coul_nrg_sym = Symbol("E_{ligand:free_coul}")
ligand_free_lj_nrg_sym = Symbol("E_{ligand:free_lj}")
ligand_free_coul_nrg_f_sym = Symbol("E_{ligand:free_coul{f}}")
ligand_free_lj_nrg_f_sym = Symbol("E_{ligand:free_lj{f}}")
ligand_free_coul_nrg_b_sym = Symbol("E_{ligand:free_coul{b}}")
ligand_free_lj_nrg_b_sym = Symbol("E_{ligand:free_lj{b}}")
ligand_free_coul_nrg_next_sym = Symbol("E_{ligand:free_coul{next}}")
ligand_free_lj_nrg_next_sym = Symbol("E_{ligand:free_lj{next}}")
ligand_free_coul_nrg_prev_sym = Symbol("E_{ligand:free_coul{prev}}")
ligand_free_lj_nrg_prev_sym = Symbol("E_{ligand:free_lj{prev}}")
ligand_int_nrg = ligand_intraclj.components().total() + \
ligand_intraff.components().total()
ligand_int_nrg_f = ligand_intraclj.components().total() + \
ligand_intraff.components().total()
ligand_int_nrg_b = ligand_intraclj.components().total() + \
ligand_intraff.components().total()
ligand_int_nrg_next = ligand_intraclj.components().total() + \
ligand_intraff.components().total()
ligand_int_nrg_prev = ligand_intraclj.components().total() + \
ligand_intraff.components().total()
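    # note that the ligand's intramolecular energy is independent of lambda, so
    # the same expression is deliberately reused for the reference, f, b, next
    # and prev states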
bound_ligand_fixed_coul_nrg = bound_ligand_fixed.components().coulomb()
bound_ligand_fixed_lj_nrg = bound_ligand_fixed.components().lj()
free_ligand_fixed_coul_nrg = free_ligand_fixed.components().coulomb()
free_ligand_fixed_lj_nrg = free_ligand_fixed.components().lj()
bound_swap_fixed_coul_nrg = bound_swap_fixed.components().coulomb()
bound_swap_fixed_lj_nrg = bound_swap_fixed.components().lj()
free_swap_fixed_coul_nrg = free_swap_fixed.components().coulomb()
free_swap_fixed_lj_nrg = free_swap_fixed.components().lj()
if use_oldff.val:
ligand_bound_coul_nrg = bound_ligand_mobile.components().coulomb(0) + \
bound_ligand_fixed_coul_nrg
ligand_bound_lj_nrg = bound_ligand_mobile.components().lj(0) + \
bound_ligand_fixed_lj_nrg
ligand_bound_coul_nrg_f = bound_ligand_mobile.components().coulomb(1) + \
bound_ligand_fixed_coul_nrg
ligand_bound_lj_nrg_f = bound_ligand_mobile.components().lj(1) + \
bound_ligand_fixed_lj_nrg
ligand_bound_coul_nrg_b = bound_ligand_mobile.components().coulomb(2) + \
bound_ligand_fixed_coul_nrg
ligand_bound_lj_nrg_b = bound_ligand_mobile.components().lj(2) + \
bound_ligand_fixed_lj_nrg
ligand_bound_coul_nrg_next = bound_ligand_mobile.components().coulomb(3) + \
bound_ligand_fixed_coul_nrg
ligand_bound_lj_nrg_next = bound_ligand_mobile.components().lj(3) + \
bound_ligand_fixed_lj_nrg
ligand_bound_coul_nrg_prev = bound_ligand_mobile.components().coulomb(4) + \
bound_ligand_fixed_coul_nrg
ligand_bound_lj_nrg_prev = bound_ligand_mobile.components().lj(4) + \
bound_ligand_fixed_lj_nrg
ligand_free_coul_nrg = free_ligand_mobile.components().coulomb(0) + \
free_ligand_fixed_coul_nrg
ligand_free_lj_nrg = free_ligand_mobile.components().lj(0) + \
free_ligand_fixed_lj_nrg
ligand_free_coul_nrg_f = free_ligand_mobile.components().coulomb(1) + \
free_ligand_fixed_coul_nrg
ligand_free_lj_nrg_f = free_ligand_mobile.components().lj(1) + \
free_ligand_fixed_lj_nrg
ligand_free_coul_nrg_b = free_ligand_mobile.components().coulomb(2) + \
free_ligand_fixed_coul_nrg
ligand_free_lj_nrg_b = free_ligand_mobile.components().lj(2) + \
free_ligand_fixed_lj_nrg
ligand_free_coul_nrg_next = free_ligand_mobile.components().coulomb(3) + \
free_ligand_fixed_coul_nrg
ligand_free_lj_nrg_next = free_ligand_mobile.components().lj(3) + \
free_ligand_fixed_lj_nrg
ligand_free_coul_nrg_prev = free_ligand_mobile.components().coulomb(4) + \
free_ligand_fixed_coul_nrg
ligand_free_lj_nrg_prev = free_ligand_mobile.components().lj(4) + \
free_ligand_fixed_lj_nrg
else:
ligand_bound_coul_nrg = bound_ligand_mobile.components().coulomb() + \
bound_ligand_fixed_coul_nrg
ligand_bound_lj_nrg = bound_ligand_mobile.components().lj() + \
bound_ligand_fixed_lj_nrg
ligand_bound_coul_nrg_f = bound_ligand_mobile.components().coulomb("f") + \
bound_ligand_fixed_coul_nrg
ligand_bound_lj_nrg_f = bound_ligand_mobile.components().lj("f") + \
bound_ligand_fixed_lj_nrg
ligand_bound_coul_nrg_b = bound_ligand_mobile.components().coulomb("b") + \
bound_ligand_fixed_coul_nrg
ligand_bound_lj_nrg_b = bound_ligand_mobile.components().lj("b") + \
bound_ligand_fixed_lj_nrg
ligand_bound_coul_nrg_next = bound_ligand_mobile.components().coulomb("next") + \
bound_ligand_fixed_coul_nrg
ligand_bound_lj_nrg_next = bound_ligand_mobile.components().lj("next") + \
bound_ligand_fixed_lj_nrg
ligand_bound_coul_nrg_prev = bound_ligand_mobile.components().coulomb("prev") + \
bound_ligand_fixed_coul_nrg
ligand_bound_lj_nrg_prev = bound_ligand_mobile.components().lj("prev") + \
bound_ligand_fixed_lj_nrg
ligand_free_coul_nrg = free_ligand_mobile.components().coulomb() + \
free_ligand_fixed_coul_nrg
ligand_free_lj_nrg = free_ligand_mobile.components().lj() + \
free_ligand_fixed_lj_nrg
ligand_free_coul_nrg_f = free_ligand_mobile.components().coulomb("f") + \
free_ligand_fixed_coul_nrg
ligand_free_lj_nrg_f = free_ligand_mobile.components().lj("f") + \
free_ligand_fixed_lj_nrg
ligand_free_coul_nrg_b = free_ligand_mobile.components().coulomb("b") + \
free_ligand_fixed_coul_nrg
ligand_free_lj_nrg_b = free_ligand_mobile.components().lj("b") + \
free_ligand_fixed_lj_nrg
ligand_free_coul_nrg_next = free_ligand_mobile.components().coulomb("next") + \
free_ligand_fixed_coul_nrg
ligand_free_lj_nrg_next = free_ligand_mobile.components().lj("next") + \
free_ligand_fixed_lj_nrg
ligand_free_coul_nrg_prev = free_ligand_mobile.components().coulomb("prev") + \
free_ligand_fixed_coul_nrg
ligand_free_lj_nrg_prev = free_ligand_mobile.components().lj("prev") + \
free_ligand_fixed_lj_nrg
lam = Symbol("lambda")
lam_f = Symbol("lambda_{f}")
lam_b = Symbol("lambda_{b}")
lam_next = Symbol("lambda_{next}")
lam_prev = Symbol("lambda_{prev}")
lam_coul_on = Symbol("lambda_coul_on")
lam_coul_on_f = Symbol("lambda_coul_on_f")
lam_coul_on_b = Symbol("lambda_coul_on_b")
lam_coul_on_next = Symbol("lambda_coul_on_next")
lam_coul_on_prev = Symbol("lambda_coul_on_prev")
lam_coul_off = Symbol("lambda_coul_off")
lam_coul_off_f = Symbol("lambda_coul_off_f")
lam_coul_off_b = Symbol("lambda_coul_off_b")
lam_coul_off_next = Symbol("lambda_coul_off_next")
lam_coul_off_prev = Symbol("lambda_coul_off_prev")
lam_lj_on = Symbol("lambda_lj_on")
lam_lj_on_f = Symbol("lambda_lj_on_f")
lam_lj_on_b = Symbol("lambda_lj_on_b")
lam_lj_on_next = Symbol("lambda_lj_on_next")
lam_lj_on_prev = Symbol("lambda_lj_on_prev")
lam_lj_off = Symbol("lambda_lj_off")
lam_lj_off_f = Symbol("lambda_lj_off_f")
lam_lj_off_b = Symbol("lambda_lj_off_b")
lam_lj_off_next = Symbol("lambda_lj_off_next")
lam_lj_off_prev = Symbol("lambda_lj_off_prev")
lam_coul_swap = Symbol("lambda_coul_swap")
lam_coul_swap_f = Symbol("lambda_coul_swap_f")
lam_coul_swap_b = Symbol("lambda_coul_swap_b")
lam_coul_swap_next = Symbol("lambda_coul_swap_next")
lam_coul_swap_prev = Symbol("lambda_coul_swap_prev")
lam_lj_swap = Symbol("lambda_lj_swap")
lam_lj_swap_f = Symbol("lambda_lj_swap_f")
lam_lj_swap_b = Symbol("lambda_lj_swap_b")
lam_lj_swap_next = Symbol("lambda_lj_swap_next")
lam_lj_swap_prev = Symbol("lambda_lj_swap_prev")
S_sym = Symbol("S")
S_scl = S_sym - 4*(S_sym-1)*(lam-0.5)**2
S_scl_f = S_sym - 4*(S_sym-1)*(lam_f-0.5)**2
S_scl_b = S_sym - 4*(S_sym-1)*(lam_b-0.5)**2
S_scl_next = S_sym - 4*(S_sym-1)*(lam_next-0.5)**2
S_scl_prev = S_sym - 4*(S_sym-1)*(lam_prev-0.5)**2
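    # S_scl is a parabola in lambda: it equals 1 at lambda = 0 and lambda = 1,
    # and equals S at lambda = 0.5, so the coulomb softening of the swap water
    # cluster is strongest mid-perturbation and switched off at the end states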
swap_int_nrg_sym = Symbol("E_{swap:internal}")
swap_int_nrg_f_sym = Symbol("E_{swap:internal_{f}}")
swap_int_nrg_b_sym = Symbol("E_{swap:internal_{b}}")
swap_int_nrg_next_sym = Symbol("E_{swap:internal_{next}}")
swap_int_nrg_prev_sym = Symbol("E_{swap:internal_{prev}}")
if use_oldff.val:
swap_int_nrg = (lam_coul_swap * S_scl * swap_interclj.components().coulomb(0)) + \
(lam_lj_swap * swap_interclj.components().lj(0))
swap_int_nrg_f = (lam_coul_swap_f * S_scl_f * swap_interclj.components().coulomb(1)) + \
(lam_lj_swap_f * swap_interclj.components().lj(1))
swap_int_nrg_b = (lam_coul_swap_b * S_scl_b * swap_interclj.components().coulomb(2)) + \
(lam_lj_swap_b * swap_interclj.components().lj(2))
swap_int_nrg_next = (lam_coul_swap_next * S_scl_next * swap_interclj.components().coulomb(3)) + \
(lam_lj_swap_next * swap_interclj.components().lj(3))
swap_int_nrg_prev = (lam_coul_swap_prev * S_scl_prev * swap_interclj.components().coulomb(4)) + \
(lam_lj_swap_prev * swap_interclj.components().lj(4))
else:
swap_int_nrg = (lam_coul_swap * S_scl * swap_interclj.components().coulomb()) + \
(lam_lj_swap * swap_interclj.components().lj())
swap_int_nrg_f = (lam_coul_swap_f * S_scl_f * swap_interclj.components().coulomb("f")) + \
(lam_lj_swap_f * swap_interclj.components().lj("f"))
swap_int_nrg_b = (lam_coul_swap_b * S_scl_b * swap_interclj.components().coulomb("b")) + \
(lam_lj_swap_b * swap_interclj.components().lj("b"))
swap_int_nrg_next = (lam_coul_swap_next * S_scl_next * swap_interclj.components().coulomb("next")) + \
(lam_lj_swap_next * swap_interclj.components().lj("next"))
swap_int_nrg_prev = (lam_coul_swap_prev * S_scl_prev * swap_interclj.components().coulomb("prev")) + \
(lam_lj_swap_prev * swap_interclj.components().lj("prev"))
swap_bound_coul_nrg_sym = Symbol("E_{swap:bound_coul}")
swap_bound_lj_nrg_sym = Symbol("E_{swap:bound_lj}")
swap_bound_coul_nrg_f_sym = Symbol("E_{swap:bound_coul{f}}")
swap_bound_lj_nrg_f_sym = Symbol("E_{swap:bound_lj{f}}")
swap_bound_coul_nrg_b_sym = Symbol("E_{swap:bound_coul{b}}")
swap_bound_lj_nrg_b_sym = Symbol("E_{swap:bound_lj{b}}")
swap_bound_coul_nrg_next_sym = Symbol("E_{swap:bound_coul{next}}")
swap_bound_lj_nrg_next_sym = Symbol("E_{swap:bound_lj{next}}")
swap_bound_coul_nrg_prev_sym = Symbol("E_{swap:bound_coul{prev}}")
swap_bound_lj_nrg_prev_sym = Symbol("E_{swap:bound_lj{prev}}")
swap_free_coul_nrg_sym = Symbol("E_{swap:free_coul}")
swap_free_lj_nrg_sym = Symbol("E_{swap:free_lj}")
swap_free_coul_nrg_f_sym = Symbol("E_{swap:free_coul{f}}")
swap_free_lj_nrg_f_sym = Symbol("E_{swap:free_lj{f}}")
swap_free_coul_nrg_b_sym = Symbol("E_{swap:free_coul{b}}")
swap_free_lj_nrg_b_sym = Symbol("E_{swap:free_lj{b}}")
swap_free_coul_nrg_next_sym = Symbol("E_{swap:free_coul{next}}")
swap_free_lj_nrg_next_sym = Symbol("E_{swap:free_lj{next}}")
swap_free_coul_nrg_prev_sym = Symbol("E_{swap:free_coul{prev}}")
swap_free_lj_nrg_prev_sym = Symbol("E_{swap:free_lj{prev}}")
if use_oldff.val:
swap_bound_coul_nrg = bound_swap_mobile.components().coulomb(0) + \
bound_swap_fixed_coul_nrg
swap_bound_lj_nrg = bound_swap_mobile.components().lj(0) + \
bound_swap_fixed_lj_nrg
swap_bound_coul_nrg_f = bound_swap_mobile.components().coulomb(1) + \
bound_swap_fixed_coul_nrg
swap_bound_lj_nrg_f = bound_swap_mobile.components().lj(1) + \
bound_swap_fixed_lj_nrg
swap_bound_coul_nrg_b = bound_swap_mobile.components().coulomb(2) + \
bound_swap_fixed_coul_nrg
swap_bound_lj_nrg_b = bound_swap_mobile.components().lj(2) + \
bound_swap_fixed_lj_nrg
swap_bound_coul_nrg_next = bound_swap_mobile.components().coulomb(3) + \
bound_swap_fixed_coul_nrg
swap_bound_lj_nrg_next = bound_swap_mobile.components().lj(3) + \
bound_swap_fixed_lj_nrg
swap_bound_coul_nrg_prev = bound_swap_mobile.components().coulomb(4) + \
bound_swap_fixed_coul_nrg
swap_bound_lj_nrg_prev = bound_swap_mobile.components().lj(4) + \
bound_swap_fixed_lj_nrg
swap_free_coul_nrg = free_swap_mobile.components().coulomb(0) + \
free_swap_fixed_coul_nrg
swap_free_lj_nrg = free_swap_mobile.components().lj(0) + \
free_swap_fixed_lj_nrg
swap_free_coul_nrg_f = free_swap_mobile.components().coulomb(1) + \
free_swap_fixed_coul_nrg
swap_free_lj_nrg_f = free_swap_mobile.components().lj(1) + \
free_swap_fixed_lj_nrg
swap_free_coul_nrg_b = free_swap_mobile.components().coulomb(2) + \
free_swap_fixed_coul_nrg
swap_free_lj_nrg_b = free_swap_mobile.components().lj(2) + \
free_swap_fixed_lj_nrg
swap_free_coul_nrg_next = free_swap_mobile.components().coulomb(3) + \
free_swap_fixed_coul_nrg
swap_free_lj_nrg_next = free_swap_mobile.components().lj(3) + \
free_swap_fixed_lj_nrg
swap_free_coul_nrg_prev = free_swap_mobile.components().coulomb(4) + \
free_swap_fixed_coul_nrg
swap_free_lj_nrg_prev = free_swap_mobile.components().lj(4) + \
free_swap_fixed_lj_nrg
else:
swap_bound_coul_nrg = bound_swap_mobile.components().coulomb() + \
bound_swap_fixed_coul_nrg
swap_bound_lj_nrg = bound_swap_mobile.components().lj() + \
bound_swap_fixed_lj_nrg
swap_bound_coul_nrg_f = bound_swap_mobile.components().coulomb("f") + \
bound_swap_fixed_coul_nrg
swap_bound_lj_nrg_f = bound_swap_mobile.components().lj("f") + \
bound_swap_fixed_lj_nrg
swap_bound_coul_nrg_b = bound_swap_mobile.components().coulomb("b") + \
bound_swap_fixed_coul_nrg
swap_bound_lj_nrg_b = bound_swap_mobile.components().lj("b") + \
bound_swap_fixed_lj_nrg
swap_bound_coul_nrg_next = bound_swap_mobile.components().coulomb("next") + \
bound_swap_fixed_coul_nrg
swap_bound_lj_nrg_next = bound_swap_mobile.components().lj("next") + \
bound_swap_fixed_lj_nrg
swap_bound_coul_nrg_prev = bound_swap_mobile.components().coulomb("prev") + \
bound_swap_fixed_coul_nrg
swap_bound_lj_nrg_prev = bound_swap_mobile.components().lj("prev") + \
bound_swap_fixed_lj_nrg
swap_free_coul_nrg = free_swap_mobile.components().coulomb() + \
free_swap_fixed_coul_nrg
swap_free_lj_nrg = free_swap_mobile.components().lj() + \
free_swap_fixed_lj_nrg
swap_free_coul_nrg_f = free_swap_mobile.components().coulomb("f") + \
free_swap_fixed_coul_nrg
swap_free_lj_nrg_f = free_swap_mobile.components().lj("f") + \
free_swap_fixed_lj_nrg
swap_free_coul_nrg_b = free_swap_mobile.components().coulomb("b") + \
free_swap_fixed_coul_nrg
swap_free_lj_nrg_b = free_swap_mobile.components().lj("b") + \
free_swap_fixed_lj_nrg
swap_free_coul_nrg_next = free_swap_mobile.components().coulomb("next") + \
free_swap_fixed_coul_nrg
swap_free_lj_nrg_next = free_swap_mobile.components().lj("next") + \
free_swap_fixed_lj_nrg
swap_free_coul_nrg_prev = free_swap_mobile.components().coulomb("prev") + \
free_swap_fixed_coul_nrg
swap_free_lj_nrg_prev = free_swap_mobile.components().lj("prev") + \
free_swap_fixed_lj_nrg
system.add(ligand_intraclj)
system.add(ligand_intraff)
system.add(swap_interclj)
system.add(bound_ligand_mobile)
system.add(bound_swap_mobile)
system.add(free_ligand_mobile)
system.add(free_swap_mobile)
system.add(bound_ligand_fixed)
system.add(bound_swap_fixed)
system.add(free_ligand_fixed)
system.add(free_swap_fixed)
system.setConstant(lam, 0.0)
system.setConstant(lam_f, 0.0)
system.setConstant(lam_b, 0.0)
system.setConstant(lam_next, 0.0)
system.setConstant(lam_prev, 0.0)
system.setConstant(lam_coul_on, 1.0)
system.setConstant(lam_coul_on_f, 1.0)
system.setConstant(lam_coul_on_b, 1.0)
system.setConstant(lam_coul_on_next, 1.0)
system.setConstant(lam_coul_on_prev, 1.0)
system.setConstant(lam_coul_off, 0.0)
system.setConstant(lam_coul_off_f, 0.0)
system.setConstant(lam_coul_off_b, 0.0)
system.setConstant(lam_coul_off_next, 0.0)
system.setConstant(lam_coul_off_prev, 0.0)
system.setConstant(lam_lj_on, 1.0)
system.setConstant(lam_lj_on_f, 1.0)
system.setConstant(lam_lj_on_b, 1.0)
system.setConstant(lam_lj_on_next, 1.0)
system.setConstant(lam_lj_on_prev, 1.0)
system.setConstant(lam_lj_off, 0.0)
system.setConstant(lam_lj_off_f, 0.0)
system.setConstant(lam_lj_off_b, 0.0)
system.setConstant(lam_lj_off_next, 0.0)
system.setConstant(lam_lj_off_prev, 0.0)
system.setConstant(lam_coul_swap, 1.0)
system.setConstant(lam_coul_swap_f, 1.0)
system.setConstant(lam_coul_swap_b, 1.0)
system.setConstant(lam_coul_swap_next, 1.0)
system.setConstant(lam_coul_swap_prev, 1.0)
system.setConstant(lam_lj_swap, 1.0)
system.setConstant(lam_lj_swap_f, 1.0)
system.setConstant(lam_lj_swap_b, 1.0)
system.setConstant(lam_lj_swap_next, 1.0)
system.setConstant(lam_lj_swap_prev, 1.0)
if uncharge_ligand.val:
system.setComponent(S_sym, 1.0)
else:
system.setComponent(S_sym, soften_water.val)
system.setComponent(ligand_int_nrg_sym, ligand_int_nrg)
system.setComponent(ligand_int_nrg_f_sym, ligand_int_nrg_f)
system.setComponent(ligand_int_nrg_b_sym, ligand_int_nrg_b)
system.setComponent(ligand_int_nrg_next_sym, ligand_int_nrg_next)
system.setComponent(ligand_int_nrg_prev_sym, ligand_int_nrg_prev)
system.setComponent(ligand_bound_coul_nrg_sym, ligand_bound_coul_nrg)
system.setComponent(ligand_bound_coul_nrg_f_sym, ligand_bound_coul_nrg_f)
system.setComponent(ligand_bound_coul_nrg_b_sym, ligand_bound_coul_nrg_b)
system.setComponent(ligand_bound_coul_nrg_next_sym, ligand_bound_coul_nrg_next)
system.setComponent(ligand_bound_coul_nrg_prev_sym, ligand_bound_coul_nrg_prev)
system.setComponent(ligand_bound_lj_nrg_sym, ligand_bound_lj_nrg)
system.setComponent(ligand_bound_lj_nrg_f_sym, ligand_bound_lj_nrg_f)
system.setComponent(ligand_bound_lj_nrg_b_sym, ligand_bound_lj_nrg_b)
system.setComponent(ligand_bound_lj_nrg_next_sym, ligand_bound_lj_nrg_next)
system.setComponent(ligand_bound_lj_nrg_prev_sym, ligand_bound_lj_nrg_prev)
system.setComponent(ligand_free_coul_nrg_sym, ligand_free_coul_nrg)
system.setComponent(ligand_free_coul_nrg_f_sym, ligand_free_coul_nrg_f)
system.setComponent(ligand_free_coul_nrg_b_sym, ligand_free_coul_nrg_b)
system.setComponent(ligand_free_coul_nrg_next_sym, ligand_free_coul_nrg_next)
system.setComponent(ligand_free_coul_nrg_prev_sym, ligand_free_coul_nrg_prev)
system.setComponent(ligand_free_lj_nrg_sym, ligand_free_lj_nrg)
system.setComponent(ligand_free_lj_nrg_f_sym, ligand_free_lj_nrg_f)
system.setComponent(ligand_free_lj_nrg_b_sym, ligand_free_lj_nrg_b)
system.setComponent(ligand_free_lj_nrg_next_sym, ligand_free_lj_nrg_next)
system.setComponent(ligand_free_lj_nrg_prev_sym, ligand_free_lj_nrg_prev)
system.setComponent(swap_int_nrg_sym, swap_int_nrg)
system.setComponent(swap_int_nrg_f_sym, swap_int_nrg_f)
system.setComponent(swap_int_nrg_b_sym, swap_int_nrg_b)
system.setComponent(swap_int_nrg_next_sym, swap_int_nrg_next)
system.setComponent(swap_int_nrg_prev_sym, swap_int_nrg_prev)
system.setComponent(swap_bound_coul_nrg_sym, swap_bound_coul_nrg)
system.setComponent(swap_bound_coul_nrg_f_sym, swap_bound_coul_nrg_f)
system.setComponent(swap_bound_coul_nrg_b_sym, swap_bound_coul_nrg_b)
system.setComponent(swap_bound_coul_nrg_next_sym, swap_bound_coul_nrg_next)
system.setComponent(swap_bound_coul_nrg_prev_sym, swap_bound_coul_nrg_prev)
system.setComponent(swap_bound_lj_nrg_sym, swap_bound_lj_nrg)
system.setComponent(swap_bound_lj_nrg_f_sym, swap_bound_lj_nrg_f)
system.setComponent(swap_bound_lj_nrg_b_sym, swap_bound_lj_nrg_b)
system.setComponent(swap_bound_lj_nrg_next_sym, swap_bound_lj_nrg_next)
system.setComponent(swap_bound_lj_nrg_prev_sym, swap_bound_lj_nrg_prev)
system.setComponent(swap_free_coul_nrg_sym, swap_free_coul_nrg)
system.setComponent(swap_free_coul_nrg_f_sym, swap_free_coul_nrg_f)
system.setComponent(swap_free_coul_nrg_b_sym, swap_free_coul_nrg_b)
system.setComponent(swap_free_coul_nrg_next_sym, swap_free_coul_nrg_next)
system.setComponent(swap_free_coul_nrg_prev_sym, swap_free_coul_nrg_prev)
system.setComponent(swap_free_lj_nrg_sym, swap_free_lj_nrg)
system.setComponent(swap_free_lj_nrg_f_sym, swap_free_lj_nrg_f)
system.setComponent(swap_free_lj_nrg_b_sym, swap_free_lj_nrg_b)
system.setComponent(swap_free_lj_nrg_next_sym, swap_free_lj_nrg_next)
system.setComponent(swap_free_lj_nrg_prev_sym, swap_free_lj_nrg_prev)
bound_bound_nrg_sym = Symbol("E_{bound-bound}")
bound_bound_nrg = None
for bound_forcefield in bound_forcefields:
if bound_bound_nrg is None:
bound_bound_nrg = bound_forcefield.components().total()
else:
bound_bound_nrg = bound_bound_nrg + bound_forcefield.components().total()
system.add(bound_forcefield)
system.setComponent(bound_bound_nrg_sym, bound_bound_nrg)
free_free_nrg_sym = Symbol("E_{free-free}")
free_free_nrg = None
for free_forcefield in free_forcefields:
if free_free_nrg is None:
free_free_nrg = free_forcefield.components().total()
else:
free_free_nrg = free_free_nrg + free_forcefield.components().total()
system.add(free_forcefield)
system.setComponent(free_free_nrg_sym, free_free_nrg)
bound_nrg_sym = Symbol("E_{bound}")
bound_nrg = (lam_coul_on * ligand_bound_coul_nrg_sym) + (lam_coul_off * swap_bound_coul_nrg_sym) + \
(lam_lj_on * ligand_bound_lj_nrg_sym) + (lam_lj_off * swap_bound_lj_nrg_sym)
bound_nrg_f_sym = Symbol("E_{bound_{f}}")
bound_nrg_f = (lam_coul_on_f * ligand_bound_coul_nrg_f_sym) + (lam_coul_off_f * swap_bound_coul_nrg_f_sym) + \
(lam_lj_on_f * ligand_bound_lj_nrg_f_sym) + (lam_lj_off_f * swap_bound_lj_nrg_f_sym)
bound_nrg_b_sym = Symbol("E_{bound_{b}}")
bound_nrg_b = (lam_coul_on_b * ligand_bound_coul_nrg_b_sym) + (lam_coul_off_b * swap_bound_coul_nrg_b_sym) + \
(lam_lj_on_b * ligand_bound_lj_nrg_b_sym) + (lam_lj_off_b * swap_bound_lj_nrg_b_sym)
bound_nrg_next_sym = Symbol("E_{bound_{next}}")
bound_nrg_next = (lam_coul_on_next * ligand_bound_coul_nrg_next_sym) + (lam_coul_off_next * swap_bound_coul_nrg_next_sym) + \
(lam_lj_on_next * ligand_bound_lj_nrg_next_sym) + (lam_lj_off_next * swap_bound_lj_nrg_next_sym)
bound_nrg_prev_sym = Symbol("E_{bound_{prev}}")
bound_nrg_prev = (lam_coul_on_prev * ligand_bound_coul_nrg_prev_sym) + (lam_coul_off_prev * swap_bound_coul_nrg_prev_sym) + \
(lam_lj_on_prev * ligand_bound_lj_nrg_prev_sym) + (lam_lj_off_prev * swap_bound_lj_nrg_prev_sym)
free_nrg_sym = Symbol("E_{free}")
free_nrg = (lam_coul_off * ligand_free_coul_nrg_sym) + (lam_coul_on * swap_free_coul_nrg_sym) + \
(lam_lj_off * ligand_free_lj_nrg_sym) + (lam_lj_on * swap_free_lj_nrg_sym)
free_nrg_f_sym = Symbol("E_{free_{f}}")
free_nrg_f = (lam_coul_off_f * ligand_free_coul_nrg_f_sym) + (lam_coul_on_f * swap_free_coul_nrg_f_sym) + \
(lam_lj_off_f * ligand_free_lj_nrg_f_sym) + (lam_lj_on_f * swap_free_lj_nrg_f_sym)
free_nrg_b_sym = Symbol("E_{free_{b}}")
free_nrg_b = (lam_coul_off_b * ligand_free_coul_nrg_b_sym) + (lam_coul_on_b * swap_free_coul_nrg_b_sym) + \
(lam_lj_off_b * ligand_free_lj_nrg_b_sym) + (lam_lj_on_b * swap_free_lj_nrg_b_sym)
free_nrg_next_sym = Symbol("E_{free_{next}}")
free_nrg_next = (lam_coul_off_next * ligand_free_coul_nrg_next_sym) + (lam_coul_on_next * swap_free_coul_nrg_next_sym) + \
(lam_lj_off_next * ligand_free_lj_nrg_next_sym) + (lam_lj_on_next * swap_free_lj_nrg_next_sym)
free_nrg_prev_sym = Symbol("E_{free_{prev}}")
free_nrg_prev = (lam_coul_off_prev * ligand_free_coul_nrg_prev_sym) + (lam_coul_on_prev * swap_free_coul_nrg_prev_sym) + \
(lam_lj_off_prev * ligand_free_lj_nrg_prev_sym) + (lam_lj_on_prev * swap_free_lj_nrg_prev_sym)
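    # note the symmetry: terms coupling the ligand to the bound leg are scaled by
    # lam_*_on while those coupling the swap cluster to the bound leg are scaled
    # by lam_*_off, and the roles are swapped in the free leg, so the ligand and
    # the swap water cluster exchange environments as lambda runs from 0 to 1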
box_nrg_sym = Symbol("E_{box}")
box_nrg = bound_bound_nrg_sym + free_free_nrg_sym + ligand_int_nrg_sym + swap_int_nrg_sym
box_nrg_f_sym = Symbol("E_{box_{f}}")
box_nrg_f = bound_bound_nrg_sym + free_free_nrg_sym + ligand_int_nrg_f_sym + swap_int_nrg_f_sym
box_nrg_b_sym = Symbol("E_{box_{b}}")
box_nrg_b = bound_bound_nrg_sym + free_free_nrg_sym + ligand_int_nrg_b_sym + swap_int_nrg_b_sym
box_nrg_next_sym = Symbol("E_{box_{next}}")
box_nrg_next = bound_bound_nrg_sym + free_free_nrg_sym + ligand_int_nrg_next_sym + swap_int_nrg_next_sym
box_nrg_prev_sym = Symbol("E_{box_{prev}}")
box_nrg_prev = bound_bound_nrg_sym + free_free_nrg_sym + ligand_int_nrg_prev_sym + swap_int_nrg_prev_sym
total_nrg_sym = system.totalComponent()
total_nrg = bound_nrg_sym + free_nrg_sym + box_nrg_sym
total_nrg_f_sym = Symbol("E_{total_{f}}")
total_nrg_f = bound_nrg_f_sym + free_nrg_f_sym + box_nrg_f_sym
total_nrg_b_sym = Symbol("E_{total_{b}}")
total_nrg_b = bound_nrg_b_sym + free_nrg_b_sym + box_nrg_b_sym
total_nrg_next_sym = Symbol("E_{total_{next}}")
total_nrg_next = bound_nrg_next_sym + free_nrg_next_sym + box_nrg_next_sym
total_nrg_prev_sym = Symbol("E_{total_{prev}}")
total_nrg_prev = bound_nrg_prev_sym + free_nrg_prev_sym + box_nrg_prev_sym
system.setComponent(bound_nrg_sym, bound_nrg)
system.setComponent(bound_nrg_f_sym, bound_nrg_f)
system.setComponent(bound_nrg_b_sym, bound_nrg_b)
system.setComponent(bound_nrg_next_sym, bound_nrg_next)
system.setComponent(bound_nrg_prev_sym, bound_nrg_prev)
system.setComponent(free_nrg_sym, free_nrg)
system.setComponent(free_nrg_f_sym, free_nrg_f)
system.setComponent(free_nrg_b_sym, free_nrg_b)
system.setComponent(free_nrg_next_sym, free_nrg_next)
system.setComponent(free_nrg_prev_sym, free_nrg_prev)
system.setComponent(box_nrg_sym, box_nrg)
system.setComponent(box_nrg_f_sym, box_nrg_f)
system.setComponent(box_nrg_b_sym, box_nrg_b)
system.setComponent(box_nrg_next_sym, box_nrg_next)
system.setComponent(box_nrg_prev_sym, box_nrg_prev)
system.setComponent(total_nrg_sym, total_nrg)
system.setComponent(total_nrg_f_sym, total_nrg_f)
system.setComponent(total_nrg_b_sym, total_nrg_b)
system.setComponent(total_nrg_next_sym, total_nrg_next)
system.setComponent(total_nrg_prev_sym, total_nrg_prev)
system.setComponent( Symbol("delta_nrg^{F}"), (total_nrg_f_sym - total_nrg_sym) )
system.setComponent( Symbol("delta_nrg^{B}"), (total_nrg_b_sym - total_nrg_sym) )
system.setComponent( Symbol("delta_nrg^{next}"), (total_nrg_next_sym - total_nrg_sym) )
system.setComponent( Symbol("delta_nrg^{prev}"), (total_nrg_prev_sym - total_nrg_sym) )
system.setComponent( Symbol("delta_bound_nrg^{F}"), (bound_nrg_f_sym - bound_nrg_sym) )
system.setComponent( Symbol("delta_bound_nrg^{B}"), (bound_nrg_b_sym - bound_nrg_sym) )
system.setComponent( Symbol("delta_free_nrg^{F}"), (free_nrg_f_sym - free_nrg_sym) )
system.setComponent( Symbol("delta_free_nrg^{B}"), (free_nrg_b_sym - free_nrg_sym) )
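    # these delta components hold the energy differences to the forwards, backwards,
    # next and previous lambda states; they are presumably what the free energy
    # estimators (e.g. FEP/TI-style analysis) monitor during the simulation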
# Now add constraints. These are used to keep the identity of the
# swap water, to keep all lambda values between 0 and 1, and to
# map the alpha values of the softcore forcefields to lambda
print("\nCreating WSRC system constraints...\n")
# Add the constraint that lambda_f is lambda + delta_lambda and
# lambda_b is lambda - delta_lambda (kept to between 0 and 1)
dlam = delta_lambda.val
if dlam > 1 or dlam < 0.0000001:
print("WARNING: Weird value of delta_lambda (%s). Setting it to 0.001" % dlam)
dlam = 0.001
    # Constrain lam_f and lam_b to lie within delta_lambda of lambda
dlam_sym = Symbol("delta_lambda")
system.setConstant( dlam_sym, dlam )
system.add( ComponentConstraint( lam_f, Min( lam + dlam_sym, 1 ) ) )
system.add( ComponentConstraint( lam_b, Max( lam - dlam_sym, 0 ) ) )
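    # e.g. with delta_lambda = 0.001 and lam = 0.9995, lam_f is clamped to
    # Min(1.0005, 1) = 1 while lam_b = Max(0.9985, 0) = 0.9985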
# Constrain lam_next and lam_prev to be equal to the next and previous
# windows lambda values
lamvals = getLambdaValues()
print("Using lambda values: %s" % lamvals)
if lamvals[-1] != 1:
lamvals.append(1)
if lamvals[0] != 0:
lamvals.insert(0,0)
system.add( WindowedComponent( lam_next, lam, lamvals, 1 ) )
system.add( WindowedComponent( lam_prev, lam, lamvals, -1 ) )
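    # WindowedComponent keeps lam_next/lam_prev equal to the neighbouring window
    # values; e.g. with lamvals = [0, 0.5, 1] and lam = 0.5, lam_next is 1 and
    # lam_prev is 0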
system.setConstant( lam, lambda_values.val[0] )
    # work out the maximum and minimum permissible values of lam_lj
lam_lj_min = lj_buffer.val
lam_lj_max = 1.0 - lj_buffer.val
# now constrain lam_coul_on, lam_coul_off, lam_lj_on and lam_lj_off to follow lambda
if uncharge_ligand.val:
v = uncharge_ligand_max.val
vfunc = Max
if v > 0.75:
vfunc = Min
system.add( ComponentConstraint( lam_coul_on, vfunc( 1 + (4*lam*(v-1.0)), (v/0.75)*(1.0-lam) ) ) )
system.add( ComponentConstraint( lam_coul_off, vfunc( 1 + (4*(1-lam)*(v-1.0)), (v/0.75)*(1.0-(1-lam)) ) ) )
system.add( ComponentConstraint( lam_coul_on_f, vfunc( 1 + (4*lam_f*(v-1.0)), (v/0.75)*(1.0-lam_f) ) ) )
system.add( ComponentConstraint( lam_coul_off_f, vfunc( 1 + (4*(1-lam_f)*(v-1.0)), (v/0.75)*(1.0-(1-lam_f)) ) ) )
system.add( ComponentConstraint( lam_coul_on_b, vfunc( 1 + (4*lam_b*(v-1.0)), (v/0.75)*(1.0-lam_b) ) ) )
system.add( ComponentConstraint( lam_coul_off_b, vfunc( 1 + (4*(1-lam_b)*(v-1.0)), (v/0.75)*(1.0-(1-lam_b)) ) ) )
system.add( ComponentConstraint( lam_coul_on_next, vfunc( 1 + (4*lam_next*(v-1.0)), (v/0.75)*(1.0-lam_next) ) ) )
system.add( ComponentConstraint( lam_coul_off_next, vfunc( 1 + (4*(1-lam_next)*(v-1.0)), (v/0.75)*(1.0-(1-lam_next)) ) ) )
system.add( ComponentConstraint( lam_coul_on_prev, vfunc( 1 + (4*lam_prev*(v-1.0)), (v/0.75)*(1.0-lam_prev) ) ) )
system.add( ComponentConstraint( lam_coul_off_prev, vfunc( 1 + (4*(1-lam_prev)*(v-1.0)), (v/0.75)*(1.0-(1-lam_prev)) ) ) )
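        # with vfunc chosen as above, lam_coul_on ramps linearly from 1 at
        # lambda = 0 down to uncharge_ligand_max at lambda = 0.25, then continues
        # linearly down to 0 at lambda = 1; lam_coul_off is its mirror image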
system.add( ComponentConstraint( lam_lj_on, Max( Min( 2 * ((1-lam)-0.25), lam_lj_max ), lam_lj_min ) ) ) # scale from 1 to 0 from lam=0.25 to 0.75
system.add( ComponentConstraint( lam_lj_off, Max( Min( 2 * (lam-0.25), lam_lj_max ), lam_lj_min ) ) ) # scale from 0 to 1 from lam=0.25 to 0.75
system.add( ComponentConstraint( lam_lj_on_f, Max( Min( 2 * ((1-lam_f)-0.25), lam_lj_max ), lam_lj_min ) ) ) # scale from 1 to 0 from lam=0.25 to 0.75
system.add( ComponentConstraint( lam_lj_off_f, Max( Min( 2 * (lam_f-0.25), lam_lj_max ), lam_lj_min ) ) ) # scale from 0 to 1 from lam=0.25 to 0.75
system.add( ComponentConstraint( lam_lj_on_b, Max( Min( 2 * ((1-lam_b)-0.25), lam_lj_max ), lam_lj_min ) ) ) # scale from 1 to 0 from lam=0.25 to 0.75
system.add( ComponentConstraint( lam_lj_off_b, Max( Min( 2 * (lam_b-0.25), lam_lj_max ), lam_lj_min ) ) ) # scale from 0 to 1 from lam=0.25 to 0.75
system.add( ComponentConstraint( lam_lj_on_next, Max( Min( 2 * ((1-lam_next)-0.25), lam_lj_max ), lam_lj_min ) ) ) # scale from 1 to 0 from lam=0.25 to 0.75
system.add( ComponentConstraint( lam_lj_off_next, Max( Min( 2 * (lam_next-0.25), lam_lj_max ), lam_lj_min ) ) ) # scale from 0 to 1 from lam=0.25 to 0.75
system.add( ComponentConstraint( lam_lj_on_prev, Max( Min( 2 * ((1-lam_prev)-0.25), lam_lj_max ), lam_lj_min ) ) ) # scale from 1 to 0 from lam=0.25 to 0.75
system.add( ComponentConstraint( lam_lj_off_prev, Max( Min( 2 * (lam_prev-0.25), lam_lj_max ), lam_lj_min ) ) ) # scale from 0 to 1 from lam=0.25 to 0.75
system.add( ComponentConstraint( lam_coul_swap, Max( v, Max(4.0*(v-1.0)*lam + 1, 4.0*(v-1.0)*(1-lam) + 1 ) ) ) )
system.add( ComponentConstraint( lam_coul_swap_f, Max( v, Max(4.0*(v-1.0)*lam_f + 1, 4.0*(v-1.0)*(1-lam_f) + 1 ) ) ) )
system.add( ComponentConstraint( lam_coul_swap_b, Max( v, Max(4.0*(v-1.0)*lam_b + 1, 4.0*(v-1.0)*(1-lam_b) + 1 ) ) ) )
system.add( ComponentConstraint( lam_coul_swap_next, Max( v, Max(4.0*(v-1.0)*lam_next + 1, 4.0*(v-1.0)*(1-lam_next) + 1 ) ) ) )
system.add( ComponentConstraint( lam_coul_swap_prev, Max( v, Max(4.0*(v-1.0)*lam_prev + 1, 4.0*(v-1.0)*(1-lam_prev) + 1 ) ) ) )
else:
system.add( ComponentConstraint( lam_coul_on, 1-lam ) )
system.add( ComponentConstraint( lam_coul_off, lam ) )
system.add( ComponentConstraint( lam_lj_on, Max( Min( 1-lam, lam_lj_max ), lam_lj_min ) ) )
system.add( ComponentConstraint( lam_lj_off, Max( Min( lam, lam_lj_max ), lam_lj_min ) ) )
system.add( ComponentConstraint( lam_coul_on_f, 1-lam_f ) )
system.add( ComponentConstraint( lam_coul_off_f, lam_f ) )
system.add( ComponentConstraint( lam_lj_on_f, Max( Min( 1-lam_f, lam_lj_max ), lam_lj_min ) ) )
system.add( ComponentConstraint( lam_lj_off_f, Max( Min( lam_f, lam_lj_max ), lam_lj_min ) ) )
system.add( ComponentConstraint( lam_coul_on_b, 1-lam_b ) )
system.add( ComponentConstraint( lam_coul_off_b, lam_b ) )
system.add( ComponentConstraint( lam_lj_on_b, Max( Min( 1-lam_b, lam_lj_max ), lam_lj_min ) ) )
system.add( ComponentConstraint( lam_lj_off_b, Max( Min( lam_b, lam_lj_max ), lam_lj_min ) ) )
system.add( ComponentConstraint( lam_coul_on_next, 1-lam_next ) )
system.add( ComponentConstraint( lam_coul_off_next, lam_next ) )
system.add( ComponentConstraint( lam_lj_on_next, Max( Min( 1-lam_next, lam_lj_max ), lam_lj_min ) ) )
system.add( ComponentConstraint( lam_lj_off_next, Max( Min( lam_next, lam_lj_max ), lam_lj_min ) ) )
system.add( ComponentConstraint( lam_coul_on_prev, 1-lam_prev ) )
system.add( ComponentConstraint( lam_coul_off_prev, lam_prev ) )
system.add( ComponentConstraint( lam_lj_on_prev, Max( Min( 1-lam_prev, lam_lj_max ), lam_lj_min ) ) )
system.add( ComponentConstraint( lam_lj_off_prev, Max( Min( lam_prev, lam_lj_max ), lam_lj_min ) ) )
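# A quick sketch of the schedules above (illustrative, assuming
# uncharge_ligand_max = v with v <= 0.75 so that vfunc == Max):
#   lam_coul_on   : 1 -> v over lam in [0, 0.25], then (v/0.75)*(1-lam) -> 0 at lam = 1
#   lam_coul_off  : the same schedule evaluated at 1-lam
#   lam_lj_on/off : linear ramps clamped between lam_lj_min and lam_lj_max
#   lam_coul_swap : 1 at both endpoints, dipping to v in the middle of the path
# Without uncharging, the coulomb schedules are just the plain ramps 1-lam and lam.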
# now add alpha variables that can be used by the EnergyMonitors
alpha_on = Symbol("alpha_on")
alpha_off = Symbol("alpha_off")
system.setConstant(alpha_on, 0)
system.setConstant(alpha_off, 1)
system.setConstant( Symbol("alpha_scale"), alpha_scale.val )
system.add( ComponentConstraint( alpha_on, alpha_scale.val * lam ) )
system.add( ComponentConstraint( alpha_off, alpha_scale.val * (1-lam) ) )
# Now constrain alpha to follow lambda
# First, the reference state (alpha0)
if use_oldff.val:
system.add( PropertyConstraint( "alpha0", FFName("free:swap-mobile"), alpha_scale.val * lam ) )
system.add( PropertyConstraint( "alpha0", FFName("bound:swap-mobile"), alpha_scale.val * (1 - lam) ) )
system.add( PropertyConstraint( "alpha0", FFName("bound:ligand-mobile"), alpha_scale.val * lam ) )
system.add( PropertyConstraint( "alpha0", FFName("free:ligand-mobile"), alpha_scale.val * (1 - lam) ) )
# Now the forwards perturbed state (alpha1)
system.add( PropertyConstraint( "alpha1", FFName("free:swap-mobile"), alpha_scale.val * lam_f ) )
system.add( PropertyConstraint( "alpha1", FFName("bound:swap-mobile"), alpha_scale.val * (1 - lam_f) ) )
system.add( PropertyConstraint( "alpha1", FFName("bound:ligand-mobile"), alpha_scale.val * lam_f ) )
system.add( PropertyConstraint( "alpha1", FFName("free:ligand-mobile"), alpha_scale.val * (1 - lam_f) ) )
# Now the backwards perturbed state (alpha2)
system.add( PropertyConstraint( "alpha2", FFName("free:swap-mobile"), alpha_scale.val * lam_b ) )
system.add( PropertyConstraint( "alpha2", FFName("bound:swap-mobile"), alpha_scale.val * (1 - lam_b) ) )
system.add( PropertyConstraint( "alpha2", FFName("bound:ligand-mobile"), alpha_scale.val * lam_b ) )
system.add( PropertyConstraint( "alpha2", FFName("free:ligand-mobile"), alpha_scale.val * (1 - lam_b) ) )
# Now the next window perturbed state (alpha3)
system.add( PropertyConstraint( "alpha3", FFName("free:swap-mobile"), alpha_scale.val * lam_next ) )
system.add( PropertyConstraint( "alpha3", FFName("bound:swap-mobile"), alpha_scale.val * (1 - lam_next) ) )
system.add( PropertyConstraint( "alpha3", FFName("bound:ligand-mobile"), alpha_scale.val * lam_next ) )
system.add( PropertyConstraint( "alpha3", FFName("free:ligand-mobile"), alpha_scale.val * (1 - lam_next) ) )
# Now the previous window perturbed state (alpha4)
system.add( PropertyConstraint( "alpha4", FFName("free:swap-mobile"), alpha_scale.val * lam_prev ) )
system.add( PropertyConstraint( "alpha4", FFName("bound:swap-mobile"), alpha_scale.val * (1 - lam_prev) ) )
system.add( PropertyConstraint( "alpha4", FFName("bound:ligand-mobile"), alpha_scale.val * lam_prev ) )
system.add( PropertyConstraint( "alpha4", FFName("free:ligand-mobile"), alpha_scale.val * (1 - lam_prev) ) )
else:
system.add( PropertyConstraint( "alpha", FFName("free:swap-mobile"), alpha_scale.val * lam ) )
system.add( PropertyConstraint( "alpha", FFName("bound:swap-mobile"), alpha_scale.val * (1 - lam) ) )
system.add( PropertyConstraint( "alpha", FFName("bound:ligand-mobile"), alpha_scale.val * lam ) )
system.add( PropertyConstraint( "alpha", FFName("free:ligand-mobile"), alpha_scale.val * (1 - lam) ) )
# Now the forwards perturbed state (alpha1)
system.add( PropertyConstraint( "alpha[f]", FFName("free:swap-mobile"), alpha_scale.val * lam_f ) )
system.add( PropertyConstraint( "alpha[f]", FFName("bound:swap-mobile"), alpha_scale.val * (1 - lam_f) ) )
system.add( PropertyConstraint( "alpha[f]", FFName("bound:ligand-mobile"), alpha_scale.val * lam_f ) )
system.add( PropertyConstraint( "alpha[f]", FFName("free:ligand-mobile"), alpha_scale.val * (1 - lam_f) ) )
# Now the backwards perturbed state (alpha2)
system.add( PropertyConstraint( "alpha[b]", FFName("free:swap-mobile"), alpha_scale.val * lam_b ) )
system.add( PropertyConstraint( "alpha[b]", FFName("bound:swap-mobile"), alpha_scale.val * (1 - lam_b) ) )
system.add( PropertyConstraint( "alpha[b]", FFName("bound:ligand-mobile"), alpha_scale.val * lam_b ) )
system.add( PropertyConstraint( "alpha[b]", FFName("free:ligand-mobile"), alpha_scale.val * (1 - lam_b) ) )
# Now the next window perturbed state (alpha3)
system.add( PropertyConstraint( "alpha[next]", FFName("free:swap-mobile"), alpha_scale.val * lam_next ) )
system.add( PropertyConstraint( "alpha[next]", FFName("bound:swap-mobile"), alpha_scale.val * (1 - lam_next) ) )
system.add( PropertyConstraint( "alpha[next]", FFName("bound:ligand-mobile"), alpha_scale.val * lam_next ) )
system.add( PropertyConstraint( "alpha[next]", FFName("free:ligand-mobile"), alpha_scale.val * (1 - lam_next) ) )
# Now the previous window perturbed state (alpha4)
system.add( PropertyConstraint( "alpha[prev]", FFName("free:swap-mobile"), alpha_scale.val * lam_prev ) )
system.add( PropertyConstraint( "alpha[prev]", FFName("bound:swap-mobile"), alpha_scale.val * (1 - lam_prev) ) )
system.add( PropertyConstraint( "alpha[prev]", FFName("bound:ligand-mobile"), alpha_scale.val * lam_prev ) )
system.add( PropertyConstraint( "alpha[prev]", FFName("free:ligand-mobile"), alpha_scale.val * (1 - lam_prev) ) )
# Now soften the swap-water-swap-water interactions around lambda = 0.5 (used if decharging the ligand)
if uncharge_ligand.val:
s_scl = soften_water.val
else:
s_scl = 0
if use_oldff.val:
system.add( PropertyConstraint( "alpha0", FFName("swap:interclj"), s_scl * (1 - 2*Abs(lam - 0.5)) ) )
system.add( PropertyConstraint( "alpha1", FFName("swap:interclj"), s_scl * (1 - 2*Abs(lam_f - 0.5)) ) )
system.add( PropertyConstraint( "alpha2", FFName("swap:interclj"), s_scl * (1 - 2*Abs(lam_b - 0.5)) ) )
system.add( PropertyConstraint( "alpha3", FFName("swap:interclj"), s_scl * (1 - 2*Abs(lam_next - 0.5)) ) )
system.add( PropertyConstraint( "alpha4", FFName("swap:interclj"), s_scl * (1 - 2*Abs(lam_prev - 0.5)) ) )
else:
system.add( PropertyConstraint( "alpha", FFName("swap:interclj"), s_scl * (1 - 2*Abs(lam - 0.5)) ) )
system.add( PropertyConstraint( "alpha[f]", FFName("swap:interclj"), s_scl * (1 - 2*Abs(lam_f - 0.5)) ) )
system.add( PropertyConstraint( "alpha[b]", FFName("swap:interclj"), s_scl * (1 - 2*Abs(lam_b - 0.5)) ) )
system.add( PropertyConstraint( "alpha[next]", FFName("swap:interclj"), s_scl * (1 - 2*Abs(lam_next - 0.5)) ) )
system.add( PropertyConstraint( "alpha[prev]", FFName("swap:interclj"), s_scl * (1 - 2*Abs(lam_prev - 0.5)) ) )
# Now lets create all of the groups for moves based on the above
# All solvent molecules in the bound and free legs are moved together
mobile_solvent = MoleculeGroup("mobile_solvent")
mobile_solvent.add( mobile_bound_solvents_group.molecules() )
mobile_solvent.add( mobile_free_water_group.molecules() )
system.add( mobile_solvent )
# All protein sidechains are moved together
mobile_sidechains = MoleculeGroup("mobile_sidechains")
mobile_sidechains.add(mobile_bound_protein_sidechains_group.molecules())
system.add( mobile_sidechains )
# All protein backbones are moved together
mobile_backbones = MoleculeGroup("mobile_backbones")
mobile_backbones.add(mobile_bound_protein_backbones_group.molecules())
system.add( mobile_backbones )
# All solute molecules are moved together
mobile_solutes = MoleculeGroup("mobile_solutes")
mobile_solutes.add(mobile_bound_solutes_group.molecules())
system.add( mobile_solutes )
# The ligand is moved in its own group
mobile_ligand = MoleculeGroup("mobile_ligand")
mobile_ligand.add(ligand_mol)
system.add( mobile_ligand )
# The swap water cluster is moved in its own group
mobile_swap = MoleculeGroup("mobile_swap_water")
mobile_swap.add(swap_water_group.molecules())
system.add( mobile_swap )
if use_identity_constraint:
print("Adding the identity constraint...")
# Now add the constraint that keeps the identities of the
# swap molecules. The swap molecules are chosen from all available mobile
# water molecules. We need to build a group of all mobile water molecules that
# are waters (as opposed to ions, as other molecules may be in mobile_solvent)
mobile_water = MoleculeGroup("mobile_water")
# The mobile water *must* contain the swap waters, so that they can be swapped
mobile_water.add(swap_water_group)
mobile_water.add(mobile_free_water_group)
if waterbox_only.val:
print("Choosing water molecules only from the free water box.")
else:
print("Choosing swap waters from both the protein box and water box.")
mobile_water.add(mobile_bound_water_group)
print("The number of candidates for the swap water equals: %d" % mobile_water.nMolecules())
system.add(mobile_water)
system.add( IdentityConstraint(identity_points, mobile_water, { "space" : Cartesian() } ) )
# Apply all of the constraints
system.applyConstraints()
else:
print("Using the reflection volume to hold the swap water in place.")
system.applyConstraints()
###
### ADD THE SYSTEM MONITORS
###
# Now we need to add the monitors...
print("\nAdding system monitors...")
system.add( "delta_g^{F}", MonitorComponent( Symbol("delta_nrg^{F}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val) ) )
system.add( "delta_g^{B}", MonitorComponent( Symbol("delta_nrg^{B}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val, False) ) )
system.add( "delta_g^{next}", MonitorComponent( Symbol("delta_nrg^{next}"),
BennettsFreeEnergyAverage(0 * kcal_per_mol,
temperature.val,
0.1 * binwidth.val) ) )
system.add( "delta_g^{prev}", MonitorComponent( Symbol("delta_nrg^{prev}"),
BennettsFreeEnergyAverage(0 * kcal_per_mol,
temperature.val,
0.1 * binwidth.val, False) ) )
system.add( "delta_bound_g^{F}", MonitorComponent( Symbol("delta_bound_nrg^{F}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val) ) )
system.add( "delta_bound_g^{B}", MonitorComponent( Symbol("delta_bound_nrg^{B}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val, False) ) )
system.add( "delta_free_g^{F}", MonitorComponent( Symbol("delta_free_nrg^{F}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val) ) )
system.add( "delta_free_g^{B}", MonitorComponent( Symbol("delta_free_nrg^{B}"),
FreeEnergyAverage(temperature.val,
dlam * binwidth.val, False) ) )
# we will monitor the average energy between the swap cluster/ligand and each
# residue with a mobile sidechain, and each mobile solute
monitor_prosol = None
if mobile_solutes.isEmpty():
monitor_prosol = mobile_sidechains
elif mobile_sidechains.isEmpty():
monitor_prosol = mobile_solutes
else:
monitor_prosol = MoleculeGroup("monitored_protein_solute")
monitor_prosol.add(mobile_sidechains)
monitor_prosol.add(mobile_solutes)
system.add(monitor_prosol)
residue_nrgmon = FreeEnergyMonitor(monitor_prosol, ligand_group, mobile_swap)
nrgmons = {}
nrgmons["residue_nrgmon"] = residue_nrgmon
# because the water molecules can diffuse, we find all waters within
# a certain distance of the ligand, and then identify them using identity
# points (placed at the center of the initial positions of the waters),
# and then monitor those...
boundwater_points = []
freewater_points = []
if water_monitor_distance.val:
dist = water_monitor_distance.val.to(angstrom)
for molnum in mobile_bound_water_group.molNums():
water_mol = mobile_bound_water_group[molnum].molecule()
if getMinimumDistance(ligand_mol,water_mol) < dist:
# we should monitor this water
boundwater_points.append( VectorPoint(water_mol.evaluate().center()) )
for molnum in mobile_free_water_group.molNums():
# this is a mobile water, so a candidate for monitoring
water_mol = mobile_free_water_group[molnum].molecule()
if getMinimumDistance(ligand_mol,water_mol) < dist:
# we should monitor this water
freewater_points.append( VectorPoint(water_mol.evaluate().center()) )
system.add(mobile_bound_water_group)
system.add(mobile_free_water_group)
boundwater_assigner = IDAssigner(boundwater_points, mobile_bound_water_group,
{"space" : Cartesian()})
boundwater_assigner.update(system)
freewater_assigner = IDAssigner(freewater_points, mobile_free_water_group,
{"space" : Cartesian()})
freewater_assigner.update(system)
boundwater_nrgmon = FreeEnergyMonitor(boundwater_assigner, ligand_group, mobile_swap)
freewater_nrgmon = FreeEnergyMonitor(freewater_assigner, mobile_swap, ligand_group)
nrgmons["boundwater_nrgmon"] = boundwater_nrgmon
nrgmons["freewater_nrgmon"] = freewater_nrgmon
for key in list(nrgmons.keys()):
nrgmons[key].setCoulombPower(coul_power.val)
nrgmons[key].setShiftDelta(shift_delta.val)
nrgmons[key].setTemperature(temperature.val)
system.add(key, nrgmons[key], nrgmon_frequency.val)
# now calculate the total energy of the system - this initialises grids etc.
# ensuring that, when we make the replicas, the maximum amount of sharing between
# replicas occurs
print("\nEnergies of this system at lambda == 0...")
system.setConstant(lam, 0.0)
printEnergies(system.energies(), sys.stdout)
print("\nEnergies of this system at lambda == 1...")
system.setConstant(lam, 1.0)
printEnergies(system.energies(), sys.stdout)
system.setConstant(lam, 0.0)
return system
def makeRETI(system, moves):
"""This function replicates 'system' over each of the supplied lambda values
and uses 'moves' to sample each of the replicated systems. This uses RETI
to perform replica exchange moves across lambda"""
lam = Symbol("lambda")
lamvals = getLambdaValues()
replicas = Replicas( len(lamvals) )
replicas.setSubSystem(system)
replicas.setSubMoves(moves)
replicas.setNSubMoves(nsubmoves.val)
replicas.setLambdaComponent(lam)
replicas.setRecordAllStatistics(True)
seed = random_seed.val
if seed is None:
seed = RanGenerator().randInt(100000,1000000)
print("RETI system using generated random number seed %d" % seed)
else:
print("RETI system using supplied random number seed %d" % seed)
replicas.setGenerator( RanGenerator(seed+5) )
for i in range(0, len(lamvals)):
# set the initial lambda value for this replica
replicas.setLambdaValue(i, lamvals[i])
for i in range(0, len(lamvals)):
print(lamvals[i])
print(replicas[i].subSystem().constants())
# Now add monitors for each replica that will copy back the sampled monitor data
nrgmons = [ "delta_g^{F}", "delta_g^{B}", "delta_g^{next}", "delta_g^{prev}",
"delta_bound_g^{F}", "delta_bound_g^{B}",
"delta_free_g^{F}", "delta_free_g^{B}",
"residue_nrgmon", "boundwater_nrgmon", "freewater_nrgmon" ]
for nrgmon in nrgmons:
replicas.add( nrgmon, MonitorMonitor(MonitorName(nrgmon), True) )
# now create the replica exchange moves for the replicas
replica_moves = RepExMove2()
replica_moves.setDisableSwaps(False)
replica_moves.setGenerator( RanGenerator(seed+7) )
print("\nReturning the WSRC RETI replicas and moves...")
return (replicas, replica_moves)
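# (the seed+5 / seed+7 offsets above simply derive distinct but reproducible
# RNG streams for the replicas and the replica-exchange moves from the one
# user-supplied seed)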
def getName(view):
try:
residue = view.residue()
return "%s:%s" % (residue.name().value(), residue.number().value())
except:
return "%s:%s" % (view.name().value(), view.number().value())
def loadWSRC():
# Load the WSRC system and moves using the passed parameters
# This returns (wsrc_system, wsrc_moves), ready for simulation
print("Loading the protein box system...")
if os.path.exists(protein_s3file.val):
print("Loading existing s3 file %s..." % protein_s3file.val)
proteinsys = Sire.Stream.load(protein_s3file.val)
else:
print("Loading from Amber files %s / %s..." % (protein_topfile.val, protein_crdfile.val))
# Add the name of the ligand to the list of solute molecules
proteinsys_scheme = NamingScheme()
proteinsys_scheme.addSoluteResidueName( ligand_name.val )
# Load up the system. This will automatically find the protein, solute, water, solvent
# and ion molecules and assign them to different groups
proteinsys = createSystem(protein_topfile.val, protein_crdfile.val, proteinsys_scheme)
ligand_mol = findMolecule(proteinsys, ligand_name.val)
if ligand_mol is None:
print("Cannot find the ligand (%s) in the set of loaded molecules!" % ligand_name.val)
sys.exit(-1)
# Center the system with the ligand at (0,0,0)
proteinsys = centerSystem(proteinsys, ligand_mol)
ligand_mol = proteinsys[ligand_mol.number()][0].molecule()
proteinsys = addFlexibility(proteinsys, Vector(0,0,0), reflection_radius.val, proteinsys_scheme )
Sire.Stream.save(proteinsys, protein_s3file.val)
ligand_mol = findMolecule(proteinsys, ligand_name.val)
if ligand_mol is None:
print("Cannot find the ligand (%s) in the set of loaded molecules!" % ligand_name.val)
sys.exit(-1)
print("Loading the water box system...")
if os.path.exists(water_s3file.val):
print("Loading from existing s3 file %s..." % water_s3file.val)
watersys = Sire.Stream.load(water_s3file.val)
else:
print("Loading from Amber files %s / %s..." % (water_topfile.val, water_crdfile.val))
watersys = createSystem(water_topfile.val, water_crdfile.val)
watersys = addFlexibility(watersys, Vector(0,0,0), reflection_radius.val)
Sire.Stream.save(watersys, water_s3file.val)
print("Building the WSRC forcefields")
wsrc_system = mergeSystems(proteinsys, watersys, ligand_mol)
wsrc_moves = createWSRCMoves(wsrc_system)
return (wsrc_system, wsrc_moves)
def printComponents(comps, FILE):
"""This function prints out all of the free energy components in the passed object"""
print("RESIDUE TOTAL COULOMB LJ", file=FILE)
for i in range(0, comps.nComponents()):
print("%s %s %s %s" % (comps.viewAt(i).residue(), \
-comps.integrate(i).values()[-1].y(), \
-comps.integrateCoulomb(i).values()[-1].y(), \
-comps.integrateLJ(i).values()[-1].y()), file=FILE)
def printFreeEnergy(total, bound, free, FILE):
"""This function prints out the total, bound and free free energies"""
print("TOTAL BOUND FREE", file=FILE)
print("%s %s %s" % (-total.integrate().values()[-1].y(), \
-bound.integrate().values()[-1].y(), \
-free.integrate().values()[-1].y()), file=FILE)
def analyseWSRC(replicas, iteration, bennetts_freenrgs, fep_freenrgs, ti_freenrgs, bound_freenrgs, free_freenrgs,
res_freenrgs, bound_water_freenrgs, free_water_freenrgs):
"""This function is used to perform all analysis of iteration 'it' of the passed WSRC system"""
print("Analysing iteration %d..." % iteration)
if not os.path.exists(outdir.val):
os.makedirs(outdir.val)
# read the value of delta_lambda from the first system
system = replicas[0].subSystem()
delta_lambda = system.constant(Symbol("delta_lambda"))
logfile = "%s/results_%0004d.log" % (outdir.val, iteration)
FILE = open(logfile, "w")
print("===========================", file=FILE)
print(" Results for iteration %d" % iteration, file=FILE)
print("===========================", file=FILE)
print("\ndelta_lambda == %f" % delta_lambda, file=FILE)
print("temperature == %f K\n" % replicas[0].subMoves().temperature().to(kelvin), file=FILE)
nreplicas = replicas.nReplicas()
# extract all of the monitors from the replicas
lambda_values = []
dg_f = {}
dg_b = {}
dg_next = {}
dg_prev = {}
dg_bound_f = {}
dg_bound_b = {}
dg_free_f = {}
dg_free_b = {}
dg_residue = {}
dg_boundwater = {}
dg_freewater = {}
write_pdbs = (save_pdb.val) and (iteration % pdb_frequency.val == 0)
if write_pdbs:
print("Saving PDBs of the system at iteration %d..." % iteration)
for i in range(0, nreplicas):
replica = replicas[i]
monitors = replica.monitors()
lamval = replica.lambdaValue()
lambda_values.append(lamval)
if write_pdbs:
if save_all_pdbs.val or (i == 0) or (i == nreplicas-1):
# Save a PDB of the final configuration for the bound and free legs for each lambda value
system = replica.subSystem()
bound_leg = system[MGName("bound_leg")]
free_leg = system[MGName("free_leg")]
PDB().write(bound_leg, "%s/bound_mobile_%000006d_%.5f.pdb" % (outdir.val, iteration, lamval))
PDB().write(free_leg, "%s/free_mobile_%000006d_%.5f.pdb" % (outdir.val, iteration, lamval))
dg_f[lamval] = monitors[MonitorName("delta_g^{F}")][-1].accumulator()
dg_b[lamval] = monitors[MonitorName("delta_g^{B}")][-1].accumulator()
dg_next[lamval] = monitors[MonitorName("delta_g^{next}")][-1].accumulator()
dg_prev[lamval] = monitors[MonitorName("delta_g^{prev}")][-1].accumulator()
dg_bound_f[lamval] = monitors[MonitorName("delta_bound_g^{F}")][-1].accumulator()
dg_bound_b[lamval] = monitors[MonitorName("delta_bound_g^{B}")][-1].accumulator()
dg_free_f[lamval] = monitors[MonitorName("delta_free_g^{F}")][-1].accumulator()
dg_free_b[lamval] = monitors[MonitorName("delta_free_g^{B}")][-1].accumulator()
dg_residue[lamval] = monitors[MonitorName("residue_nrgmon")][-1]
dg_boundwater[lamval] = monitors[MonitorName("boundwater_nrgmon")][-1]
dg_freewater[lamval] = monitors[MonitorName("freewater_nrgmon")][-1]
windows = copy.deepcopy(lambda_values)
windows.sort()
if windows[-1] != 1:
windows.append(1)
if windows[0] != 0:
windows.insert(0,0)
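# padding the sorted window list with the 0 and 1 endpoints ensures the
# Bennetts/FEP accumulators below always span the full lambda = 0 -> 1 path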
bennetts_freenrgs.set( iteration, windows, dg_next, dg_prev )
fep_freenrgs.set( iteration, windows, dg_next, dg_prev )
ti_freenrgs.set( iteration, dg_f, dg_b, delta_lambda )
bound_freenrgs.set( iteration, dg_bound_f, dg_bound_b, delta_lambda )
free_freenrgs.set( iteration, dg_free_f, dg_free_b, delta_lambda )
print("\nTOTAL BINDING FREE ENERGY\n", file=FILE)
printFreeEnergy(ti_freenrgs[iteration], bound_freenrgs[iteration], free_freenrgs[iteration], FILE)
res_freenrgs.set( iteration, dg_residue )
bound_water_freenrgs.set( iteration, dg_boundwater )
free_water_freenrgs.set( iteration, dg_freewater )
print("\nRESIDUE FREE ENERGY COMPONENTS\n", file=FILE)
printComponents(res_freenrgs[iteration], FILE)
print("\nPROTEIN BOX WATER FREE ENERGY COMPONENTS\n", file=FILE)
printComponents(bound_water_freenrgs[iteration], FILE)
print("\nWATER BOX WATER FREE ENERGY COMPONENTS\n", file=FILE)
printComponents(free_water_freenrgs[iteration], FILE)
print("\n=============", file=FILE)
print("Binding free energy for iteration %d equals %s" % (iteration, \
-ti_freenrgs[iteration].integrate().values()[-1].y()), file=FILE)
print("==============", file=FILE)
@resolveParameters
def run():
"""This is a very high level function that does everything to run a WSRC simulation"""
t = QTime()
total_t = QTime()
total_t.start()
if os.path.exists(restart_file.val):
t.start()
(wsrc_system, wsrc_moves) = Sire.Stream.load(restart_file.val)
print("Loading the restart file took %d ms" % t.elapsed())
else:
# Load the WSRC protein and water boxes from the topology and coordinate
# files and merge together into the WSRC system and moves object
t.start()
if os.path.exists(sysmoves_file.val):
(wsrc_system, wsrc_moves) = Sire.Stream.load(sysmoves_file.val)
else:
(wsrc_system, wsrc_moves) = loadWSRC()
Sire.Stream.save( (wsrc_system, wsrc_moves), "pre_equil.s3")
# Equilibrate the system before replicating it across lambda
if nequilmoves.val:
print("Equilibrating the system (number of moves: %d)..." % nequilmoves.val)
wsrc_system = wsrc_moves.move(wsrc_system, nequilmoves.val, False)
print("...equilibration complete")
Sire.Stream.save( (wsrc_system, wsrc_moves), sysmoves_file.val)
# Now replicate the WSRC system across all lambda values so that we
# can run a RETI simulation
(wsrc_system, wsrc_moves) = makeRETI(wsrc_system, wsrc_moves)
Sire.Stream.save( (wsrc_system, wsrc_moves), restart_file.val )
print("Initialising the simulation took %d ms" % t.elapsed())
# see how many blocks of moves we still need to perform...
nattempted = wsrc_moves.nMoves()
print("Number of iterations to perform: %d. Number of iterations completed: %d." % (nmoves.val, nattempted))
# See if we have any existing free energy statistics files...
t.start()
freenrgs_file = "%s/freenrgs.s3" % outdir.val
if not os.path.exists(freenrgs_file):
bennetts_freenrgs = Bennetts()
fep_freenrgs = FEP()
ti_freenrgs = TI()
else:
[bennetts_freenrgs, fep_freenrgs, ti_freenrgs] = Sire.Stream.load(freenrgs_file)
freenrg_parts_file = "%s/freenrg_parts.s3" % outdir.val
if not os.path.exists(freenrg_parts_file):
bound_freenrgs = TI()
free_freenrgs = TI()
else:
[bound_freenrgs, free_freenrgs] = Sire.Stream.load(freenrg_parts_file)
freenrg_components_file = "%s/freenrg_components.s3" % outdir.val
if not os.path.exists(freenrg_components_file):
res_freenrgs = TIComponents()
bound_water_freenrgs = TIComponents()
free_water_freenrgs = TIComponents()
else:
[res_freenrgs, bound_water_freenrgs, free_water_freenrgs] = Sire.Stream.load(freenrg_components_file)
print("Initialising / loading the free energy files took %d ms" % t.elapsed())
for i in range(nattempted+1, nmoves.val+1):
t.start()
print("Performing iteration %d..." % i)
wsrc_moves.move(wsrc_system, 1, True)
print("...iteration complete (took %d ms)" % t.elapsed())
t.start()
print("Analysing iteration %d..." % i)
analyseWSRC(wsrc_system, i, bennetts_freenrgs, fep_freenrgs, ti_freenrgs, bound_freenrgs, free_freenrgs,
res_freenrgs, bound_water_freenrgs, free_water_freenrgs)
wsrc_system.clearAllStatistics()
print("...analysis complete (took %d ms)" % t.elapsed())
if i % restart_frequency.val == 0 or i == nmoves.val:
t.start()
print("Saving the free energy analysis files from iteration %d..." % i)
# save the old file to a backup
try:
shutil.copy(freenrgs_file, "%s.bak" % freenrgs_file)
except:
pass
try:
shutil.copy(freenrg_components_file, "%s.bak" % freenrg_components_file)
except:
pass
try:
shutil.copy(freenrg_parts_file, "%s.bak" % freenrg_parts_file)
except:
pass
Sire.Stream.save( [bennetts_freenrgs, fep_freenrgs, ti_freenrgs], freenrgs_file )
Sire.Stream.save( [bound_freenrgs, free_freenrgs], freenrg_parts_file )
Sire.Stream.save( [res_freenrgs, bound_water_freenrgs, free_water_freenrgs], freenrg_components_file )
print("...save complete (took %d ms)" % t.elapsed())
# write a restart file every N moves in case of a crash or running out of time
if i % restart_frequency.val == 0 or i == nmoves.val:
t.start()
print("Saving the restart file from iteration %d..." % i)
# save the old file to a backup
try:
shutil.copy(restart_file.val, "%s.bak" % restart_file.val)
except:
pass
Sire.Stream.save( (wsrc_system, wsrc_moves), restart_file.val )
print("...save complete (took %d ms)" % t.elapsed())
print("All iterations complete. Total runtime was %d ms" % total_t.elapsed())
|
chryswoods/Sire
|
wrapper/Tools/WSRC.py
|
Python
|
gpl-2.0
| 136,830
|
[
"Amber"
] |
bcc0dfed103a81db43c9845694aadc96d7bc211b898ae098dcc67a6b8d6117ea
|
# -*- coding: utf-8 -*-
import re
from numbers import Integral
from collections import namedtuple
__all__ = ["countries"]
try:
basestring
except NameError:
basestring = str
Country = namedtuple('Country',
'name alpha2 alpha3 numeric apolitical_name')
_records = [
Country(u"Afghanistan", "AF", "AFG", "004", u"Afghanistan"),
Country(u"Åland Islands", "AX", "ALA", "248", u"Åland Islands"),
Country(u"Albania", "AL", "ALB", "008", u"Albania"),
Country(u"Algeria", "DZ", "DZA", "012", u"Algeria"),
Country(u"American Samoa", "AS", "ASM", "016", u"American Samoa"),
Country(u"Andorra", "AD", "AND", "020", u"Andorra"),
Country(u"Angola", "AO", "AGO", "024", u"Angola"),
Country(u"Anguilla", "AI", "AIA", "660", u"Anguilla"),
Country(u"Antarctica", "AQ", "ATA", "010", u"Antarctica"),
Country(u"Antigua and Barbuda", "AG", "ATG", "028",
u"Antigua and Barbuda"),
Country(u"Argentina", "AR", "ARG", "032", u"Argentina"),
Country(u"Armenia", "AM", "ARM", "051", u"Armenia"),
Country(u"Aruba", "AW", "ABW", "533", u"Aruba"),
Country(u"Australia", "AU", "AUS", "036", u"Australia"),
Country(u"Austria", "AT", "AUT", "040", u"Austria"),
Country(u"Azerbaijan", "AZ", "AZE", "031", u"Azerbaijan"),
Country(u"Bahamas", "BS", "BHS", "044", u"Bahamas"),
Country(u"Bahrain", "BH", "BHR", "048", u"Bahrain"),
Country(u"Bangladesh", "BD", "BGD", "050", u"Bangladesh"),
Country(u"Barbados", "BB", "BRB", "052", u"Barbados"),
Country(u"Belarus", "BY", "BLR", "112", u"Belarus"),
Country(u"Belgium", "BE", "BEL", "056", u"Belgium"),
Country(u"Belize", "BZ", "BLZ", "084", u"Belize"),
Country(u"Benin", "BJ", "BEN", "204", u"Benin"),
Country(u"Bermuda", "BM", "BMU", "060", u"Bermuda"),
Country(u"Bhutan", "BT", "BTN", "064", u"Bhutan"),
Country(u"Bolivia, Plurinational State of", "BO", "BOL", "068",
u"Bolivia, Plurinational State of"),
Country(u"Bonaire, Sint Eustatius and Saba", "BQ", "BES", "535",
u"Bonaire, Sint Eustatius and Saba"),
Country(u"Bosnia and Herzegovina", "BA", "BIH", "070",
u"Bosnia and Herzegovina"),
Country(u"Botswana", "BW", "BWA", "072", u"Botswana"),
Country(u"Bouvet Island", "BV", "BVT", "074", u"Bouvet Island"),
Country(u"Brazil", "BR", "BRA", "076", u"Brazil"),
Country(u"British Indian Ocean Territory", "IO", "IOT", "086",
u"British Indian Ocean Territory"),
Country(u"Brunei Darussalam", "BN", "BRN", "096",
u"Brunei Darussalam"),
Country(u"Bulgaria", "BG", "BGR", "100", u"Bulgaria"),
Country(u"Burkina Faso", "BF", "BFA", "854", u"Burkina Faso"),
Country(u"Burundi", "BI", "BDI", "108", u"Burundi"),
Country(u"Cambodia", "KH", "KHM", "116", u"Cambodia"),
Country(u"Cameroon", "CM", "CMR", "120", u"Cameroon"),
Country(u"Canada", "CA", "CAN", "124", u"Canada"),
Country(u"Cabo Verde", "CV", "CPV", "132", u"Cabo Verde"),
Country(u"Cayman Islands", "KY", "CYM", "136", u"Cayman Islands"),
Country(u"Central African Republic", "CF", "CAF", "140",
u"Central African Republic"),
Country(u"Chad", "TD", "TCD", "148", u"Chad"),
Country(u"Chile", "CL", "CHL", "152", u"Chile"),
Country(u"China", "CN", "CHN", "156", u"China"),
Country(u"Christmas Island", "CX", "CXR", "162", u"Christmas Island"),
Country(u"Cocos (Keeling) Islands", "CC", "CCK", "166",
u"Cocos (Keeling) Islands"),
Country(u"Colombia", "CO", "COL", "170", u"Colombia"),
Country(u"Comoros", "KM", "COM", "174", u"Comoros"),
Country(u"Congo", "CG", "COG", "178", u"Congo"),
Country(u"Congo, Democratic Republic of the", "CD", "COD", "180",
u"Congo, Democratic Republic of the"),
Country(u"Cook Islands", "CK", "COK", "184", u"Cook Islands"),
Country(u"Costa Rica", "CR", "CRI", "188", u"Costa Rica"),
Country(u"Côte d'Ivoire", "CI", "CIV", "384", u"Côte d'Ivoire"),
Country(u"Croatia", "HR", "HRV", "191", u"Croatia"),
Country(u"Cuba", "CU", "CUB", "192", u"Cuba"),
Country(u"Curaçao", "CW", "CUW", "531", u"Curaçao"),
Country(u"Cyprus", "CY", "CYP", "196", u"Cyprus"),
Country(u"Czech Republic", "CZ", "CZE", "203", u"Czech Republic"),
Country(u"Denmark", "DK", "DNK", "208", u"Denmark"),
Country(u"Djibouti", "DJ", "DJI", "262", u"Djibouti"),
Country(u"Dominica", "DM", "DMA", "212", u"Dominica"),
Country(u"Dominican Republic", "DO", "DOM", "214", u"Dominican Republic"),
Country(u"Ecuador", "EC", "ECU", "218", u"Ecuador"),
Country(u"Egypt", "EG", "EGY", "818", u"Egypt"),
Country(u"El Salvador", "SV", "SLV", "222", u"El Salvador"),
Country(u"Equatorial Guinea", "GQ", "GNQ", "226", u"Equatorial Guinea"),
Country(u"Eritrea", "ER", "ERI", "232", u"Eritrea"),
Country(u"Estonia", "EE", "EST", "233", u"Estonia"),
Country(u"Ethiopia", "ET", "ETH", "231", u"Ethiopia"),
Country(u"Falkland Islands (Malvinas)", "FK", "FLK", "238",
u"Falkland Islands (Malvinas)"),
Country(u"Faroe Islands", "FO", "FRO", "234", u"Faroe Islands"),
Country(u"Fiji", "FJ", "FJI", "242", u"Fiji"),
Country(u"Finland", "FI", "FIN", "246", u"Finland"),
Country(u"France", "FR", "FRA", "250", u"France"),
Country(u"French Guiana", "GF", "GUF", "254", u"French Guiana"),
Country(u"French Polynesia", "PF", "PYF", "258", u"French Polynesia"),
Country(u"French Southern Territories", "TF", "ATF", "260",
u"French Southern Territories"),
Country(u"Gabon", "GA", "GAB", "266", u"Gabon"),
Country(u"Gambia", "GM", "GMB", "270", u"Gambia"),
Country(u"Georgia", "GE", "GEO", "268", u"Georgia"),
Country(u"Germany", "DE", "DEU", "276", u"Germany"),
Country(u"Ghana", "GH", "GHA", "288", u"Ghana"),
Country(u"Gibraltar", "GI", "GIB", "292", u"Gibraltar"),
Country(u"Greece", "GR", "GRC", "300", u"Greece"),
Country(u"Greenland", "GL", "GRL", "304", u"Greenland"),
Country(u"Grenada", "GD", "GRD", "308", u"Grenada"),
Country(u"Guadeloupe", "GP", "GLP", "312", u"Guadeloupe"),
Country(u"Guam", "GU", "GUM", "316", u"Guam"),
Country(u"Guatemala", "GT", "GTM", "320", u"Guatemala"),
Country(u"Guernsey", "GG", "GGY", "831", u"Guernsey"),
Country(u"Guinea", "GN", "GIN", "324", u"Guinea"),
Country(u"Guinea-Bissau", "GW", "GNB", "624", u"Guinea-Bissau"),
Country(u"Guyana", "GY", "GUY", "328", u"Guyana"),
Country(u"Haiti", "HT", "HTI", "332", u"Haiti"),
Country(u"Heard Island and McDonald Islands", "HM", "HMD", "334",
u"Heard Island and McDonald Islands"),
Country(u"Holy See", "VA", "VAT", "336", u"Holy See"),
Country(u"Honduras", "HN", "HND", "340", u"Honduras"),
Country(u"Hong Kong", "HK", "HKG", "344", u"Hong Kong"),
Country(u"Hungary", "HU", "HUN", "348", u"Hungary"),
Country(u"Iceland", "IS", "ISL", "352", u"Iceland"),
Country(u"India", "IN", "IND", "356", u"India"),
Country(u"Indonesia", "ID", "IDN", "360", u"Indonesia"),
Country(u"Iran, Islamic Republic of", "IR", "IRN", "364",
u"Iran, Islamic Republic of"),
Country(u"Iraq", "IQ", "IRQ", "368", u"Iraq"),
Country(u"Ireland", "IE", "IRL", "372", u"Ireland"),
Country(u"Isle of Man", "IM", "IMN", "833", u"Isle of Man"),
Country(u"Israel", "IL", "ISR", "376", u"Israel"),
Country(u"Italy", "IT", "ITA", "380", u"Italy"),
Country(u"Jamaica", "JM", "JAM", "388", u"Jamaica"),
Country(u"Japan", "JP", "JPN", "392", u"Japan"),
Country(u"Jersey", "JE", "JEY", "832", u"Jersey"),
Country(u"Jordan", "JO", "JOR", "400", u"Jordan"),
Country(u"Kazakhstan", "KZ", "KAZ", "398", u"Kazakhstan"),
Country(u"Kenya", "KE", "KEN", "404", u"Kenya"),
Country(u"Kiribati", "KI", "KIR", "296", u"Kiribati"),
Country(u"Korea, Democratic People's Republic of", "KP", "PRK", "408",
u"Korea, Democratic People's Republic of"),
Country(u"Korea, Republic of", "KR", "KOR", "410", u"Korea, Republic of"),
Country(u"Kuwait", "KW", "KWT", "414", u"Kuwait"),
Country(u"Kyrgyzstan", "KG", "KGZ", "417", u"Kyrgyzstan"),
Country(u"Lao People's Democratic Republic", "LA", "LAO", "418",
u"Lao People's Democratic Republic"),
Country(u"Latvia", "LV", "LVA", "428", u"Latvia"),
Country(u"Lebanon", "LB", "LBN", "422", u"Lebanon"),
Country(u"Lesotho", "LS", "LSO", "426", u"Lesotho"),
Country(u"Liberia", "LR", "LBR", "430", u"Liberia"),
Country(u"Libya", "LY", "LBY", "434", u"Libya"),
Country(u"Liechtenstein", "LI", "LIE", "438", u"Liechtenstein"),
Country(u"Lithuania", "LT", "LTU", "440", u"Lithuania"),
Country(u"Luxembourg", "LU", "LUX", "442", u"Luxembourg"),
Country(u"Macao", "MO", "MAC", "446", u"Macao"),
Country(u"Macedonia, the former Yugoslav Republic of", "MK", "MKD", "807",
u"Macedonia, the former Yugoslav Republic of"),
Country(u"Madagascar", "MG", "MDG", "450", u"Madagascar"),
Country(u"Malawi", "MW", "MWI", "454", u"Malawi"),
Country(u"Malaysia", "MY", "MYS", "458", u"Malaysia"),
Country(u"Maldives", "MV", "MDV", "462", u"Maldives"),
Country(u"Mali", "ML", "MLI", "466", u"Mali"),
Country(u"Malta", "MT", "MLT", "470", u"Malta"),
Country(u"Marshall Islands", "MH", "MHL", "584", u"Marshall Islands"),
Country(u"Martinique", "MQ", "MTQ", "474", u"Martinique"),
Country(u"Mauritania", "MR", "MRT", "478", u"Mauritania"),
Country(u"Mauritius", "MU", "MUS", "480", u"Mauritius"),
Country(u"Mayotte", "YT", "MYT", "175", u"Mayotte"),
Country(u"Mexico", "MX", "MEX", "484", u"Mexico"),
Country(u"Micronesia, Federated States of", "FM", "FSM", "583",
u"Micronesia, Federated States of"),
Country(u"Moldova, Republic of", "MD", "MDA", "498",
u"Moldova, Republic of"),
Country(u"Monaco", "MC", "MCO", "492", u"Monaco"),
Country(u"Mongolia", "MN", "MNG", "496", u"Mongolia"),
Country(u"Montenegro", "ME", "MNE", "499", u"Montenegro"),
Country(u"Montserrat", "MS", "MSR", "500", u"Montserrat"),
Country(u"Morocco", "MA", "MAR", "504", u"Morocco"),
Country(u"Mozambique", "MZ", "MOZ", "508", u"Mozambique"),
Country(u"Myanmar", "MM", "MMR", "104", u"Myanmar"),
Country(u"Namibia", "NA", "NAM", "516", u"Namibia"),
Country(u"Nauru", "NR", "NRU", "520", u"Nauru"),
Country(u"Nepal", "NP", "NPL", "524", u"Nepal"),
Country(u"Netherlands", "NL", "NLD", "528", u"Netherlands"),
Country(u"New Caledonia", "NC", "NCL", "540", u"New Caledonia"),
Country(u"New Zealand", "NZ", "NZL", "554", u"New Zealand"),
Country(u"Nicaragua", "NI", "NIC", "558", u"Nicaragua"),
Country(u"Niger", "NE", "NER", "562", u"Niger"),
Country(u"Nigeria", "NG", "NGA", "566", u"Nigeria"),
Country(u"Niue", "NU", "NIU", "570", u"Niue"),
Country(u"Norfolk Island", "NF", "NFK", "574", u"Norfolk Island"),
Country(u"Northern Mariana Islands", "MP", "MNP", "580",
u"Northern Mariana Islands"),
Country(u"Norway", "NO", "NOR", "578", u"Norway"),
Country(u"Oman", "OM", "OMN", "512", u"Oman"),
Country(u"Pakistan", "PK", "PAK", "586", u"Pakistan"),
Country(u"Palau", "PW", "PLW", "585", u"Palau"),
Country(u"Palestine, State of", "PS", "PSE", "275",
u"Palestine"),
Country(u"Panama", "PA", "PAN", "591", u"Panama"),
Country(u"Papua New Guinea", "PG", "PNG", "598",
u"Papua New Guinea"),
Country(u"Paraguay", "PY", "PRY", "600", u"Paraguay"),
Country(u"Peru", "PE", "PER", "604", u"Peru"),
Country(u"Philippines", "PH", "PHL", "608", u"Philippines"),
Country(u"Pitcairn", "PN", "PCN", "612", u"Pitcairn"),
Country(u"Poland", "PL", "POL", "616", u"Poland"),
Country(u"Portugal", "PT", "PRT", "620", u"Portugal"),
Country(u"Puerto Rico", "PR", "PRI", "630", u"Puerto Rico"),
Country(u"Qatar", "QA", "QAT", "634", u"Qatar"),
Country(u"Réunion", "RE", "REU", "638", u"Réunion"),
Country(u"Romania", "RO", "ROU", "642", u"Romania"),
Country(u"Russian Federation", "RU", "RUS", "643",
u"Russian Federation"),
Country(u"Rwanda", "RW", "RWA", "646", u"Rwanda"),
Country(u"Saint Barthélemy", "BL", "BLM", "652",
u"Saint Barthélemy"),
Country(u"Saint Helena, Ascension and Tristan da Cunha",
"SH", "SHN", "654",
u"Saint Helena, Ascension and Tristan da Cunha"),
Country(u"Saint Kitts and Nevis", "KN", "KNA", "659",
u"Saint Kitts and Nevis"),
Country(u"Saint Lucia", "LC", "LCA", "662", u"Saint Lucia"),
Country(u"Saint Martin (French part)", "MF", "MAF", "663",
u"Saint Martin (French part)"),
Country(u"Saint Pierre and Miquelon", "PM", "SPM", "666",
u"Saint Pierre and Miquelon"),
Country(u"Saint Vincent and the Grenadines", "VC", "VCT", "670",
u"Saint Vincent and the Grenadines"),
Country(u"Samoa", "WS", "WSM", "882", u"Samoa"),
Country(u"San Marino", "SM", "SMR", "674", u"San Marino"),
Country(u"Sao Tome and Principe", "ST", "STP", "678",
u"Sao Tome and Principe"),
Country(u"Saudi Arabia", "SA", "SAU", "682", u"Saudi Arabia"),
Country(u"Senegal", "SN", "SEN", "686", u"Senegal"),
Country(u"Serbia", "RS", "SRB", "688", u"Serbia"),
Country(u"Seychelles", "SC", "SYC", "690", u"Seychelles"),
Country(u"Sierra Leone", "SL", "SLE", "694", u"Sierra Leone"),
Country(u"Singapore", "SG", "SGP", "702", u"Singapore"),
Country(u"Sint Maarten (Dutch part)", "SX", "SXM", "534",
u"Sint Maarten (Dutch part)"),
Country(u"Slovakia", "SK", "SVK", "703", u"Slovakia"),
Country(u"Slovenia", "SI", "SVN", "705", u"Slovenia"),
Country(u"Solomon Islands", "SB", "SLB", "090", u"Solomon Islands"),
Country(u"Somalia", "SO", "SOM", "706", u"Somalia"),
Country(u"South Africa", "ZA", "ZAF", "710", u"South Africa"),
Country(u"South Georgia and the South Sandwich Islands",
"GS", "SGS", "239",
u"South Georgia and the South Sandwich Islands",),
Country(u"South Sudan", "SS", "SSD", "728", u"South Sudan"),
Country(u"Spain", "ES", "ESP", "724", u"Spain"),
Country(u"Sri Lanka", "LK", "LKA", "144", u"Sri Lanka"),
Country(u"Sudan", "SD", "SDN", "729", u"Sudan"),
Country(u"Suriname", "SR", "SUR", "740", u"Suriname"),
Country(u"Svalbard and Jan Mayen", "SJ", "SJM", "744",
u"Svalbard and Jan Mayen"),
Country(u"Swaziland", "SZ", "SWZ", "748", u"Swaziland"),
Country(u"Sweden", "SE", "SWE", "752", u"Sweden"),
Country(u"Switzerland", "CH", "CHE", "756", u"Switzerland"),
Country(u"Syrian Arab Republic", "SY", "SYR", "760",
u"Syrian Arab Republic"),
Country(u"Taiwan, Province of China", "TW", "TWN", "158",
u"Taiwan"),
Country(u"Tajikistan", "TJ", "TJK", "762", u"Tajikistan"),
Country(u"Tanzania, United Republic of", "TZ", "TZA", "834",
u"Tanzania, United Republic of"),
Country(u"Thailand", "TH", "THA", "764", u"Thailand"),
Country(u"Timor-Leste", "TL", "TLS", "626", u"Timor-Leste"),
Country(u"Togo", "TG", "TGO", "768", u"Togo"),
Country(u"Tokelau", "TK", "TKL", "772", u"Tokelau"),
Country(u"Tonga", "TO", "TON", "776", u"Tonga"),
Country(u"Trinidad and Tobago", "TT", "TTO", "780",
u"Trinidad and Tobago"),
Country(u"Tunisia", "TN", "TUN", "788", u"Tunisia"),
Country(u"Turkey", "TR", "TUR", "792", u"Turkey"),
Country(u"Turkmenistan", "TM", "TKM", "795", u"Turkmenistan"),
Country(u"Turks and Caicos Islands", "TC", "TCA", "796",
u"Turks and Caicos Islands"),
Country(u"Tuvalu", "TV", "TUV", "798", u"Tuvalu"),
Country(u"Uganda", "UG", "UGA", "800", u"Uganda"),
Country(u"Ukraine", "UA", "UKR", "804", u"Ukraine"),
Country(u"United Arab Emirates", "AE", "ARE", "784",
u"United Arab Emirates"),
Country(u"United Kingdom", "GB", "GBR", "826", u"United Kingdom"),
Country(u"United States", "US", "USA", "840", u"United States"),
Country(u"United States Minor Outlying Islands", "UM", "UMI", "581",
u"United States Minor Outlying Islands"),
Country(u"Uruguay", "UY", "URY", "858", u"Uruguay"),
Country(u"Uzbekistan", "UZ", "UZB", "860", u"Uzbekistan"),
Country(u"Vanuatu", "VU", "VUT", "548", u"Vanuatu"),
Country(u"Venezuela, Bolivarian Republic of", "VE", "VEN", "862",
u"Venezuela, Bolivarian Republic of"),
Country(u"Viet Nam", "VN", "VNM", "704", u"Viet Nam"),
Country(u"Virgin Islands, British", "VG", "VGB", "092",
u"Virgin Islands, British"),
Country(u"Virgin Islands, U.S.", "VI", "VIR", "850",
u"Virgin Islands, U.S."),
Country(u"Wallis and Futuna", "WF", "WLF", "876", u"Wallis and Futuna"),
Country(u"Western Sahara", "EH", "ESH", "732", u"Western Sahara"),
Country(u"Yemen", "YE", "YEM", "887", u"Yemen"),
Country(u"Zambia", "ZM", "ZMB", "894", u"Zambia"),
Country(u"Zimbabwe", "ZW", "ZWE", "716", u"Zimbabwe")]
def _build_index(idx):
return dict((r[idx].upper(), r) for r in _records)
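# e.g. _build_index(1) maps upper-cased alpha2 codes to their records,
# roughly {"AF": Country(u"Afghanistan", "AF", "AFG", "004", ...), ...}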
# Internal country indexes
_by_alpha2 = _build_index(1)
_by_alpha3 = _build_index(2)
_by_numeric = _build_index(3)
_by_name = _build_index(0)
_by_apolitical_name = _build_index(4)
# Documented accessors for the country indexes
countries_by_alpha2 = _by_alpha2
countries_by_alpha3 = _by_alpha3
countries_by_numeric = _by_numeric
countries_by_name = _by_name
countries_by_apolitical_name = _by_apolitical_name
NOT_FOUND = object()
class _CountryLookup(object):
def get(self, key, default=NOT_FOUND):
if isinstance(key, Integral):
r = _by_numeric.get("%03d" % key, default)
elif isinstance(key, basestring):
k = key.upper()
if len(k) == 2:
r = _by_alpha2.get(k, default)
elif len(k) == 3 and re.match(r"[0-9]{3}", k):
r = _by_numeric.get(k, default)
elif len(k) == 3:
r = _by_alpha3.get(k, default)
elif k in _by_name:
r = _by_name.get(k, default)
else:
r = _by_apolitical_name.get(k, default)
else:
r = default
if r is NOT_FOUND:
raise KeyError(key)
return r
__getitem__ = get
def __len__(self):
return len(_records)
def __iter__(self):
return iter(_records)
def __contains__(self, item):
try:
self.get(item)
return True
except KeyError:
return False
countries = _CountryLookup()
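# A minimal usage sketch (illustrative, not part of the module itself):
# countries.get('us') -> Country(u"United States", "US", "USA", "840", ...)
# countries['DEU'].alpha2 -> "DE" (alpha3 lookup)
# countries.get(4) -> Afghanistan (integral keys are zero-padded to "004")
# 'narnia' in countries -> False (__contains__ swallows the KeyError)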
|
VertNet/api-geospatial
|
lib/iso3166/__init__.py
|
Python
|
gpl-2.0
| 18,709
|
[
"BWA"
] |
7b693024b2ddedb8b02e7328eac313a835f6134e964d579a9efa66e5e58ef509
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import imp
import json
import os
import shlex
import zipfile
import random
import re
from distutils.version import LooseVersion
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.plugins.loader import module_utils_loader, ps_module_utils_loader
from ansible.plugins.shell.powershell import async_watchdog, async_wrapper, become_wrapper, leaf_exec, exec_wrapper
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
_ANSIBALLZ_WRAPPER = True # For the test-module script to tell that this is an ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _ansiballz_main():
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read (realpath calls abspath)
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import imp
import shutil
import tempfile
import zipfile
if sys.version_info < (3,):
bytes = str
MOD_DESC = ('.py', 'U', imp.PY_SOURCE)
PY3 = False
else:
unicode = str
MOD_DESC = ('.py', 'r', imp.PY_SOURCE)
PY3 = True
ZIPDATA = """%(zipdata)s"""
# Note: temp_path isn't needed once we switch to zipimport
def invoke_module(modlib_path, temp_path, json_params):
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(modlib_path, mode='a')
# py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
# Note: Remove the following section when we switch to zipimport
# Write the module to disk for imp.load_module
module = os.path.join(temp_path, '__main__.py')
with open(module, 'wb') as f:
f.write(z.read('__main__.py'))
f.close()
# End pre-zipimport section
z.close()
# Put the zipped up module_utils we got from the controller first in the python path so that we
# can monkeypatch the right basic
sys.path.insert(0, modlib_path)
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
with open(module, 'rb') as mod:
imp.load_module('__main__', mod, module, MOD_DESC)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here are the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, '__main__.py')
if command == 'excommunicate':
print('The excommunicate debug command is deprecated and will be removed in 2.11. Use execute instead.')
command = 'execute'
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# Set pythonpath to the debug dir
sys.path.insert(0, basedir)
# read in the args file which the user may have modified
with open(args_path, 'rb') as f:
json_params = f.read()
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
# Run the module! By importing it as '__main__', it thinks it is executing as a script
import imp
with open(script_path, 'r') as f:
importer = imp.load_module('__main__', f, script_path, ('.py', 'r', imp.PY_SOURCE))
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
# Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
# (this helps ansible-test produce coverage stats)
temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
# Note: temp_path isn't needed once we switch to zipimport
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
# tempdir creation probably failed
pass
sys.exit(exitcode)
if __name__ == '__main__':
_ansiballz_main()
'''
ANSIBALLZ_COVERAGE_TEMPLATE = '''
# Access to the working directory is required by coverage.
# Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
try:
os.getcwd()
except OSError:
os.chdir('/')
os.environ['COVERAGE_FILE'] = '%(coverage_output)s'
import atexit
import coverage
cov = coverage.Coverage(config_file='%(coverage_config)s')
def atexit_coverage():
cov.stop()
cov.save()
atexit.register(atexit_coverage)
cov.start()
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
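# Illustrative sketch (not part of the original source): _strip_comments
# drops comment-only and blank lines but keeps code lines untouched, e.g.
#   _strip_comments(u'# note\nx = 1\n\ny = 2') == u'x = 1\ny = 2'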
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
# ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
class ModuleDepFinder(ast.NodeVisitor):
# Caveats:
# This code currently does not handle:
# * relative imports (py2.6+): from . import urls
IMPORT_PREFIX_SIZE = len('ansible.module_utils.')
def __init__(self, *args, **kwargs):
"""
Walk the ast tree for the python module.
Save submodule[.submoduleN][.identifier] into self.submodules
self.submodules will end up with tuples like:
- ('basic',)
- ('urls', 'fetch_url')
- ('database', 'postgres')
- ('database', 'postgres', 'quote')
It's up to calling code to determine whether the final element of the
dotted strings are module names or something else (function, class, or
variable names)
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
def visit_Import(self, node):
# import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')):
py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
py_mod = tuple(py_mod.split('.'))
self.submodules.add(py_mod)
self.generic_visit(node)
def visit_ImportFrom(self, node):
# Special case: six needs dedicated handling because of its
# unusual import logic
if node.names[0].name == '_six':
self.submodules.add(('_six',))
elif node.module.startswith('ansible.module_utils'):
where_from = node.module[self.IMPORT_PREFIX_SIZE:]
if where_from:
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
py_mod = tuple(where_from.split('.'))
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
else:
# from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
for alias in node.names:
self.submodules.add((alias.name,))
self.generic_visit(node)
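# Illustrative sketch (assumption, not part of the original source):
#   finder = ModuleDepFinder()
#   finder.visit(ast.parse(
#       'from ansible.module_utils.basic import AnsibleModule'))
#   finder.submodules == {('basic', 'AnsibleModule')}
# It is then up to recursive_finder() below to decide whether the last
# element names a module or an identifier inside one.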
class PSModuleDepFinder():
def __init__(self):
self.modules = dict()
self.ps_version = None
self.os_version = None
self.become = False
self._re_module = re.compile(to_bytes(r'(?i)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'))
self._re_ps_version = re.compile(to_bytes(r'(?i)^#requires\s+\-version\s+([0-9]+(\.[0-9]+){0,3})$'))
self._re_os_version = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-osversion\s+([0-9]+(\.[0-9]+){0,3})$'))
self._re_become = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-become$'))
def scan_module(self, module_data):
lines = module_data.split(b'\n')
module_utils = set()
for line in lines:
module_util_match = self._re_module.match(line)
if module_util_match:
# tolerate windows line endings by stripping any remaining newline chars
module_util_name = to_text(module_util_match.group(1).rstrip())
if module_util_name not in self.modules.keys():
module_utils.add(module_util_name)
ps_version_match = self._re_ps_version.match(line)
if ps_version_match:
self._parse_version_match(ps_version_match, "ps_version")
os_version_match = self._re_os_version.match(line)
if os_version_match:
self._parse_version_match(os_version_match, "os_version")
# once become is set, no need to keep on checking recursively
if not self.become:
become_match = self._re_become.match(line)
if become_match:
self.become = True
# recursively drill into each Requires to see if there are any more
# requirements
for m in set(module_utils):
m = to_text(m)
mu_path = ps_module_utils_loader.find_plugin(m, ".psm1")
if not mu_path:
raise AnsibleError('Could not find imported module support code for \'%s\'.' % m)
module_util_data = to_bytes(_slurp(mu_path))
self.modules[m] = module_util_data
self.scan_module(module_util_data)
def _parse_version_match(self, match, attribute):
new_version = to_text(match.group(1)).rstrip()
# PowerShell cannot cast a string like "1" to Version; it must have at
# least major.minor to be valid, so we append .0
if match.group(2) is None:
new_version = "%s.0" % new_version
existing_version = getattr(self, attribute, None)
if existing_version is None:
setattr(self, attribute, new_version)
else:
# determine which is the latest version and set that
if LooseVersion(new_version) > LooseVersion(existing_version):
setattr(self, attribute, new_version)
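# Illustrative sketch (assumption): a '#Requires -Version 3' line matches
# with group(2) of None, so the stored ps_version becomes '3.0'; a later
# '#Requires -Version 5.1' in another module_util raises it to '5.1'.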
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, templar, args=tuple()):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip()
if interpreter_config not in task_vars:
return (None, interpreter)
interpreter = templar.template(task_vars[interpreter_config].strip())
shebang = u'#!' + interpreter
if args:
shebang = shebang + u' ' + u' '.join(args)
return (shebang, interpreter)
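# Illustrative sketch (assumption, values are hypothetical): with
#   task_vars = {'ansible_python_interpreter': '/opt/py3/bin/python'}
# and a templar that passes the value through unchanged,
#   _get_shebang(u'/usr/bin/python', task_vars, templar)
# returns (u'#!/opt/py3/bin/python', u'/opt/py3/bin/python'); without
# the variable it returns (None, u'/usr/bin/python').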
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
the module and its module_utils files need.
"""
# Parse the module and find the imports of ansible.module_utils
tree = ast.parse(data)
finder = ModuleDepFinder()
finder.visit(tree)
#
# Determine which of the imports we've found are modules (vs. class,
# function, or variable names) from packages
#
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
module_utils_paths.append(_MODULE_UTILS_PATH)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
if py_module_name[0] == 'six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('six', module_utils_paths)
py_module_name = ('six',)
idx = 0
elif py_module_name[0] == '_six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('_six', [os.path.join(p, 'six') for p in module_utils_paths])
py_module_name = ('six', '_six')
idx = 0
else:
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
module_info = imp.find_module(py_module_name[-idx],
[os.path.join(p, *py_module_name[:-idx]) for p in module_utils_paths])
break
except ImportError:
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % (name,)]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
# Found a byte-compiled file rather than source. We cannot send
# byte-compiled files over the wire as the python version might differ.
# imp.find_module seems to prefer to return source packages so we just
# error out if imp.find_module returns byte compiled files (This is
# fragile as it depends on undocumented imp.find_module behaviour)
if module_info[2][2] not in (imp.PY_SOURCE, imp.PKG_DIRECTORY):
msg = ['Could not find python source for imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info[2][2] == imp.PKG_DIRECTORY:
# Read the __init__.py instead of the module file as this is
# a python package
normalized_name = py_module_name + ('__init__',)
if normalized_name not in py_module_names:
normalized_path = os.path.join(os.path.join(module_info[1], '__init__.py'))
normalized_data = _slurp(normalized_path)
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
else:
normalized_name = py_module_name
if normalized_name not in py_module_names:
normalized_path = module_info[1]
normalized_data = module_info[0].read()
module_info[0].close()
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
# Make sure that all the packages that this module is a part of
# are also added
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
pkg_dir_info = imp.find_module(py_pkg_name[-1],
[os.path.join(p, *py_pkg_name[:-1]) for p in module_utils_paths])
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
# FIXME: Currently the AnsiBallZ wrapper monkeypatches module args into a global
# variable in basic.py. If a module doesn't import basic.py, then the AnsiBallZ wrapper will
# traceback when it tries to monkeypatch. So, for now, we have to unconditionally include
# basic.py.
#
# In the future we need to change the wrapper to monkeypatch the args into a global variable in
# their own, separate python module. That way we won't require basic.py. Modules which don't
# want basic.py can import that instead. AnsibleModule will need to change to import the vars
# from the separate python module and mirror the args into its global variable for backwards
# compatibility.
if ('basic',) not in py_module_names:
pkg_dir_info = imp.find_module('basic', module_utils_paths)
normalized_modules.add(('basic',))
py_module_cache[('basic',)] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
# End of AnsiballZ hack
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(os.path.join("ansible/module_utils",
py_module_file_name), py_module_cache[py_module_name][0])
display.vvvvv("Using module_utils file %s" % py_module_cache[py_module_name][1])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
recursive_finder(py_module_file, py_module_cache[py_module_file][0], py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
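# Illustrative note (assumption): this is the classic grep-style binary
# heuristic - delete every "texty" byte from the first 1024 bytes and
# treat any leftovers (e.g. NUL bytes) as evidence of binary content:
#   _is_binary(b'#!/usr/bin/env python\n')  # -> False
#   _is_binary(b'\x7fELF\x00\x01')          # -> True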
def _create_powershell_wrapper(b_module_data, module_args, environment,
async_timeout, become, become_method,
become_user, become_password, become_flags,
scan_dependencies=True):
# creates the manifest/wrapper used in PowerShell modules to enable things
# like become and async - this is also called in action/script.py
exec_manifest = dict(
module_entry=to_text(base64.b64encode(b_module_data)),
powershell_modules=dict(),
module_args=module_args,
actions=['exec'],
environment=environment
)
exec_manifest['exec'] = to_text(base64.b64encode(to_bytes(leaf_exec)))
if async_timeout > 0:
exec_manifest["actions"].insert(0, 'async_watchdog')
exec_manifest["async_watchdog"] = to_text(
base64.b64encode(to_bytes(async_watchdog)))
exec_manifest["actions"].insert(0, 'async_wrapper')
exec_manifest["async_wrapper"] = to_text(
base64.b64encode(to_bytes(async_wrapper)))
exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
exec_manifest["async_timeout_sec"] = async_timeout
if become and become_method == 'runas':
exec_manifest["actions"].insert(0, 'become')
exec_manifest["become_user"] = become_user
exec_manifest["become_password"] = become_password
exec_manifest['become_flags'] = become_flags
exec_manifest["become"] = to_text(
base64.b64encode(to_bytes(become_wrapper)))
finder = PSModuleDepFinder()
# we don't want to scan for any module_utils or other module related flags
# if scan_dependencies=False - action/script sets to False
if scan_dependencies:
finder.scan_module(b_module_data)
for name, data in finder.modules.items():
b64_data = to_text(base64.b64encode(data))
exec_manifest['powershell_modules'][name] = b64_data
exec_manifest['min_ps_version'] = finder.ps_version
exec_manifest['min_os_version'] = finder.os_version
if finder.become and 'become' not in exec_manifest['actions']:
exec_manifest['actions'].insert(0, 'become')
exec_manifest['become_user'] = 'SYSTEM'
exec_manifest['become_password'] = None
exec_manifest['become_flags'] = None
exec_manifest['become'] = to_text(
base64.b64encode(to_bytes(become_wrapper)))
# FUTURE: smuggle this back as a dict instead of serializing here;
# the connection plugin may need to modify it
b_json = to_bytes(json.dumps(exec_manifest))
b_data = exec_wrapper.replace(b"$json_raw = ''",
b"$json_raw = @'\r\n%s\r\n'@" % b_json)
return b_data
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
become_method, become_user, become_password, become_flags, environment):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
# Do REPLACER before 'from ansible.module_utils' because we need to make
# sure we substitute REPLACER with 'from ansible.module_utils.basic import *'
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif b'from ansible.module_utils.' in b_module_data:
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE):
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return b_module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
if module_substyle == 'python':
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
python_repred_params = repr(json.dumps(params))
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
zipdata = open(cached_module_filename, 'rb').read()
else:
if module_name in action_write_locks.action_write_locks:
display.debug('ANSIBALLZ: Using lock for %s' % module_name)
lock = action_write_locks.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
lock = action_write_locks.action_write_locks[None]
display.debug('ANSIBALLZ: Acquiring lock')
with lock:
display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# Note: If we need to import from release.py first,
# remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
zf.writestr('ansible/__init__.py',
b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' +
to_bytes(__version__) + b'"\n__author__="' +
to_bytes(__author__) + b'"\n')
zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
zf.writestr('__main__.py', b_module_data)
py_module_cache = {('__init__',): (b'', '[builtin]')}
recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.makedirs(lookup_path)
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ANSIBALLZ: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if zipdata is None:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
zipdata = open(cached_module_filename, 'rb').read()
except IOError:
raise AnsibleError('A different worker process failed to create module file. '
'Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar)
if shebang is None:
shebang = u'#!/usr/bin/python'
# Enclose the parts of the interpreter in quotes because we're
# substituting it into the template as a Python string
interpreter_parts = interpreter.split(u' ')
interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))
coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
if coverage_config:
# Enable code coverage analysis of the module.
# This feature is for internal testing and may change without notice.
coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
coverage_config=coverage_config,
coverage_output=os.environ['_ANSIBLE_COVERAGE_OUTPUT']
)
else:
coverage = ''
now = datetime.datetime.utcnow()
output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
params=python_repred_params,
shebang=shebang,
interpreter=interpreter,
coding=ENCODING_STRING,
year=now.year,
month=now.month,
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
coverage=coverage,
)))
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
shebang = u'#!powershell'
# create the common exec wrapper payload and set that as the module_data
# bytes
b_module_data = _create_powershell_wrapper(
b_module_data, module_args, environment, async_timeout, become,
become_method, become_user, become_password, become_flags,
scan_dependencies=True
)
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
# ansiballz) If we remove them from jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
return (b_module_data, module_style, shebang)
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
become_method=None, become_user=None, become_password=None, become_flags=None, environment=None):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
task_vars = {} if task_vars is None else task_vars
environment = {} if environment is None else environment
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
async_timeout=async_timeout, become=become, become_method=become_method,
become_user=become_user, become_password=become_password, become_flags=become_flags,
environment=environment)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
elif shebang is None:
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split on python-2.6 needs bytes. On python-3.x it needs text
args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
# _get_shebang() takes text strings
args = [to_text(a, errors='surrogate_or_strict') for a in args]
interpreter = args[0]
b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0],
errors='surrogate_or_strict', nonstring='passthru')
if b_new_shebang:
b_lines[0] = b_shebang = b_new_shebang
if os.path.basename(interpreter).startswith(u'python'):
b_lines.insert(1, b_ENCODING_STRING)
shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict')
else:
# No shebang, assume a binary module?
pass
b_module_data = b"\n".join(b_lines)
return (b_module_data, module_style, shebang)
|
maartenq/ansible
|
lib/ansible/executor/module_common.py
|
Python
|
gpl-3.0
| 46,743
|
[
"VisIt"
] |
fb4d867e0c4966cc822349f47d7a8a9f355fae90f729472745c5646206a01d87
|
from __future__ import print_function
from math import sqrt
import numpy as np
from ase.atoms import Atoms
from ase.calculators.singlepoint import SinglePointCalculator
from ase.constraints import FixAtoms
from ase.data import covalent_radii
from ase.gui.defaults import read_defaults
from ase.io import read, write, string2index
class Images:
def __init__(self, images=None):
if images is not None:
self.initialize(images)
def initialize(self, images, filenames=None, init_magmom=False):
self.natoms = len(images[0])
self.nimages = len(images)
if filenames is None:
filenames = [None] * self.nimages
self.filenames = filenames
if hasattr(images[0], 'get_shapes'):
self.Q = np.empty((self.nimages, self.natoms, 4))
self.shapes = images[0].get_shapes()
import os as os
if os.path.exists('shapes'):
shapesfile = open('shapes')
lines = shapesfile.readlines()
shapesfile.close()
if '#{type:(shape_x,shape_y,shape_z), .....,}' in lines[0]:
shape = eval(lines[1])
shapes = []
for an in images[0].get_atomic_numbers():
shapes.append(shape[an])
self.shapes = np.array(shapes)
else:
print('shapes file has wrong format')
else:
print('no shapesfile found: default shapes were used!')
else:
self.shapes = None
self.P = np.empty((self.nimages, self.natoms, 3))
self.V = np.empty((self.nimages, self.natoms, 3))
self.E = np.empty(self.nimages)
self.K = np.empty(self.nimages)
self.F = np.empty((self.nimages, self.natoms, 3))
self.M = np.empty((self.nimages, self.natoms))
self.T = np.empty((self.nimages, self.natoms), int)
self.A = np.empty((self.nimages, 3, 3))
self.D = np.empty((self.nimages, 3))
self.Z = images[0].get_atomic_numbers()
self.q = np.empty((self.nimages, self.natoms))
self.pbc = images[0].get_pbc()
self.covalent_radii = covalent_radii
config = read_defaults()
if config['covalent_radii'] is not None:
for data in config['covalent_radii']:
self.covalent_radii[data[0]] = data[1]
warning = False
for i, atoms in enumerate(images):
natomsi = len(atoms)
if (natomsi != self.natoms or
(atoms.get_atomic_numbers() != self.Z).any()):
raise RuntimeError('Cannot handle images with '
'different numbers of atoms or '
'different kinds of atoms!')
self.P[i] = atoms.get_positions()
self.V[i] = atoms.get_velocities()
if hasattr(self, 'Q'):
self.Q[i] = atoms.get_quaternions()
self.A[i] = atoms.get_cell()
self.D[i] = atoms.get_celldisp().reshape((3,))
if (atoms.get_pbc() != self.pbc).any():
warning = True
try:
self.E[i] = atoms.get_potential_energy()
except RuntimeError:
self.E[i] = np.nan
self.K[i] = atoms.get_kinetic_energy()
try:
self.F[i] = atoms.get_forces(apply_constraint=False)
except RuntimeError:
self.F[i] = np.nan
try:
if init_magmom:
self.M[i] = atoms.get_initial_magnetic_moments()
else:
M = atoms.get_magnetic_moments()
if M.ndim == 2:
M = M[:, 2]
self.M[i] = M
except (RuntimeError, AttributeError):
self.M[i] = atoms.get_initial_magnetic_moments()
self.q[i] = atoms.get_initial_charges()
# added support for tags
try:
self.T[i] = atoms.get_tags()
except RuntimeError:
self.T[i] = 0
if warning:
print('WARNING: Not all images have the same boundary conditions!')
self.selected = np.zeros(self.natoms, bool)
self.selected_ordered = []
self.atoms_to_rotate_0 = np.zeros(self.natoms, bool)
self.visible = np.ones(self.natoms, bool)
self.nselected = 0
self.set_dynamic(constraints=images[0].constraints)
self.repeat = np.ones(3, int)
self.set_radii(config['radii_scale'])
def prepare_new_atoms(self):
"Marks that the next call to append_atoms should clear the images."
self.next_append_clears = True
def append_atoms(self, atoms, filename=None):
"Append an atoms object to the images already stored."
assert len(atoms) == self.natoms
if self.next_append_clears:
i = 0
else:
i = self.nimages
for name in ('P', 'V', 'E', 'K', 'F', 'M', 'A', 'T', 'D', 'q'):
a = getattr(self, name)
newa = np.empty((i + 1,) + a.shape[1:], a.dtype)
if not self.next_append_clears:
newa[:-1] = a
setattr(self, name, newa)
self.next_append_clears = False
self.P[i] = atoms.get_positions()
self.V[i] = atoms.get_velocities()
self.A[i] = atoms.get_cell()
self.D[i] = atoms.get_celldisp().reshape((3,))
self.q[i] = atoms.get_initial_charges()
try:
self.E[i] = atoms.get_potential_energy()
except RuntimeError:
self.E[i] = np.nan
self.K[i] = atoms.get_kinetic_energy()
try:
self.F[i] = atoms.get_forces(apply_constraint=False)
except RuntimeError:
self.F[i] = np.nan
try:
self.M[i] = atoms.get_magnetic_moments()
except (RuntimeError, AttributeError):
self.M[i] = np.nan
try:
self.T[i] = atoms.get_tags()
except AttributeError:
if i == 0:
self.T[i] = 0
else:
self.T[i] = self.T[i - 1]
self.nimages = i + 1
self.filenames.append(filename)
self.set_dynamic()
return self.nimages
def set_radii(self, scale):
if self.shapes is None:
self.r = self.covalent_radii[self.Z] * scale
else:
self.r = np.sqrt(np.sum(self.shapes**2, axis=1)) * scale
def read(self, filenames, index=-1, filetype=None):
images = []
names = []
for filename in filenames:
i = read(filename, index, filetype)
if not isinstance(i, list):
i = [i]
images.extend(i)
names.extend([filename] * len(i))
self.initialize(images, names)
for image in images:
if 'radii' in image.info:
self.set_radii(image.info['radii'])
break
def import_atoms(self, filename, cur_frame):
if filename:
filename = filename[0]
old_a = self.get_atoms(cur_frame)
imp_a = read(filename, -1)
new_a = old_a + imp_a
self.initialize([new_a], [filename])
def repeat_images(self, repeat):
n = self.repeat.prod()
repeat = np.array(repeat)
self.repeat = repeat
N = repeat.prod()
natoms = self.natoms // n
P = np.empty((self.nimages, natoms * N, 3))
V = np.empty((self.nimages, natoms * N, 3))
M = np.empty((self.nimages, natoms * N))
T = np.empty((self.nimages, natoms * N), int)
F = np.empty((self.nimages, natoms * N, 3))
Z = np.empty(natoms * N, int)
r = np.empty(natoms * N)
dynamic = np.empty(natoms * N, bool)
a0 = 0
for i0 in range(repeat[0]):
for i1 in range(repeat[1]):
for i2 in range(repeat[2]):
a1 = a0 + natoms
for i in range(self.nimages):
P[i, a0:a1] = (self.P[i, :natoms] +
np.dot((i0, i1, i2), self.A[i]))
V[:, a0:a1] = self.V[:, :natoms]
F[:, a0:a1] = self.F[:, :natoms]
M[:, a0:a1] = self.M[:, :natoms]
T[:, a0:a1] = self.T[:, :natoms]
Z[a0:a1] = self.Z[:natoms]
r[a0:a1] = self.r[:natoms]
dynamic[a0:a1] = self.dynamic[:natoms]
a0 = a1
self.P = P
self.V = V
self.F = F
self.Z = Z
self.T = T
self.M = M
self.r = r
self.dynamic = dynamic
self.natoms = natoms * N
self.selected = np.zeros(natoms * N, bool)
self.atoms_to_rotate_0 = np.zeros(self.natoms, bool)
self.visible = np.ones(natoms * N, bool)
self.nselected = 0
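# Illustrative sketch (assumption): images.repeat_images((2, 2, 1))
# replicates every frame's atoms over a 2x2x1 grid of cell translations,
# multiplying natoms by 4 and resetting selection/visibility flags.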
def center(self):
""" center each image in the existing unit cell, keeping the cell constant. """
c = self.A.sum(axis=1) / 2.0 - self.P.mean(axis=1)
self.P += c[:, np.newaxis, :]
def graph(self, expr):
""" routine to create the data in ase-gui graphs, defined by the string expr. """
import ase.units as units
code = compile(expr + ',', 'atoms.py', 'eval')
n = self.nimages
def d(n1, n2):
return sqrt(((R[n1] - R[n2])**2).sum())
def a(n1, n2, n3):
v1 = R[n1]-R[n2]
v2 = R[n3]-R[n2]
arg = np.vdot(v1,v2)/(sqrt((v1**2).sum()*(v2**2).sum()))
if arg > 1.0: arg = 1.0
if arg < -1.0: arg = -1.0
return 180.0*np.arccos(arg)/np.pi
def dih(n1, n2, n3, n4):
# vector 0->1, 1->2, 2->3 and their normalized cross products:
a = R[n2]-R[n1]
b = R[n3]-R[n2]
c = R[n4]-R[n3]
bxa = np.cross(b,a)
bxa /= np.sqrt(np.vdot(bxa,bxa))
cxb = np.cross(c,b)
cxb /= np.sqrt(np.vdot(cxb,cxb))
angle = np.vdot(bxa,cxb)
# check for numerical trouble due to finite precision:
if angle < -1: angle = -1
if angle > 1: angle = 1
angle = np.arccos(angle)
if (np.vdot(bxa,c)) > 0: angle = 2*np.pi-angle
return angle*180.0/np.pi
# get number of mobile atoms for temperature calculation
ndynamic = 0
for dyn in self.dynamic:
if dyn: ndynamic += 1
S = self.selected
D = self.dynamic[:, np.newaxis]
E = self.E
s = 0.0
data = []
for i in range(n):
R = self.P[i]
V = self.V[i]
F = self.F[i]
A = self.A[i]
M = self.M[i]
f = ((F * D)**2).sum(1)**.5
fmax = max(f)
fave = f.mean()
epot = E[i]
ekin = self.K[i]
e = epot + ekin
T = 2.0 * ekin / (3.0 * ndynamic * units.kB)
data = eval(code)
if i == 0:
m = len(data)
xy = np.empty((m, n))
xy[:, i] = data
if i + 1 < n:
s += sqrt(((self.P[i + 1] - R)**2).sum())
return xy
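# Illustrative sketch (assumption): expr is a comma-separated expression
# evaluated once per frame, e.g. images.graph('i, e, fmax') returns a
# (3, nimages) array holding the frame index, total energy (epot + ekin)
# and maximum force on the mobile atoms; the helpers d(n1, n2),
# a(n1, n2, n3) and dih(n1, n2, n3, n4) give distances, angles and
# dihedrals between the listed atoms.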
def set_dynamic(self, constraints=None):
self.dynamic = np.ones(self.natoms, bool)
if constraints is not None:
for con in constraints:
if isinstance(con, FixAtoms):
self.dynamic[con.index] = False
def write(self, filename, rotations='', show_unit_cell=False, bbox=None,
**kwargs):
indices = range(self.nimages)
p = filename.rfind('@')
if p != -1:
try:
slice = string2index(filename[p + 1:])
except ValueError:
pass
else:
indices = indices[slice]
filename = filename[:p]
if isinstance(indices, int):
indices = [indices]
images = [self.get_atoms(i) for i in indices]
if len(filename) > 4 and filename[-4:] in ['.eps', '.png', '.pov']:
write(filename, images,
rotation=rotations, show_unit_cell=show_unit_cell,
bbox=bbox, **kwargs)
else:
write(filename, images, **kwargs)
def get_atoms(self, frame, remove_hidden=False):
atoms = Atoms(positions=self.P[frame],
numbers=self.Z,
magmoms=self.M[0],
tags=self.T[frame],
cell=self.A[frame],
pbc=self.pbc)
if not np.isnan(self.V).any():
atoms.set_velocities(self.V[frame])
# check for constrained atoms and add them accordingly:
if not self.dynamic.all():
atoms.set_constraint(FixAtoms(mask=1 - self.dynamic))
# Remove hidden atoms if applicable
if remove_hidden:
atoms = atoms[self.visible]
f = self.F[frame][self.visible]
else:
f = self.F[frame]
atoms.set_calculator(SinglePointCalculator(atoms,
energy=self.E[frame],
forces=f))
return atoms
def delete(self, i):
self.nimages -= 1
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
A = np.empty((self.nimages, 3, 3))
E = np.empty(self.nimages)
P[:i] = self.P[:i]
P[i:] = self.P[i + 1:]
self.P = P
V[:i] = self.V[:i]
V[i:] = self.V[i + 1:]
self.V = V
F[:i] = self.F[:i]
F[i:] = self.F[i + 1:]
self.F = F
A[:i] = self.A[:i]
A[i:] = self.A[i + 1:]
self.A = A
E[:i] = self.E[:i]
E[i:] = self.E[i + 1:]
self.E = E
del self.filenames[i]
def aneb(self):
n = self.nimages
assert n % 5 == 0
levels = n // 5
n = self.nimages = 2 * levels + 3
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
E = np.empty(self.nimages)
for L in range(levels):
P[L] = self.P[L * 5]
P[n - L - 1] = self.P[L * 5 + 4]
V[L] = self.V[L * 5]
V[n - L - 1] = self.V[L * 5 + 4]
F[L] = self.F[L * 5]
F[n - L - 1] = self.F[L * 5 + 4]
E[L] = self.E[L * 5]
E[n - L - 1] = self.E[L * 5 + 4]
for i in range(3):
P[levels + i] = self.P[levels * 5 - 4 + i]
V[levels + i] = self.V[levels * 5 - 4 + i]
F[levels + i] = self.F[levels * 5 - 4 + i]
E[levels + i] = self.E[levels * 5 - 4 + i]
self.P = P
self.V = V
self.F = F
self.E = E
def interpolate(self, m):
assert self.nimages == 2
self.nimages = 2 + m
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
A = np.empty((self.nimages, 3, 3))
E = np.empty(self.nimages)
T = np.empty((self.nimages, self.natoms), int)
D = np.empty((self.nimages, 3))
P[0] = self.P[0]
V[0] = self.V[0]
F[0] = self.F[0]
A[0] = self.A[0]
E[0] = self.E[0]
T[:] = self.T[0]
for i in range(1, m + 1):
x = i / (m + 1.0)
y = 1 - x
P[i] = y * self.P[0] + x * self.P[1]
V[i] = y * self.V[0] + x * self.V[1]
F[i] = y * self.F[0] + x * self.F[1]
A[i] = y * self.A[0] + x * self.A[1]
E[i] = y * self.E[0] + x * self.E[1]
D[i] = y * self.D[0] + x * self.D[1]
P[-1] = self.P[1]
V[-1] = self.V[1]
F[-1] = self.F[1]
A[-1] = self.A[1]
E[-1] = self.E[1]
D[-1] = self.D[1]
self.P = P
self.V = V
self.F = F
self.A = A
self.E = E
self.T = T
self.D = D
self.filenames[1:1] = [None] * m
|
suttond/MODOI
|
ase/gui/images.py
|
Python
|
lgpl-3.0
| 16,663
|
[
"ASE"
] |
2e35f2972ac56127008ab7474771776021a5c5bd04667bc462a6c50070955dd4
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tfr_gen: Generate mlir tfr decomposition function from python code."""
# pylint: disable=invalid-name
# pylint: disable=missing-function-docstring
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import os
import re
import types
from typing import List, Tuple
import gast as ast
from tensorflow.compiler.mlir.tfr import tfr_wrapper as tfr
from tensorflow.core.framework import types_pb2
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct import transpiler
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import reaching_fndefs
from tensorflow.python.autograph.pyct.static_analysis import type_inference
from tensorflow.python.framework import load_library
from tensorflow.python.framework import op_def_registry
from tensorflow.python.util import tf_inspect
class TFRTypes(enum.Enum):
"""All the supported types.
1-3: tfr types
4-99: mlir built-in types
100-199: TF related translator internal types
200- : Python related translator internal types
"""
TENSOR = 1
TENSOR_LIST = 2
ATTR = 3
NONE = 4
SHAPE = 5 # shape -> !shape.shape
I1 = 21
I32 = 22
I64 = 23
F32 = 24
INDEX = 25
AG_UNDEFINED_VAL = 100
AG_BUILTIN_FUNC = 101
TF_RAW_OP = 102
TF_REGION = 103
TF_TENSOR_SHAPE_FUNC = 104 # shape.as_list
TF_TENSOR_SHAPE_LIST = 105 # shape.as_list()
PY_BUILTIN_FUNC = 200
# As these are not real types, __getattribute__ helps them appear more like
# actual types (i.e. class definitions).
def __getattribute__(self, name):
if name == 'shape' and object.__getattribute__(self, 'value') == 1:
return TFRTypes.SHAPE
if name == 'as_list' and object.__getattribute__(self, 'value') == 5:
return TFRTypes.TF_TENSOR_SHAPE_FUNC
return object.__getattribute__(self, name)
def __str__(self):
if self.value < 4: # pylint: disable=comparison-with-callable
return '!tfr.' + self.name.lower()
elif self.value < 10: # pylint: disable=comparison-with-callable
return '!shape.' + self.name.lower()
else:
return self.name.lower()
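# Illustrative sketch (assumption): __str__ yields the MLIR spelling of
# each member, e.g. str(TFRTypes.TENSOR) == '!tfr.tensor',
# str(TFRTypes.SHAPE) == '!shape.shape', str(TFRTypes.I64) == 'i64'.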
_attribute_types = [
TFRTypes.I1, TFRTypes.I32, TFRTypes.I64, TFRTypes.F32, TFRTypes.INDEX,
TFRTypes.ATTR
]
def _get_type_from_proto(arg_def=None, attr_def=None):
if not arg_def:
if attr_def.type == 'bool':
return TFRTypes.I1
elif attr_def.type == 'int32':
return TFRTypes.I32
elif attr_def.type == 'int' or attr_def.type == 'int64':
return TFRTypes.I64
elif attr_def.type == 'float':
return TFRTypes.F32
else:
return TFRTypes.ATTR
if arg_def.number_attr or arg_def.type_list_attr:
return TFRTypes.TENSOR_LIST
else:
return TFRTypes.TENSOR
def _get_type_info_from_proto(arg_def=None, attr_def=None):
attr_type = _get_type_from_proto(arg_def, attr_def)
if not arg_def:
return '{}{{tfr.name="{}"}}'.format(attr_type, attr_def.name)
else:
attr_names = []
if arg_def.number_attr:
attr_names.append(arg_def.number_attr)
if arg_def.type_attr:
attr_names.append(arg_def.type_attr)
if arg_def.type_list_attr:
attr_names.append(arg_def.type_list_attr)
# TODO(fengliuai): currently we don't support backward type inference, so we
# have to store these non-derivable types in the signatures, and then they
# can be used to cast the values when raising to tf ops.
if arg_def.type == types_pb2.DT_FLOAT:
attr_names.append('f32_')
elif arg_def.type == types_pb2.DT_INT32:
attr_names.append('i32_')
elif arg_def.type == types_pb2.DT_INT64:
attr_names.append('i64_')
elif arg_def.type == types_pb2.DT_BOOL:
attr_names.append('i1_')
if not attr_names:
return str(attr_type)
else:
return '{}<{}>'.format(attr_type, ','.join(attr_names))
def _get_val_from_proto(attr_type, attr_val):
if attr_type == TFRTypes.I1:
return 'true' if attr_val.b else 'false'
elif attr_type == TFRTypes.I32 or attr_type == TFRTypes.I64:
return attr_val.i
elif attr_type == TFRTypes.F32:
return attr_val.f
elif attr_type == TFRTypes.ATTR:
# string
if attr_val.HasField('s'):
return '"{}"'.format(attr_val.s.decode())
# type
if attr_val.HasField('type'):
if attr_val.type == types_pb2.DT_FLOAT:
return 'f32'
elif attr_val.type == types_pb2.DT_INT32:
return 'i32'
elif attr_val.type == types_pb2.DT_INT64:
return 'i64'
elif attr_val.type == types_pb2.DT_BOOL:
return 'i1'
# list
if attr_val.HasField('list'):
if attr_val.list.f:
elt_ty = TFRTypes.F32
values = attr_val.list.f
elif attr_val.list.i:
elt_ty = TFRTypes.I64
values = attr_val.list.i
else:
elt_ty = TFRTypes.NONE
values = []
array_attr_elts = ['{}:{}'.format(val, elt_ty) for val in values]
return '[{}]'.format(','.join(array_attr_elts))
raise NotImplementedError(
'Proto AttrValue not recognized. type: {}, value: {}'.format(
attr_type, attr_val))
def _collect_derived_attrs_from_proto(op_def):
derived_attrs = set()
for arg in op_def.input_arg:
if arg.type_attr:
derived_attrs.add(arg.type_attr)
if arg.number_attr:
derived_attrs.add(arg.number_attr)
if arg.type_list_attr:
derived_attrs.add(arg.type_list_attr)
# TODO(fengliuai): currently we don't support backward type inference, so we
# have to store these non-derivable types in the signatures, and then they
# can be used to cast the values when raising to tf ops.
if arg.type == types_pb2.DT_FLOAT:
derived_attrs.add('f32_')
elif arg.type == types_pb2.DT_INT32:
derived_attrs.add('i32_')
elif arg.type == types_pb2.DT_INT64:
derived_attrs.add('i64_')
elif arg.type == types_pb2.DT_BOOL:
derived_attrs.add('i1_')
return derived_attrs
def _require_tensor_list(arg_def):
return arg_def.type_list_attr or arg_def.number_attr
def _camel_to_snake(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
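# Illustrative sketch (not part of the original source):
#   _camel_to_snake('FusedBatchNormV3') == 'fused_batch_norm_v3'
# matching the tf__<op> function naming used in mlir_external_funcs below.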
class OpDefCache(object):
"""A Dict to cache the OpDef for the Python function name."""
def __init__(self):
self._op_defs = {}
def lookup(self, f_name, func_def=None, optional=False):
if f_name in self._op_defs.keys():
return self._op_defs[f_name]
if isinstance(func_def, types.FunctionType):
if not hasattr(func_def, '_tfr_op_name'):
# skip a non-composition function
if optional:
return (None, None)
else:
raise KeyError('OpDef does not exist: ' + f_name)
op_name = getattr(func_def, '_tfr_op_name')
elif not func_def:
op_name = f_name
else:
# TODO(fengliuai): create one utility method to match different apis.
compose_dec = []
for dec in func_def.decorator_list:
if isinstance(dec, ast.Call):
if isinstance(dec.func,
ast.Attribute) and dec.func.attr == 'Composite':
compose_dec.append(dec)
if isinstance(dec.func, ast.Name) and dec.func.id == 'Composite':
compose_dec.append(dec)
if not compose_dec:
# skip a non-composition function
if optional:
return (None, None)
else:
raise KeyError('OpDef does not exist: ' + f_name)
elif len(compose_dec) > 1:
raise KeyError('More than one TF op decomposition was specified.')
else:
op_name = compose_dec[0].args[0].value
op_def = op_def_registry.get(op_name)
if not op_def:
raise ValueError('Not a registered op: ' + op_name)
derived_attrs = _collect_derived_attrs_from_proto(op_def)
self._op_defs[f_name] = (op_def, derived_attrs)
return (op_def, derived_attrs)
def mlir_external_funcs(self):
tfr_funcs = []
for op_def, derived_attrs in self._op_defs.values():
tfr_func = '\ntfr.func @tf__{}_('.format(_camel_to_snake(op_def.name))
# tensor inputs
inputs = [
_get_type_info_from_proto(arg_def) for arg_def in op_def.input_arg
]
# attribute inputs. Attributes with default values are moved to the back.
non_derived_attrs = [
attr for attr in op_def.attr if attr.name not in derived_attrs
]
attrs_no_default = [
attr for attr in non_derived_attrs
if not attr.HasField('default_value')
]
attrs_with_default = [
attr for attr in non_derived_attrs if attr.HasField('default_value')
]
attr_names = set()
for attr_def in attrs_no_default + attrs_with_default:
inputs.append(_get_type_info_from_proto(None, attr_def))
attr_names.add(attr_def.name)
# tensor outputs
outputs = [
_get_type_info_from_proto(arg_def) for arg_def in op_def.output_arg
]
inputs = ','.join(inputs)
outputs = ','.join(outputs)
attrs = ','.join(sorted(derived_attrs.union(attr_names)))
tfr_funcs.append('{}{}) -> ({}) attributes {{{}}}'.format(
tfr_func, inputs, outputs, attrs))
return tfr_funcs
_PY_TYPE_TO_TFR = {
bool: TFRTypes.I1,
int: TFRTypes.I64,
float: TFRTypes.F32,
}
_AG_FIXED_RETURN_TYPE = {
'for_stmt': type(None),
'if_stmt': type(None),
'Undefined': TFRTypes.AG_UNDEFINED_VAL,
}
QN = qual_names.QN
# TODO(mdan): Fix this with an importable module.
AG_MODULE = api._TRANSPILER._extra_locals['ag__'] # pylint:disable=protected-access
class TFRTypeResolver(type_inference.Resolver):
"""Resolve types for the external names, calls and arguments."""
def __init__(self, op_defs):
super(TFRTypeResolver, self).__init__()
self._op_defs = op_defs
# This pattern matching mechanism works with the functional form generated
# by autograph:
#
# for i in data:
# print(i)
#
# generates:
#
# def loop_body(itr):
# i = itr
# print(i)
# ag__.for_stmt(target)
#
# The mechanism lets us infer the type of the itr argument based on that of
# target.
self._for_loop_target_types = {} # Maps body function name to iterated.
self._for_loop_body_fns = {} # Used only to avoid collisions.
def res_name(self, ns, types_ns, name):
name_str = str(name)
if name_str in ns:
ns_val = ns[name_str]
return {type(ns_val)}, ns_val
if name_str in __builtins__:
return {TFRTypes.PY_BUILTIN_FUNC}, __builtins__[name_str]
# This name is not in the namespace because the autograph transformation
# is not backloaded into Python.
if name_str == 'ag__':
return {type(AG_MODULE)}, AG_MODULE
return None, None
def res_value(self, ns, value):
if value is None:
return {TFRTypes.NONE}
if value in (TFRTypes.SHAPE, TFRTypes.TF_TENSOR_SHAPE_FUNC):
# See TFRTypes.__getattribute__.
# TODO(mdan): Replacing the enum with classes would avoid this overlap.
return {value}
# TODO(mdan): Index more efficiently. Could do a name check instead.
if any(v is value for v in AG_MODULE.__dict__.values()):
return {TFRTypes.AG_BUILTIN_FUNC}
if getattr(value, '__name__', None) == 'tensorflow.raw_ops':
return {types.ModuleType}
if hasattr(value, '__module__'):
# All the imported operations, which are not autograph built-ins, are
# considered to be TF raw ops.
# TODO(fengliuai): refine the condition so we only match tensorflow
# ops here.
return {TFRTypes.TF_RAW_OP}
# TODO(mdan): Is ATTR equivalent to string?
return {_PY_TYPE_TO_TFR.get(type(value), TFRTypes.ATTR)}
def res_call(self, ns, types_ns, node, f_type, args, keywords):
name = anno.Basic.QN.of(node.func)
if f_type == (TFRTypes.AG_BUILTIN_FUNC,):
if name == QN(QN('ag__'), attr='if_stmt'):
nouts = node.args[6].value
# TODO(mdan): Look at the actual types out of if_body.
side_effects = {
qual_names.QN(n.value): {TFRTypes.TENSOR}
for n in node.args[5].elts[:nouts]
}
return {type(None)}, side_effects
if name == QN(QN('ag__'), attr='for_stmt'):
assert isinstance(node.args[2], ast.Name)
body_fn_name = str(anno.Basic.QN.of(node.args[2]))
assert body_fn_name not in self._for_loop_body_fns, (
'Previously used here: {}. Are you reusing the Resolver across '
'transformations?').format(self._for_loop_body_fns[body_fn_name])
self._for_loop_body_fns[body_fn_name] = anno.Basic.ORIGIN.of(node)
iterated_type = args[0]
assert iterated_type & {
TFRTypes.TENSOR_LIST, TFRTypes.TENSOR, List[int]
}, (
iterated_type)
self._for_loop_target_types[body_fn_name] = iterated_type
return {type(None)}, None
# TODO(mdan): Actually resolve the type here instead.
ret_type = _AG_FIXED_RETURN_TYPE.get(name.qn[1], None)
if ret_type is not None:
return {ret_type}, None
raise NotImplementedError('return type of {}'.format(name))
elif f_type == (TFRTypes.TF_RAW_OP,):
op_name = name.qn[1]
op_def, _ = self._op_defs.lookup(op_name)
if len(op_def.output_arg) == 1:
return {_get_type_from_proto(op_def.output_arg[0])}, None
return ({tuple(_get_type_from_proto(arg) for arg in op_def.output_arg)},
None)
elif f_type == (TFRTypes.PY_BUILTIN_FUNC,):
assert name.is_simple()
if name == QN('range'):
return {List[int]}, None
if name == QN('len'):
return {TFRTypes.INDEX}, None
elif f_type == (TFRTypes.TF_TENSOR_SHAPE_FUNC,):
return {TFRTypes.TF_TENSOR_SHAPE_LIST}, None
raise NotImplementedError('Function:', name, f_type)
def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
if f_is_local:
f_name_str = str(f_name)
if f_name_str in self._for_loop_target_types:
# See autograph/converters/control_flow.py - the function has a single
# argument, the iterate before any expansion.
assert self._for_loop_target_types[f_name_str] & {List[int]}
# Assume all loops are TF loops. Then the iterates are autoboxed into
# Tensors.
return {TFRTypes.INDEX}
else:
return None
func = ns[f_name]
op_def, derived_attrs = self._op_defs.lookup(f_name, func)
if op_def is None:
return None
pos = tf_inspect.getfullargspec(func).args.index(str(name))
if pos < len(op_def.input_arg):
arg_def = op_def.input_arg[pos]
return {_get_type_from_proto(arg_def)}
elif pos < len(op_def.input_arg) + len(op_def.attr) - len(derived_attrs):
non_derived_attr_pos = pos - len(op_def.input_arg)
for attr_def in op_def.attr:
# derived attribute, skip this one and continue to the next one.
if attr_def.name in derived_attrs:
continue
if non_derived_attr_pos == 0:
return {_get_type_from_proto(None, attr_def)}
non_derived_attr_pos -= 1
raise ValueError('Argument is not defined in OpDef: ' + str(name))
def res_subscript(self, ns, types_ns, node_or_slice, value, slice_):
assert len(value) == 1
value, = tuple(value)
if value == TFRTypes.TF_TENSOR_SHAPE_LIST:
# TODO(mdan): This is not entirely correct for multi-element slices.
return {int}
elif value in (TFRTypes.TENSOR_LIST, TFRTypes.TENSOR):
# TODO(mdan): This is not entirely correct for multi-element slices.
return {TFRTypes.TENSOR}
raise NotImplementedError('slice of {}'.format(value))
def res_compare(self, ns, types_ns, node, left, right):
# TODO(fengliuai): make sure left and right are compatible
return {TFRTypes.I1}
def res_binop(self, ns, types_ns, node, left, right):
# TODO(fengliuai): make sure left and right are compatible
return left
class SymbolTable(object):
"""Symbol Table for python code."""
def __init__(self):
self.symbols = []
self.enter_scope()
self.scf_scope = 0
# reserved key words
self.insert_symbol('len', 'len', TFRTypes.PY_BUILTIN_FUNC)
def enter_scope(self, scf_scope=False):
"""Enter a new scope - at function level."""
self.symbols.append({'types': {}, 'symbols': {}})
self.curr_table = self.symbols[len(self.symbols) - 1]
if scf_scope:
self.scf_scope += 1
def insert_symbol(self, name, value, type_):
self.curr_table['symbols'][name] = (value, type_)
# TODO(mdan): Use the inferred type rather than tracking it here.
# The following field is deprecated.
self.curr_table['types'][name] = type_
return value
def exit_scope(self):
self.symbols.pop()
self.curr_table = self.symbols[len(self.symbols) - 1]
if self.scf_scope > 0:
self.scf_scope -= 1
def in_scf_scope(self):
return self.scf_scope > 0
def lookup(self, name):
curr_idx = len(self.symbols) - 1
while curr_idx >= 0 and (name not in self.symbols[curr_idx]['symbols']):
curr_idx -= 1
if curr_idx < 0:
return None
return self.symbols[curr_idx]['symbols'][name]
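# Illustrative sketch (assumption): scopes behave like a stack and lookup
# walks outwards:
#   st = SymbolTable()
#   st.insert_symbol('x', '%x', TFRTypes.TENSOR)
#   st.enter_scope()
#   st.lookup('x')   # -> ('%x', TFRTypes.TENSOR), found in the outer scope
#   st.exit_scope()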
class TFRGen(transformer.CodeGenerator):
"""Visit the AST and generate MLIR TFR functions."""
def __init__(self, ctx, op_defs):
super(TFRGen, self).__init__(ctx)
self.ctx = ctx
self.symbol_table = SymbolTable()
self._op_defs = op_defs
def _create_mlir_loc(self, loc):
"""Creates mlir location from autograph ORIGIN value.
Args:
loc: OriginInfo
Returns:
A serialized mlir location string.
"""
if loc is not None and loc.loc.filename:
file_name = os.path.basename(loc.loc.filename)
return 'loc("{}":{}:{})'.format(file_name, loc.loc.lineno,
loc.loc.col_offset)
else:
return 'loc(unknown)'
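# Illustrative sketch (assumption, path is hypothetical): an origin
# pointing at line 12, column 4 of '/tmp/my_composite.py' serializes to
#   loc("my_composite.py":12:4)
# while a missing origin yields loc(unknown).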
def _emit_with_loc(self, op_str, node=None):
"""Emit the mlir operation with the location associated with the node.
Args:
op_str: The mlir operation string to be emitted.
node: The node of the AST tree, the mlir operation translated from.
"""
loc = ''
if node:
loc = self._create_mlir_loc(
anno.getanno(node, anno.Basic.ORIGIN, default=None))
self.emit(op_str + ' ' + loc)
def _get_inferred_type(self, node, default=None):
types_ = anno.getanno(node, anno.Static.TYPES, None)
if not types_:
print('WARN: no Static.TYPES annotation. Fix the type inference pass: ')
self.debug_print(node)
return default
if types_ and len(types_) > 1:
raise ValueError('ambiguous inferred type for "{}": {}'.format(
node, types_))
type_, = types_
# TODO(fengliuai): Tuple is added here to make return tuple work.
if type_ is list or type_ is Tuple:
# TODO(fengliuai): Seems like we need to move the list handling below
# into the type inference and we shouldn't just put 'list' there. Otherwise
# we couldn't find out the right type for the Name node.
if not isinstance(node, ast.List):
return default
all_types = [
anno.getanno(elt, anno.Static.TYPES, None) for elt in node.elts
]
if (TFRTypes.TENSOR,) in all_types:
# For any elt which is not a tfr.tensor, tfr.constant_tensor needs to be
# used to cast it to a tfr.tensor.
return TFRTypes.TENSOR_LIST
else:
return TFRTypes.ATTR
if default is not None and type_ != default:
print('WARN: type annotation {}({}) does not match {}({})'.format(
type_, type(type_), default, type(default)))
self.debug_print(node)
return type_
def _pack_tensor_list(self, value):
# This packs a list of tensors, so the axis is 0.
axis = self._ssa_name('zero')
self._emit_with_loc('\n{} = constant 0 : i64'.format(axis))
casted = self._ssa_name('pack')
self.emit('\n{} = tfr.call @tf__pack({}, {})'.format(casted, value, axis))
self._emit_with_loc(' : (!tfr.tensor_list, i64) -> !tfr.tensor')
# load the op def of tf.Pack
self._op_defs.lookup('Pack')
return casted, TFRTypes.TENSOR
def _index_to_I64(self, value, ty):
if ty == TFRTypes.INDEX:
casted = self._ssa_name('casted')
self._emit_with_loc('\n{} = index_cast {} : index to i64'.format(
casted, value))
return casted, TFRTypes.I64
else:
return value, ty
def _value_to_tensor(self, value, ty, node):
value, ty = self._index_to_I64(value, ty)
cst_tensor = self._ssa_name('cst')
self.emit('\n{} = "tfr.constant_tensor"({})'.format(cst_tensor, value))
self._emit_with_loc(' : ({}) -> !tfr.tensor'.format(ty), node)
return cst_tensor, TFRTypes.TENSOR
def _ssa_name(self, prefix):
if isinstance(prefix, qual_names.QN):
assert prefix.is_simple(), 'ANF transform should have cleaned this up'
prefix = prefix.ssf()
return '%' + self.ctx.namer.new_symbol(prefix, set())
def _op_def(self, op_name):
return op_def_registry.get(op_name)
def visit_block(self, block):
return [self.visit(item) for item in block]
def visit_Pass(self, node):
if self.symbol_table.in_scf_scope():
self._emit_with_loc('\nscf.yield', node)
else:
self._emit_with_loc('\ntfr.return', node)
def visit_Attribute(self, node):
node_type = self._get_inferred_type(node, None)
if isinstance(node.value, ast.Name):
if node.value.id == 'ag__':
# some variables are assigned with the 'ag__.xxx' method; we should handle
# them following the autograph conventions.
return (node.attr, TFRTypes.AG_BUILTIN_FUNC)
if node_type == TFRTypes.TF_RAW_OP:
# This branch is used when it is inside tensorflow
return (node.attr, TFRTypes.TF_RAW_OP)
value, _ = self.visit(node.value)
tensor_type = self._get_inferred_type(node.value, None)
# TODO(fengliuai): use node_type once it
if node_type == TFRTypes.SHAPE:
print('TODO: use "node_type"')
if node.attr == 'shape' and tensor_type == TFRTypes.TENSOR:
ssa_value = self._ssa_name('shape')
self._emit_with_loc(
'\n{} = tfr.get_shape {} -> !shape.shape'.format(ssa_value, value),
node)
return (ssa_value, TFRTypes.SHAPE)
if isinstance(node.value, ast.Attribute):
if isinstance(node.value.value, ast.Name):
if node.value.value.id == 'tf' and node.value.attr == 'raw_ops':
# This branch is used when it is outside tensorflow
return (node.attr, TFRTypes.TF_RAW_OP)
value, ty = self.visit(node.value)
# TODO(fengliuai): use node_type once it
if node_type == TFRTypes.TF_TENSOR_SHAPE_FUNC:
print('TODO: use "node_type"')
if ty == TFRTypes.SHAPE and node.attr == 'as_list':
return (value, TFRTypes.TF_TENSOR_SHAPE_FUNC)
raise NotImplementedError('Attribute kind not recognized.')
def visit_Assign(self, node):
values = self.visit(node.value)
if isinstance(node.targets[0], ast.Tuple):
targets = [elt.id for elt in node.targets[0].elts]
elif isinstance(node.targets[0], ast.Name):
targets = [node.targets[0].id]
else:
raise NotImplementedError('Assignment target type not recognized.')
if isinstance(values, list):
if len(targets) == len(values):
for key, value in zip(targets, values):
ssa_value, ty_ = value
ty = self._get_inferred_type(node.value, ty_)
self.symbol_table.insert_symbol(key, ssa_value, ty)
elif len(values) == 1:
n, ty = values[0]
assert ty == TFRTypes.TENSOR_LIST
# assign a tensor_list to multiple variables
for idx, key in enumerate(targets):
idx_name = self._ssa_name('idx')
self._emit_with_loc(
'\n{} = constant {} : index'.format(idx_name, idx), node)
elt_name = self._ssa_name('elt')
self.emit('\n{} = tfr.get_element {}[{}]'.format(
elt_name, n, idx_name))
self._emit_with_loc(' : (!tfr.tensor_list, index) -> !tfr.tensor',
node)
self.symbol_table.insert_symbol(key, elt_name, TFRTypes.TENSOR)
elif len(targets) == 1:
ssa_names = [n for n, _ in values]
tys = [t for _, t in values]
self.symbol_table.insert_symbol(targets[0], ssa_names, tys)
else:
self.symbol_table.insert_symbol(targets[0], values[0], values[1])
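# Example (a sketch of the IR emitted by the tensor_list branch above for
# `a, b = some_list`, where `some_list` is a !tfr.tensor_list):
#   %idx = constant 0 : index
#   %elt = tfr.get_element %list[%idx] : (!tfr.tensor_list, index) -> !tfr.tensor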
def _emit_binary_op(self, op, lhs, lhs_ty, rhs, rhs_ty):
assert lhs_ty == rhs_ty, (lhs_ty, rhs_ty)
if isinstance(op, ast.Sub):
code = 'sub'
elif isinstance(op, ast.Add):
code = 'add'
else:
raise NotImplementedError('BinOp operator not recognized: ' + str(op))
if lhs_ty == TFRTypes.I64:
suffix = 'i'
elif lhs_ty == TFRTypes.F32:
suffix = 'f'
else:
raise NotImplementedError('BinOp operand type not recognized: ' + str(lhs_ty))
ret = self._ssa_name(code)
self._emit_with_loc(
'\n{} = {}{} {}, {} : {}'.format(ret, code, suffix, lhs, rhs, lhs_ty),
op)
return ret, lhs_ty
def visit_AugAssign(self, node):
lhs, lhs_ty = self.visit(node.target)
rhs, rhs_ty = self.visit(node.value)
ret, ret_ty = self._emit_binary_op(node.op, lhs, lhs_ty, rhs, rhs_ty)
self.symbol_table.insert_symbol(node.target.id, ret, ret_ty)
def visit_BinOp(self, node):
lhs, lhs_ty = self.visit(node.left)
rhs, rhs_ty = self.visit(node.right)
return self._emit_binary_op(node.op, lhs, lhs_ty, rhs, rhs_ty)
def visit_BoolOp(self, node):
values = [self.visit(value) for value in node.values]
# TODO(fengliuai): Handle more ast node types.
if isinstance(node.op, ast.Or):
raise NotImplementedError('Or operator not recognized')
elif isinstance(node.op, ast.And):
raise NotImplementedError('And operator not recognized')
def visit_Call(self, node):
func_name, func_type = self.visit(node.func)
_ = self._get_inferred_type(node.func, func_type)
if func_type == TFRTypes.AG_BUILTIN_FUNC:
if func_name == 'if_stmt':
cond, _ = self.visit(node.args[0])
body, _ = self.visit(node.args[1])
orelse, _ = self.visit(node.args[2])
get_state, _ = self.visit(node.args[3])
nouts = int(node.args[6].value)
out_symbols = []
# The out symbols are just a Tuple of names
for out in node.args[5].elts[:nouts]:
val, ty = self.symbol_table.lookup(out.value)
if ty != TFRTypes.AG_UNDEFINED_VAL:
raise ValueError('if stmt out symbol is not defined.')
out_symbols.append(out.value)
return self._visit_if_stmt(cond, body, orelse, get_state, out_symbols,
node)
elif func_name == 'for_stmt':
range_ = self._visit_iter(node.args[0])
body, _ = self.visit(node.args[2])
get_state, _ = self.visit(node.args[3])
loop_carried = [out.value for out in node.args[5].elts]
# TODO(fengliuai): opt is not used here.
return self._visit_for_stmt(range_, body, get_state, loop_carried, node)
elif func_name == 'Undefined':
val = self._ssa_name(node.args[0].value)
return (val, TFRTypes.AG_UNDEFINED_VAL)
elif func_name == 'UndefinedReturnValue':
val = self._ssa_name('return_val')
return (val, TFRTypes.AG_UNDEFINED_VAL)
if func_type == TFRTypes.TF_RAW_OP:
return self._visit_tf_op(func_name, node.args, node.keywords, node)
if func_type == TFRTypes.TF_TENSOR_SHAPE_FUNC:
return (func_name, TFRTypes.TF_TENSOR_SHAPE_LIST)
if func_type == TFRTypes.PY_BUILTIN_FUNC:
if func_name == 'len':
arg, ty = self.visit(node.args[0])
ty = self._get_inferred_type(node.args[0], ty)
assert ty == TFRTypes.TF_TENSOR_SHAPE_LIST, ty
len_value = self._ssa_name('len')
self._emit_with_loc(
'\n{} = shape.rank {} : !shape.shape -> !shape.size'.format(
len_value, arg), node)
size_value = self._ssa_name('len_size')
self._emit_with_loc(
'\n{} = shape.size_to_index {} : !shape.size'.format(
size_value, len_value), node)
return (size_value, TFRTypes.INDEX)
raise NotImplementedError('call operator not recognized: {} {}'.format(
func_name, func_type))
def visit_Compare(self, node):
lhs, lhs_ty = self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
rhs, _ = self.visit(right)
if isinstance(op, ast.Eq):
pred = 'eq'
elif isinstance(op, ast.Lt):
pred = 'ult'
elif isinstance(op, ast.LtE):
pred = 'ule'
elif isinstance(op, ast.Gt):
pred = 'ugt'
elif isinstance(op, ast.GtE):
pred = 'uge'
elif isinstance(op, ast.NotEq):
pred = 'ne'
else:
raise NotImplementedError('Compare operator not recognized')
ret = self._ssa_name(pred)
if lhs_ty == TFRTypes.ATTR:
self._emit_with_loc(
'\n{} = tfr.equal {}, {} -> i1'.format(ret, lhs, rhs), node)
else:
if lhs_ty == TFRTypes.I64:
code = 'cmpi'
elif lhs_ty == TFRTypes.F32:
code = 'cmpf'
else:
raise NotImplementedError('Compare operand type not recognized')
self._emit_with_loc(
'\n{} = {} "{}", {}, {} : {}'.format(ret, code, pred, lhs, rhs,
lhs_ty), node)
return ret, TFRTypes.I1
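# Example (a sketch of the IR emitted above for an i64 comparison `x < y`):
#   %ult = cmpi "ult", %x, %y : i64
# and for attribute operands:
#   %eq = tfr.equal %lhs, %rhs -> i1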
def visit_Constant(self, node):
cst_name = self._ssa_name('cst')
if node.value is None:
cst_ty = TFRTypes.NONE
elif isinstance(node.value, bool):
cst_ty = self._get_inferred_type(node)
cst_val = str(node.value).lower()
self._emit_with_loc('\n{} = constant {}'.format(cst_name, cst_val), node)
else:
cst_ty = self._get_inferred_type(node)
cst_val = node.value
if cst_ty == TFRTypes.ATTR:
self._emit_with_loc(
'\n{} = tfr.constant "{}" -> {}'.format(cst_name, cst_val, cst_ty),
node)
else:
self._emit_with_loc(
'\n{} = constant {} : {}'.format(cst_name, cst_val, cst_ty), node)
return cst_name, cst_ty
def visit_FunctionDef(self, node):
op_def, derived_attrs = self._op_defs.lookup(node.name, node, True)
if op_def is None:
# Nested function. Insert it to symbol table for looking up later.
self.symbol_table.insert_symbol(node.name, node, None)
return
op_name = op_def.name
if self.symbol_table.lookup(op_name):
raise LookupError('Composition has not been registered for op: ' +
op_name)
else:
self.symbol_table.insert_symbol(node.name, None, None)
self.symbol_table.enter_scope()
self.emit('\ntfr.func @tf__{0}('.format(_camel_to_snake(op_name)))
arg_list = []
idx = 0
max_idx = len(op_def.input_arg) + len(op_def.attr)
for arg in node.args.args:
arg_name = self._ssa_name(anno.getanno(arg, anno.Basic.QN))
arg_type = anno.getanno(arg, anno.Static.TYPES)[0]
arg_attr = ''
if idx >= len(op_def.input_arg):
attr_def = op_def.attr[idx - len(op_def.input_arg)]
# skip the derived attributes
while attr_def.name in derived_attrs and (idx + 1) < max_idx:
idx += 1
attr_def = op_def.attr[idx - len(op_def.input_arg)]
if idx >= max_idx:
raise ValueError('Argument is not defined in OpDef: ' + arg_name)
arg_attr += '{{tfr.name="{}"'.format(attr_def.name)
if attr_def.HasField('default_value'):
default_val = _get_val_from_proto(arg_type, attr_def.default_value)
arg_attr += ',tfr.default={}'.format(default_val)
arg_attr += '}'
idx += 1
arg_str = '{}: {}{}'.format(arg_name, arg_type, arg_attr)
arg_list.append(arg_str)
self.symbol_table.insert_symbol(arg.id, arg_name, arg_type)
ret_type_list = []
for ret_def in op_def.output_arg:
if ret_def.number_attr or ret_def.type_list_attr:
ret_type_list.append(str(TFRTypes.TENSOR_LIST))
else:
ret_type_list.append(str(TFRTypes.TENSOR))
self.emit('{}) -> ({}) {{'.format(', '.join(arg_list),
', '.join(ret_type_list)))
self.visit_block(node.body)
self._emit_with_loc('\n}', node)
self.symbol_table.exit_scope()
def visit_arguments(self, node):
# TODO(fengliuai): return the ordered types and names.
# We need to order the arguments to match the assumption in the TFR dialect.
raise NotImplementedError('arguments not supported.')
def visit_Lambda(self, node):
raise NotImplementedError('Lambda not supported.')
def _get_mlir_ssa_values(self, name_prefix, out_types):
"""Create MLIR convention SSA values."""
out_ssa_values = []
if not out_types:
return '', out_ssa_values
out_name = self._ssa_name(name_prefix)
if len(out_types) == 1:
out_name_suffix = ''
out_ssa_values.append(out_name)
else:
# For multiple returns, MLIR uses '%s:i' when they are defined and
# '%s#i' when they are used.
out_name_suffix = ':{}'.format(len(out_types))
for idx, _ in enumerate(out_types):
out_ssa_values.append('{}#{}'.format(out_name, idx))
return '{}{}'.format(out_name, out_name_suffix), out_ssa_values
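# Example (a sketch): _get_mlir_ssa_values('if_stmt', [TENSOR, TENSOR])
# returns roughly ('%if_stmt:2', ['%if_stmt#0', '%if_stmt#1']) -- the first
# string is used where the results are defined, the list where they are used.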
def _visit_if_stmt(self, cond, body_def, orelse_def, get_state, out_symbols,
node):
self.emit('\n')
ret_str, ret_ssa_values = self._get_mlir_ssa_values(
'if_stmt', [TFRTypes.TENSOR] * len(out_symbols))
if ret_ssa_values:
self.emit(ret_str + ' = ')
# add ssa values to the symbol table
out_types = []
for symbol, ssa_value in zip(out_symbols, ret_ssa_values):
self.symbol_table.insert_symbol(symbol, ssa_value, TFRTypes.TENSOR)
out_types.append(str(TFRTypes.TENSOR))
self.emit('scf.if {} -> ({}) {{'.format(cond, ', '.join(out_types)))
# Create a new scope so that local variables do not leak out.
self.symbol_table.enter_scope(scf_scope=True)
self.visit_block(body_def.body)
self.visit_block(get_state.body)
self.symbol_table.exit_scope()
self.emit('\n} else {')
# Create a new scope so that local variables do not leak out.
self.symbol_table.enter_scope(scf_scope=True)
self.visit_block(orelse_def.body)
self.visit_block(get_state.body)
self.symbol_table.exit_scope()
self._emit_with_loc('\n}', node)
return list(zip(ret_ssa_values, out_types))
def _visit_iter(self, node):
if isinstance(node, ast.Call):
f_name = anno.getanno(node.func, anno.Basic.QN)
if f_name == QN('range'):
args = [self.visit(arg) for arg in node.args]
begin = None
step = None
end = None
if len(args) == 1:
end, end_ty = args[0]
elif len(args) == 2:
begin, begin_ty = args[0]
end, end_ty = args[1]
elif len(args) == 3:
begin, begin_ty = args[0]
end, end_ty = args[1]
step, step_ty = args[2]
if begin is None:
begin = self._ssa_name('begin')
self._emit_with_loc('\n{} = constant 0 : index'.format(begin), node)
elif begin_ty != TFRTypes.INDEX:
begin_ = self._ssa_name('begin')
self._emit_with_loc(
'\n{} = index_cast {} : {} to index'.format(
begin_, begin, begin_ty), node)
begin = begin_
if end_ty != TFRTypes.INDEX:
end_ = self._ssa_name('end')
self._emit_with_loc(
'\n{} = index_cast {} : {} to index'.format(end_, end, end_ty),
node)
end = end_
if step is None:
step = self._ssa_name('step')
self._emit_with_loc('\n{} = constant 1 : index'.format(step), node)
elif step_ty != TFRTypes.INDEX:
step_ = self._ssa_name('step')
self._emit_with_loc(
'\n{} = index_cast {} : {} to index'.format(step_, step, step_ty),
node)
step = step_
return begin, end, step
raise NotImplementedError('Iterator entity not supported: ' + str(node))
def _visit_for_stmt(self, range_, body_def, get_state, loop_carried, node):
self.emit('\n')
ret_str, ret_ssa_values = self._get_mlir_ssa_values(
'for_stmt', [TFRTypes.TENSOR] * len(loop_carried))
if ret_ssa_values:
self.emit(ret_str + ' = ')
# Before entering the loop, we use the original ssa values as the initial
# values for the loop iteration arguments. We also create new ssa values as
# the returns of the scf.for statement. The symbol table needs to be
# updated to these new ssa values before it enters the scope of the loop.
out_types = []
init_values = []
for symbol, ssa_value in zip(loop_carried, ret_ssa_values):
init, ty = self.symbol_table.lookup(symbol)
self.symbol_table.insert_symbol(symbol, ssa_value, ty)
out_types.append(str(ty))
init_values.append((init, ty))
# Create a new scope so that local variables do not leak out.
self.symbol_table.enter_scope(scf_scope=True)
# Create the iteration variable with index type
assert len(body_def.args.args) == 1
it_name = body_def.args.args[0].id
it = self._ssa_name(it_name)
self.symbol_table.insert_symbol(it_name, it, TFRTypes.INDEX)
self.emit('scf.for {} = {} to {} step {} '.format(it, range_[0], range_[1],
range_[2]))
if loop_carried:
iter_args = []
for symbol, init in zip(loop_carried, init_values):
# create new ssa values for the loop carried variables
it_arg = self._ssa_name('it_arg')
self.symbol_table.insert_symbol(symbol, it_arg, init[1])
iter_args.append('{} = {}'.format(it_arg, init[0]))
self.emit('iter_args({}) '.format(', '.join(iter_args)))
self.emit('-> ({}) {{'.format(', '.join(out_types)))
else:
self.emit(' {')
self.visit_block(body_def.body)
self.visit_block(get_state.body)
self.symbol_table.exit_scope()
self._emit_with_loc('\n}', node)
return list(zip(ret_ssa_values, out_types))
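# For reference, a sketch of the loop IR emitted above for one loop-carried
# tensor value:
#   %for_stmt = scf.for %i = %begin to %end step %step
#       iter_args(%it_arg = %init) -> (!tfr.tensor) { ... }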
def _emit_default_constant_from_proto(self, attr_def):
"""emit mlir constant statement from default value of the ArgDef proto."""
name = self._ssa_name('cst')
cst_ty = _get_type_from_proto(None, attr_def)
cst_val = _get_val_from_proto(cst_ty, attr_def.default_value)
if cst_ty == TFRTypes.ATTR:
self._emit_with_loc('\n{} = tfr.constant {} -> {}'.format(
name, cst_val, cst_ty))
elif cst_ty == TFRTypes.I1:
self._emit_with_loc('\n{} = constant {}'.format(name, cst_val))
else:
self._emit_with_loc('\n{} = constant {} : {}'.format(
name, cst_val, cst_ty))
return name, cst_ty
def visit_keyword(self, node):
return node.arg, self.visit(node.value)
def _visit_tf_op(self, op_name, args, keywords, node):
op_def, derived_attrs = self._op_defs.lookup(op_name)
ret_tys = [_get_type_from_proto(arg) for arg in op_def.output_arg]
ret_str, ret_ssa_values = self._get_mlir_ssa_values(op_name, ret_tys)
arg_strs = []
ty_strs = []
for arg in args:
value, ty = self.visit(arg)
arg_strs.append(value)
ty_strs.append(str(ty))
input_args = [arg for arg in op_def.input_arg]
attrs_no_default = [
attr for attr in op_def.attr
if not attr.HasField('default_value') and attr.name not in derived_attrs
]
attrs_with_default = [
attr for attr in op_def.attr
if attr.HasField('default_value') and attr.name not in derived_attrs
]
kw_args = {}
for arg in keywords:
value, (ssa_name, ty) = self.visit(arg)
ty = self._get_inferred_type(arg.value, ty)
# TODO(fengliuai): implement the "rename_to" for the customization in
# tensorflow/core/api_def/base_api/*
if value == 'axis':
value = 'split_dim'
kw_args[value] = (ssa_name, ty)
# tensor arguments and attribute arguments
ordered_args = input_args + attrs_no_default + attrs_with_default
for attr_def in ordered_args[len(args):]:
if attr_def.name in kw_args:
value, ty = kw_args[attr_def.name]
if attr_def in input_args:
if ty in _attribute_types:
# attribute values can't be passed to the tf op call directly; cast first.
value, ty = self._value_to_tensor(value, ty, node)
if ty is TFRTypes.TENSOR_LIST and not _require_tensor_list(attr_def):
value, ty = self._pack_tensor_list(value)
else:
value, ty = self._emit_default_constant_from_proto(attr_def)
arg_strs.append(value)
ty_strs.append(str(ty))
if ret_ssa_values:
self.emit('\n{} = '.format(ret_str))
self.emit('tfr.call @tf__{}('.format(_camel_to_snake(op_name)))
arg_str = ', '.join(arg_strs)
arg_ty_str = ', '.join(ty_strs)
ret_ty_str = ', '.join([str(ty) for ty in ret_tys])
self._emit_with_loc(
'{}) : ({}) -> ({})'.format(arg_str, arg_ty_str, ret_ty_str), node)
return list(zip(ret_ssa_values, ret_tys))
def visit_If(self, node):
raise NotImplementedError('If not supported.')
def visit_Name(self, node):
val, lookup_type = self.symbol_table.lookup(node.id)
type_ = self._get_inferred_type(node, lookup_type)
return val, type_
def visit_Return(self, node):
values = self.visit(node.value)
if self.symbol_table.in_scf_scope():
self.emit('\nscf.yield ')
else:
self.emit('\ntfr.return ')
if not values:
return
if isinstance(values, list):
vals, tys = zip(*values)
else:
vals = values[0]
tys = values[1]
if isinstance(tys, list) or isinstance(tys, tuple):
tys = [str(t) for t in tys]
self._emit_with_loc('{} : {}'.format(', '.join(vals), ', '.join(tys)),
node)
elif tys != TFRTypes.NONE:
# TODO(fengliuai): scf region yield uses this branch. Fix it.
self._emit_with_loc('{} : {}'.format(vals, tys), node)
def visit_Subscript(self, node):
val, ty = self.visit(node.value)
type_ = self._get_inferred_type(node.value, ty)
# TODO(fengliuai): We hardcode the handling of node.slice here to get the
# index type. Use the visit method once the type inference is done.
# slice_val, slice_ty = self.visit(node.slice)
if isinstance(node.slice, ast.Index):
if isinstance(node.slice.value, ast.Constant):
# TODO(fengliuai): promote to an assignment
idx_val = self._ssa_name('cst')
self._emit_with_loc(
'\n{} = constant {} : index'.format(idx_val,
node.slice.value.value), node)
else:
idx_val, _ = self.visit(node.slice.value)
else:
raise NotImplementedError('non-index slice not supported.')
elt = self._ssa_name('elt')
if type_ == TFRTypes.TENSOR_LIST:
self.emit('\n{} = tfr.get_element {}[{}] '.format(elt, val, idx_val))
self._emit_with_loc(': (!tfr.tensor_list, index) -> !tfr.tensor', node)
return (elt, TFRTypes.TENSOR)
elif type_ == TFRTypes.TF_TENSOR_SHAPE_LIST:
size_ = self._ssa_name('size')
self.emit('\n{} = shape.get_extent {}, {}'.format(size_, val, idx_val))
self._emit_with_loc(': !shape.shape, index -> !shape.size', node)
self._emit_with_loc(
'\n{} = shape.size_to_index {} : !shape.size'.format(elt, size_),
node)
return (elt, TFRTypes.INDEX)
def visit_List(self, node):
out_type = self._get_inferred_type(node)
vals = []
tys = []
for elt in node.elts:
val, ty = self.visit(elt)
if ty in _attribute_types and out_type == TFRTypes.TENSOR_LIST:
# This list is a tensor list, so cast all the input values to tensors.
val, ty = self._value_to_tensor(val, ty, node)
else:
# We shouldn't use the index type to build the list because the list will
# be used as an attribute.
val, ty = self._index_to_I64(val, ty)
vals.append(val)
tys.append(str(ty))
list_val = self._ssa_name('list')
self.emit('\n{} = "tfr.build_list"({})'.format(list_val, ', '.join(vals)))
self._emit_with_loc(' : ({}) -> {}'.format(', '.join(tys), out_type), node)
return (list_val, out_type)
def visit_Tuple(self, node):
return [self.visit(elt) for elt in node.elts]
def visit_UnaryOp(self, node):
value, ty = self.visit(node.operand)
if isinstance(node.op, ast.USub):
zero_value = self._ssa_name('zero')
self._emit_with_loc('\n{} = constant 0 : {}'.format(zero_value, ty), node)
ssa_value = self._ssa_name('cst')
if ty == TFRTypes.I32 or ty == TFRTypes.I64:
self._emit_with_loc(
'\n{} = subi {}, {} : {}'.format(ssa_value, zero_value, value, ty),
node)
elif ty == TFRTypes.F32:
self._emit_with_loc(
'\n{} = subf {}, {} : {}'.format(ssa_value, zero_value, value, ty),
node)
else:
raise NotImplementedError('USub type not recognized: ' + str(ty))
return ssa_value, ty
raise NotImplementedError('UnaryOp operator not recognized')
def visit_For(self, node):
raise NotImplementedError('For operator not recognized')
def visit_While(self, node):
raise NotImplementedError('While operator not recognized')
def visit_Try(self, node):
# Only handles the body of the try statement.
self.visit_block(node.body)
def _apply_py_to_tf_passes(node, ctx):
"""Apply transformations from PyToTF to match tf.function tracing."""
# TODO(fengliuai): we don't know which passes are required, thus we evaluate
# each one when the corresponding node is handled.
# copied from PyToTF.transform_ast
node = return_statements.transform(node, ctx, False)
node = control_flow.transform(node, ctx)
return node
class TfrGen(transpiler.GenericTranspiler):
"""Transforms Python objects into TFR MLIR source code."""
def __init__(self, op_defs):
self._op_defs = op_defs
def transform_ast(self, node, ctx):
node = _apply_py_to_tf_passes(node, ctx)
# TODO(mdan): Enable this.
# node = anf.transform(node, ctx)
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, ctx)
node = reaching_definitions.resolve(node, ctx, graphs)
node = reaching_fndefs.resolve(node, ctx, graphs)
node = type_inference.resolve(node, ctx, graphs,
TFRTypeResolver(self._op_defs))
mlir_generator = TFRGen(ctx, self._op_defs)
mlir_generator.visit(node)
return mlir_generator.code_buffer
def tfr_gen(func, op_defs):
"""Parse a function and emit the TFR functions."""
mlir_code, _ = TfrGen(op_defs).transform(func, None)
assert tfr.verify(mlir_code), 'mlir code not verified: {}'.format(mlir_code)
return mlir_code
def tfr_gen_from_module(source, method_prefix=None, op_libraries=None):
"""Parse a python code and emit the TFR functions from a target class."""
op_defs = OpDefCache()
if op_libraries:
for m in op_libraries:
lib_dir = os.path.dirname(m.__file__)
prefix_len = len('gen_')
lib_name = os.path.basename(m.__file__)[prefix_len:].replace('.py', '.so')
# Load the op library so the op is added to the op registry. This is
# required when the op cc_library couldn't be statically linked in open
# source.
# This is a no-op if the op shared library couldn't be found in the same
# directory of the op Python API.
load_library.load_op_library(os.path.join(lib_dir, lib_name))
mlir_funcs = [
tfr_gen(func, op_defs)
for name, func in tf_inspect.getmembers(source, tf_inspect.isfunction)
if not method_prefix or name.startswith(method_prefix)
]
return '\n'.join(mlir_funcs + op_defs.mlir_external_funcs())
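# A minimal usage sketch (assumption: `gen_composite_ops` is a hypothetical
# module whose composition functions are prefixed with 'composite_'):
#
#   mlir_code = tfr_gen_from_module(gen_composite_ops,
#                                   method_prefix='composite_')
#   print(mlir_code)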
|
karllessard/tensorflow
|
tensorflow/compiler/mlir/tfr/python/tfr_gen.py
|
Python
|
apache-2.0
| 49,382
|
[
"VisIt"
] |
c2734f667d1452dc2b6cd007c423a9d474077404cc6193b27895fbaf695bd278
|
import numpy as np
import sklearn.decomposition as decomp
import scipy.optimize as opt
def get_fit_on_sta(sta_array):
"""fit sta to get spaital and temporal sta and also gaussian fit
Input:
sta_array: array of shape (# units, stim size 0, stim size 1, # color channels, # frames)
Output:
spatial_sta: array of shape (# units, stim size 0, stim size 1)
temporal_sta: array of shape (# units, # color channels, # frames)
gaussian_fit: array of shape (# units, 6)
column 1: amplitude of gaussian fit
column 2,3: x, y location
column 4,5: x, y sd size
column 6: angle
"""
n_units, stim_size0, stim_size1, n_channels, n_frames = sta_array.shape
spatial_sta = np.zeros((n_units, stim_size0, stim_size1))
temporal_sta = np.zeros((n_units, n_channels, n_frames))
gaussian_fit = np.zeros((n_units, 6))
max_per_frame = np.max(np.abs(sta_array[:,:,:,1]-0.5), (1,2))
max_frames = max_per_frame.argmax(1)
max_val = max_per_frame.max(1)
peak_frame = int(np.median(max_frames))
frames_in = np.arange(peak_frame-1, peak_frame+2)
for j in range(sta_array.shape[0]):
spatial_sta[j], temp_, gau_ = fit_sta(sta_array[j], frames_in)
if temp_ is not None:
temporal_sta[j] = temp_
gaussian_fit[j] = gau_
temporal_sta = denoise_sta(temporal_sta, frames_in)
return spatial_sta, temporal_sta, gaussian_fit
def fit_sta(sta, frames_in):
stim_size0, stim_size1, n_channel, n_frames = sta.shape
# center it
sta = sta - 0.5
# get spatial sta
spatial_sta = np.mean(sta[:, :, 1, frames_in], 2)
# Create x and y indices for grid for Gaussian fit
x = np.arange(0, stim_size1, 1)
y = np.arange(0, stim_size0, 1)
x, y = np.meshgrid(x, y)
# Get initial guess for Gaussian parameters (helps with fitting)
this_STA = spatial_sta.reshape(-1)
init_amp = this_STA[np.argmax(np.abs(this_STA))] # get amplitude guess from most extreme (max or min) amplitude of this_STA
init_x,init_y = np.unravel_index(np.argmax(np.abs(this_STA)),(stim_size0, stim_size1)) # guess center of Gaussian as indices of most extreme (max or min) amplitude
initial_guess = (init_amp,init_y,init_x,1,1,0)
# Try to fit; if it doesn't converge, skip that cell
try:
popt, pcov = opt.curve_fit(twoD_Gaussian, (x, y), this_STA, initial_guess)
gaussian_param = np.copy(popt)
gaussian_param[3:5] = np.abs(popt[3:5]) # sometimes sds are negative (in Gaussian def above, they're always squared)
# sign of fit
sign = np.sign(gaussian_param[0])
# get temporal sta
gaussian_image = twoD_Gaussian((x,y), *gaussian_param).reshape(stim_size0, stim_size1)
temporal_sta = sign*np.sum(sta*gaussian_image[:,:,None,None], (0, 1))
except Exception:  # e.g. curve_fit raises RuntimeError when the fit fails
temporal_sta = None
gaussian_param = None
return spatial_sta, temporal_sta, gaussian_param
def twoD_Gaussian(xdata_tuple, amplitude, xo, yo, sigma_x, sigma_y, theta):
## Define 2D Gaussian that we'll fit to spatial STAs
(x, y) = xdata_tuple
xo = float(xo)
yo = float(yo)
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo)+c*((y-yo)**2)))
return g.ravel()
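# Example (a sketch): evaluate a unit-amplitude isotropic Gaussian centered
# at (xo, yo) = (2, 3) on a 5x5 grid:
#   xs, ys = np.meshgrid(np.arange(5), np.arange(5))
#   img = twoD_Gaussian((xs, ys), 1.0, 2, 3, 1.0, 1.0, 0.0).reshape(5, 5)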
def denoise_sta(temporal_sta, frames_in):
temporal_sta_reshaped = temporal_sta.reshape(-1, temporal_sta.shape[2])
temp_sta_mean = np.mean(temporal_sta_reshaped, 1, keepdims=True)
temp_sta_std = np.std(temporal_sta_reshaped, 1, keepdims=True)
temp_sta_std[temp_sta_std==0] = 1
temporal_sta_std = (temporal_sta_reshaped - temp_sta_mean)/temp_sta_std
good_sta = temporal_sta_reshaped[np.abs(temporal_sta_std[:, frames_in]).max(1) > 3]
if len(good_sta) <= 3:
idx = np.argsort(np.abs(temporal_sta_std[:, frames_in]).max(1))[::-1][:5]
good_sta = temporal_sta_reshaped[idx]
pca = decomp.PCA(n_components = 3)
pca.fit(good_sta)
temporal_sta_denoised = np.zeros_like(temporal_sta)
for j in range(temporal_sta.shape[1]):
temp_ = temporal_sta[:,j]
temporal_sta_denoised[:,j] = pca.inverse_transform(pca.transform(temp_))
return temporal_sta_denoised
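if __name__ == '__main__':
    # Smoke test on synthetic data -- a sketch only; real STAs come from the
    # recording pipeline. Shapes follow the get_fit_on_sta docstring.
    demo_sta = np.full((4, 8, 8, 3, 30), 0.5)
    demo_sta[:, 3, 3, :, 15] = 1.0  # a single off-center peak at frame 15
    spatial, temporal, gauss = get_fit_on_sta(demo_sta)
    print(spatial.shape, temporal.shape, gauss.shape)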
|
paninski-lab/yass
|
src/yass/rf/sta_fit.py
|
Python
|
apache-2.0
| 4,548
|
[
"Gaussian"
] |
30a6a4f9f20f1cf0805dd07a13ea1f3a0d8447de3b2bd000b1e29c9e646186f5
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Deposition data model classes.
Classes for wrapping BibWorkflowObject and friends to make it easier to
work with the data attributes.
"""
from uuid import uuid4
import json
import os
from datetime import datetime
from dateutil.tz import tzutc
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.datastructures import MultiDict
from werkzeug.utils import secure_filename
from flask import redirect, render_template, flash, url_for, request, \
session, current_app
from flask_login import current_user
from flask_restful import fields, marshal
from invenio.ext.restful import UTCISODateTime
from invenio.base.helpers import unicodifier
from invenio.ext.sqlalchemy import db
from invenio.modules.workflows.models import BibWorkflowObject, Workflow, \
ObjectVersion
from invenio.modules.workflows.engine import WorkflowStatus
from .form import CFG_FIELD_FLAGS, DataExporter
from .signals import file_uploaded
from .storage import Storage, DepositionStorage
#
# Exceptions
#
class DepositionError(Exception):
"""Base class for deposition errors."""
pass
class InvalidDepositionType(DepositionError):
"""Raise when a deposition type cannot be found."""
pass
class InvalidDepositionAction(DepositionError):
"""Raise when deposition is in an invalid state for action."""
pass
class DepositionDoesNotExists(DepositionError):
"""Raise when a deposition does not exists."""
pass
class DraftDoesNotExists(DepositionError):
"""Raise when a draft does not exists."""
pass
class FormDoesNotExists(DepositionError):
"""Raise when a draft does not exists."""
pass
class FileDoesNotExists(DepositionError):
"""Raise when a draft does not exists."""
pass
class DepositionNotDeletable(DepositionError):
"""Raise when a deposition cannot be deleted."""
pass
class FilenameAlreadyExists(DepositionError):
"""Raise when an identical filename is already present in a deposition."""
pass
class ForbiddenAction(DepositionError):
"""Raise when action on a deposition, draft or file is not authorized."""
pass
class InvalidApiAction(DepositionError):
"""Raise when an invalid API action is requested."""
pass
#
# Helpers
#
class FactoryMixin(object):
"""Mix-in class to help create objects from persisted object state."""
@classmethod
def factory(cls, state, *args, **kwargs):
obj = cls(*args, **kwargs)
obj.__setstate__(state)
return obj
#
# Primary classes
#
class DepositionType(object):
"""
A base class for the deposition types to ensure certain
properties are defined on each type.
A deposition type is just a BibWorkflow with a couple of extra methods.
To customize rendering behavior of the workflow for a given deposition type
you can override the render_error(), render_step() and render_completed()
methods.
"""
workflow = []
""" Workflow definition """
name = ""
""" Display name for this deposition type """
name_plural = ""
""" Plural version of display name for this deposition type """
enabled = False
""" Determines if type is enabled - TODO: REMOVE"""
default = False
"""
Determines if type is the default - warnings are issued if conflicts exist
TODO: remove
"""
deletable = False
"""
Determine if a deposition is deletable after submission.
"""
editable = False
"""
Determine if a deposition is editable after submission.
"""
stopable = False
"""
Determine if a deposition workflow can be stopped (i.e. discard changes).
"""
group = None
""" Name of group to include this type in. """
api = False
"""
Determines if API is enabled for this type (requires workflow to be
compatible with the API).
"""
draft_definitions = {'_default': None}
"""
Dictionary of all drafts for this deposition type
"""
marshal_file_fields = dict(
checksum=fields.String,
filename=fields.String(attribute='name'),
id=fields.String(attribute='uuid'),
filesize=fields.String(attribute='size'),
)
""" REST API structure of a file """
marshal_draft_fields = dict(
metadata=fields.Raw(attribute='values'),
completed=fields.Boolean,
id=fields.String,
)
""" REST API structure of a draft """
marshal_deposition_fields = dict(
id=fields.Integer,
title=fields.String,
created=UTCISODateTime,
modified=UTCISODateTime,
owner=fields.Integer(attribute='user_id'),
state=fields.String,
submitted=fields.Boolean,
files=fields.Nested(marshal_file_fields),
drafts=fields.Nested(marshal_draft_fields, attribute='drafts_list'),
)
""" REST API structure of a deposition """
@classmethod
def default_draft_id(cls, deposition):
return '_default'
@classmethod
def render_error(cls, dummy_deposition):
"""
Render a page when the deposition had a workflow error.
Method can be overwritten by subclasses to provide custom
user interface.
"""
flash('%(name)s deposition has returned error.' %
{'name': cls.name}, 'error')
return redirect(url_for('.index'))
@classmethod
def render_step(cls, deposition):
"""
Render a page for a given deposition step.
Method can be overwritten by subclasses to provide custom
user interface.
"""
ctx = deposition.get_render_context()
if ctx:
return render_template(**ctx)
else:
return render_template('deposit/error.html', **dict(
deposition=deposition,
deposition_type=(
None if deposition.type.is_default()
else deposition.type.get_identifier()
),
uuid=deposition.id,
my_depositions=Deposition.get_depositions(
current_user, type=deposition.type
),
))
@classmethod
def render_completed(cls, dummy_deposition):
"""
Render page when deposition was successfully completed (i.e. workflow
just finished successfully).
Method can be overwritten by subclasses to provide custom
user interface.
"""
flash('%(name)s was successfully finished.' %
{'name': cls.name}, 'success')
return redirect(url_for('.index'))
@classmethod
def render_final(cls, deposition):
"""
Render page when deposition was *already* successfully completed (i.e.
a finished workflow is being executed a second time).
This allows you render e.g. a preview of the record. The distinction
between render_completed and render_final is primarily useful for the
REST API (see api_final and api_completed)
Method can be overwritten by subclasses to provide custom
user interface.
"""
return cls.render_completed(deposition)
@classmethod
def api_completed(cls, deposition):
"""
Workflow just finished processing, so return a 202 Accepted, since
usually further background processing may happen.
"""
return deposition.marshal(), 202
@classmethod
def api_final(cls, deposition):
"""
Workflow already finished, and the user tries to re-execute the
workflow, so send a 400 Bad Request back.
"""
return dict(
message="Deposition workflow already completed",
status=400,
), 400
@classmethod
def api_step(cls, deposition):
"""
Workflow was halted during processing. The workflow task that halted
processing is expected to provide a response to send back to the
client.
The default response code is 500 Internal Server Error. A workflow task
is expected to use Deposition.set_render_context() with a dictionary
which is returned to the client. Set the key 'status', to change the
status code, e.g.::
d.set_render_context(dict(status=400, message="Bad request"))
If no response is provided by the workflow task, it is regarded as
an internal server error.
"""
ctx = deposition.get_render_context()
if ctx:
return ctx.get('response', {}), ctx.get('status', 500)
return cls.api_error(deposition)
@classmethod
def api_error(cls, deposition):
return dict(message='Internal Server Error', status=500), 500
@classmethod
def api_action(cls, deposition, action_id):
if action_id == 'run':
return deposition.run_workflow(headless=True)
elif action_id == 'reinitialize':
deposition.reinitialize_workflow()
return deposition.run_workflow(headless=True)
elif action_id == 'stop':
deposition.stop_workflow()
return deposition.run_workflow(headless=True)
raise InvalidApiAction(action_id)
@classmethod
def api_metadata_schema(cls, draft_id):
"""
Get the input validation schema for this draft_id
Allows you to override API defaults.
"""
from wtforms.fields.core import FieldList, FormField
if draft_id in cls.draft_definitions:
schema = dict()
formclass = cls.draft_definitions[draft_id]
for fname, fclass in formclass()._fields.items():
if isinstance(fclass, FieldList):
schema[fname] = dict(type='list')
elif isinstance(fclass, FormField):
schema[fname] = dict(type='dict')
else:
schema[fname] = dict(type='any')
return dict(type='dict', schema=schema)
return None
@classmethod
def marshal_deposition(cls, obj):
"""
Generate a JSON representation for REST API of a Deposition
"""
return marshal(obj, cls.marshal_deposition_fields)
@classmethod
def marshal_draft(cls, obj):
"""
Generate a JSON representation for REST API of a DepositionDraft
"""
return marshal(obj, cls.marshal_draft_fields)
@classmethod
def marshal_file(cls, obj):
"""
Generate a JSON representation for REST API of a DepositionFile
"""
return marshal(obj, cls.marshal_file_fields)
@classmethod
def authorize(cls, deposition, action):
if action == 'create':
return True # Any authenticated user
elif action == 'delete':
if deposition.has_sip():
return True # deposition.type.deletable
return True
elif action == 'reinitialize':
return deposition.type.editable
elif action == 'stop':
return deposition.type.stopable
elif action in ['add_file', 'remove_file', 'sort_files']:
# Don't allow adding/removing/sorting files after first submission
return not deposition.has_sip()
elif action in ['add_draft', ]:
# Allow adding drafts when in progress (independent of whether a SIP
# exists or not).
return deposition.state == 'inprogress'
else:
return not deposition.has_sip()
@classmethod
def authorize_draft(cls, deposition, draft, action):
if action == 'update':
# If deposition allows adding a draft, then allow editing the
# draft.
return cls.authorize(deposition, 'add_draft')
return cls.authorize(deposition, 'add_draft')
@classmethod
def authorize_file(cls, deposition, deposition_file, action):
return cls.authorize(deposition, 'add_file')
@classmethod
def get_identifier(cls):
""" Get type identifier (identical to workflow name) """
return cls.__name__
@classmethod
def is_enabled(cls):
""" Check if workflow is enabled """
# Wrapping in a method to eventually allow enabling/disabling
# via configuration.
return cls.enabled
@classmethod
def is_default(cls):
""" Check if workflow is the default """
# Wrapping in a method to eventually allow setting the default
# via configuration.
return cls.default
@classmethod
def run_workflow(cls, deposition):
"""
Run workflow for the given BibWorkflowObject.
Usually not invoked directly, but instead indirectly through
Deposition.run_workflow().
"""
if deposition.workflow_object.workflow is None or (
deposition.workflow_object.version == ObjectVersion.INITIAL
and
deposition.workflow_object.workflow.status ==
WorkflowStatus.NEW):
return deposition.workflow_object.start_workflow(
workflow_name=cls.get_identifier(),
id_user=deposition.workflow_object.id_user,
module_name="webdeposit"
)
else:
return deposition.workflow_object.continue_workflow(
start_point="restart_task",
)
@classmethod
def reinitialize_workflow(cls, deposition):
# Only reinitialize if really needed (i.e. you can only
# reinitialize a fully completed workflow).
wo = deposition.workflow_object
if wo.version == ObjectVersion.COMPLETED and \
wo.workflow.status == WorkflowStatus.COMPLETED:
wo.version = ObjectVersion.INITIAL
wo.workflow.status = WorkflowStatus.NEW
# Clear deposition drafts
deposition.drafts = {}
@classmethod
def stop_workflow(cls, deposition):
# Only stop workflow if really needed
wo = deposition.workflow_object
if wo.version != ObjectVersion.COMPLETED and \
wo.workflow.status != WorkflowStatus.COMPLETED:
# Only workflows which have been fully completed once before
# can be stopped
if deposition.has_sip():
wo.version = ObjectVersion.COMPLETED
wo.workflow.status = WorkflowStatus.COMPLETED
# Clear all drafts
deposition.drafts = {}
# Set title - FIXME: find better way to set title
sip = deposition.get_latest_sip(sealed=True)
title = sip.metadata.get('title', 'Untitled')
deposition.title = title
@classmethod
def all(cls):
""" Get a dictionary of deposition types """
from .registry import deposit_types
return deposit_types.mapping()
@classmethod
def get(cls, identifier):
try:
return cls.all()[identifier]
except KeyError:
raise InvalidDepositionType(identifier)
@classmethod
def keys(cls):
""" Get a list of deposition type names """
return cls.all().keys()
@classmethod
def values(cls):
""" Get a list of deposition type names """
return cls.all().values()
@classmethod
def get_default(cls):
""" Get a list of deposition type names """
from .registry import deposit_default_type
return deposit_default_type.get()
def __unicode__(self):
""" Return a name for this class """
return self.get_identifier()
class DepositionFile(FactoryMixin):
"""
Represents an uploaded file
Creating a normal deposition file::
uploaded_file = request.files['file']
filename = secure_filename(uploaded_file.filename)
backend = DepositionStorage(deposition_id)
d = DepositionFile(backend=backend)
d.save(uploaded_file, filename)
Creating a chunked deposition file::
uploaded_file = request.files['file']
filename = secure_filename(uploaded_file.filename)
chunk = request.files['chunk']
chunks = request.files['chunks']
backend = ChunkedDepositionStorage(deposition_id)
d = DepositionFile(id=file_id, backend=backend)
d.save(uploaded_file, filename, chunk, chunks)
if chunk == chunks:
d.save(finish=True, filename=filename)
Reading a file::
d = DepositionFile.from_json(data)
if d.is_local():
send_file(d.get_syspath())
else:
redirect(d.get_url())
d.delete()
Deleting a file::
d = DepositionFile.from_json(data)
d.delete()
"""
def __init__(self, uuid=None, backend=None):
self.uuid = uuid or str(uuid4())
self._backend = backend
self.name = ''
def __getstate__(self):
# TODO: Add content_type attributes
return dict(
id=self.uuid,
path=self.path,
name=self.name,
size=self.size,
checksum=self.checksum,
#bibdoc=self.bibdoc
)
def __setstate__(self, state):
self.uuid = state['id']
self._path = state['path']
self.name = state['name']
self.size = state['size']
self.checksum = state['checksum']
def __repr__(self):
data = self.__getstate__()
del data['path']
return json.dumps(data)
@property
def backend(self):
if not self._backend:
self._backend = Storage(None)
return self._backend
@property
def path(self):
if self._path is None:
raise Exception("No path set")
return self._path
def save(self, incoming_file, filename=None, *args, **kwargs):
self.name = secure_filename(filename or incoming_file.filename)
(self._path, self.size, self.checksum, result) = self.backend.save(
incoming_file, filename, *args, **kwargs
)
return result
def delete(self):
""" Delete the file on storage """
return self.backend.delete(self.path)
def is_local(self):
""" Determine if file is a local file """
return self.backend.is_local(self.path)
def get_url(self):
""" Get a URL for the file """
return self.backend.get_url(self.path)
def get_syspath(self):
""" Get a local system path to the file """
return self.backend.get_syspath(self.path)
class DepositionDraftCacheManager(object):
"""
Draft cache manager takes care of storing draft values in the cache prior
to a workflow being run. The data can be loaded by the prefill_draft()
workflow task.
"""
def __init__(self, user_id):
self.user_id = user_id
self.data = {}
@classmethod
def from_request(cls):
"""
Create a new draft cache from the current request.
"""
obj = cls(current_user.get_id())
# First check if we can get it as JSON
data = request.get_json(silent=True)
if not data:
# If not, simply merge both query parameters and request body
# parameters.
data = request.values.to_dict()
obj.data = data
return obj
@classmethod
def get(cls):
obj = cls(current_user.get_id())
obj.load()
return obj
def save(self):
""" Save data to session """
if self.has_data():
session['deposit_prefill'] = self.data
session.modified = True
else:
self.delete()
def load(self):
""" Load data from session """
self.data = session.get('deposit_prefill', {})
def delete(self):
""" Delete data in session """
if 'deposit_prefill' in session:
del session['deposit_prefill']
session.modified = True
def has_data(self):
"""
Determine if the cache has data.
"""
return bool(self.data)
def fill_draft(self, deposition, draft_id, clear=True):
"""
Fill a draft with cached draft values
"""
draft = deposition.get_or_create_draft(draft_id)
draft.process(self.data)
if clear:
self.data = {}
self.delete()
return draft
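# Typical flow (a sketch): cache incoming request values before the workflow
# runs, then let the prefill_draft() workflow task pick them up later:
#
#   cache = DepositionDraftCacheManager.from_request()
#   cache.save()            # stash values in the user's session
#   ...                     # later, inside the workflow
#   cache = DepositionDraftCacheManager.get()
#   if cache.has_data():
#       cache.fill_draft(deposition, '_default')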
class DepositionDraft(FactoryMixin):
"""
Represents the state of a form
"""
def __init__(self, draft_id, form_class=None, deposition_ref=None):
self.id = draft_id
self.completed = False
self.form_class = form_class
self.values = {}
self.flags = {}
self._form = None
# Back reference to the depositions
self._deposition_ref = deposition_ref
self.validate = False
def __getstate__(self):
return dict(
completed=self.completed,
values=self.values,
flags=self.flags,
validate=self.validate,
)
def __setstate__(self, state):
self.completed = state['completed']
self.form_class = None
if self._deposition_ref:
self.form_class = self._deposition_ref.type.draft_definitions.get(
self.id
)
self.values = state['values']
self.flags = state['flags']
self.validate = state.get('validate', True)
def is_completed(self):
return self.completed
def has_form(self):
return self.form_class is not None
def authorize(self, action):
if not self._deposition_ref:
return True # Not connected to deposition so authorize anything.
return self._deposition_ref.type.authorize_draft(
self._deposition_ref, self, action
)
def complete(self):
"""
Set state of draft to completed.
"""
self.completed = True
def update(self, form):
"""
Update draft values and flags with data from form.
"""
data = dict((key, value) for key, value in form.data.items()
if value is not None)
self.values = data
self.flags = form.get_flags()
def process(self, data, complete_form=False):
"""
Process, validate and store incoming form data and return response.
"""
if not self.authorize('update'):
raise ForbiddenAction('update', self)
if not self.has_form():
raise FormDoesNotExists(self.id)
# The form is initialized with form and draft data. The original
# draft_data is accessible in Field.object_data, Field.raw_data is the
# new form data and Field.data is the processed form data or the
# original draft data.
#
# Behind the scenes, Form.process() is called, which in turn calls
# Field.process_data(), Field.process_formdata() and any filters
# defined.
#
# Field.object_data contains the value of process_data(), while
# Field.data contains the value of process_formdata() and any filters
# applied.
form = self.get_form(formdata=data)
# Run form validation which will call Field.pre_validate(),
# Field.validators, Form.validate_<field>() and Field.post_validate().
# Afterwards Field.data has been validated and any errors will be
# present in Field.errors.
validated = form.validate()
# Call Form.run_processors() which in turn will call
# Field.run_processors() that allow fields to set flags (hide/show)
# and values of other fields after the entire formdata has been
# processed and validated.
validated_flags, validated_data, validated_msgs = (
form.get_flags(), form.data, form.messages
)
form.post_process(formfields=[] if complete_form else data.keys())
post_processed_flags, post_processed_data, post_processed_msgs = (
form.get_flags(), form.data, form.messages
)
# Save form values
self.update(form)
# Build result dictionary
process_field_names = None if complete_form else data.keys()
# Determine if some fields were changed during post-processing.
changed_values = dict(
(name, value) for name, value in post_processed_data.items()
if validated_data[name] != value
)
# Determine changed flags
changed_flags = dict(
(name, flags) for name, flags in post_processed_flags.items()
if validated_flags.get(name, []) != flags
)
# Determine changed messages
changed_msgs = dict(
(name, messages) for name, messages in post_processed_msgs.items()
if validated_msgs.get(name, []) != messages
or process_field_names is None or name in process_field_names
)
result = {}
if changed_msgs:
result['messages'] = changed_msgs
if changed_values:
result['values'] = changed_values
if changed_flags:
for flag in CFG_FIELD_FLAGS:
fields = [
(name, flag in field_flags)
for name, field_flags in changed_flags.items()
]
result[flag + '_on'] = map(
lambda x: x[0], filter(lambda x: x[1], fields)
)
result[flag + '_off'] = map(
lambda x: x[0], filter(lambda x: not x[1], fields)
)
return form, validated, result
def get_form(self, formdata=None, load_draft=True,
validate_draft=False):
"""
Create form instance with draft data and form data if provided.
:param formdata: Incoming form data.
:param load_draft: True to initialize form with draft data.
:param validate_draft: Set to true to validate draft data, when no form
data is provided.
"""
if not self.has_form():
raise FormDoesNotExists(self.id)
# If a field is not present in formdata, Form.process() will assume it
# is blank instead of using the draft_data value. Most of the time we
# are only submitting a single field in JSON via AJAX requests. We
# therefore reset non-submitted fields to the draft_data value with
# form.reset_field_data().
# WTForms deals with unicode - we deal with UTF-8, so convert everything
draft_data = unicodifier(self.values) if load_draft else {}
formdata = MultiDict(formdata or {})
form = self.form_class(
formdata=formdata, **draft_data
)
if formdata:
form.reset_field_data(exclude=formdata.keys())
# Set field flags
if load_draft and self.flags:
form.set_flags(self.flags)
# Ingest files in form
if self._deposition_ref:
form.files = self._deposition_ref.files
else:
form.files = []
# formdata was normalized to a MultiDict above, so test for emptiness
# rather than None.
if validate_draft and draft_data and not formdata:
form.validate()
return form
@classmethod
def merge_data(cls, drafts):
"""
Merge data of multiple drafts
Duplicate keys will be overwritten without warning.
"""
data = {}
# Don't include 1) disabled fields and 2) empty optional fields
func = lambda f: not f.flags.disabled and (f.flags.required or f.data)
for d in drafts:
if d.has_form():
visitor = DataExporter(
filter_func=func
)
visitor.visit(d.get_form())
data.update(visitor.data)
else:
data.update(d.values)
return data
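# Typical round trip (a sketch; `request_data` stands in for whatever dict of
# form values the view collected): fetch or create a draft, push incoming
# form data through validation and post-processing, then persist:
#
#   draft = deposition.get_or_create_draft('_default')
#   form, validated, result = draft.process(request_data)
#   if validated:
#       draft.complete()
#   deposition.save()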
class Deposition(object):
"""
Wraps a BibWorkflowObject
Basically an interface to work with BibWorkflowObject data attribute in an
easy manner.
"""
def __init__(self, workflow_object, type=None, user_id=None):
self.workflow_object = workflow_object
if not workflow_object:
self.files = []
self.drafts = {}
self.type = self.get_type(type)
self.title = ''
self.sips = []
self.workflow_object = BibWorkflowObject.create_object(
id_user=user_id,
)
# Ensure default data is set for all objects.
self.update()
else:
self.__setstate__(workflow_object.get_data())
self.engine = None
#
# Properties proxies to BibWorkflowObject
#
@property
def id(self):
return self.workflow_object.id
@property
def user_id(self):
return self.workflow_object.id_user
@user_id.setter
def user_id(self, value):
self.workflow_object.id_user = value
self.workflow_object.workflow.id_user = value
@property
def created(self):
return self.workflow_object.created
@property
def modified(self):
return self.workflow_object.modified
@property
def drafts_list(self):
# Needed for easy marshaling by API
return self.drafts.values()
#
# Proxy methods
#
def authorize(self, action):
"""
Determine if certain action is authorized
Delegated to deposition type to allow overwriting default behavior.
"""
return self.type.authorize(self, action)
#
# Serialization related methods
#
def marshal(self):
"""
API representation of an object.
Delegated to the DepositionType, to allow overwriting default
behaviour.
"""
return self.type.marshal_deposition(self)
def __getstate__(self):
"""
Serialize deposition state for storing in the BibWorkflowObject
"""
# The bibworkflow object id and owner are implicit, as the Deposition
# object only wraps the data attribute of a BibWorkflowObject.
# FIXME: Find better solution for setting the title.
for d in self.drafts.values():
if 'title' in d.values:
self.title = d.values['title']
break
return dict(
type=self.type.get_identifier(),
title=self.title,
files=[f.__getstate__() for f in self.files],
drafts=dict(
[(d_id, d.__getstate__()) for d_id, d in self.drafts.items()]
),
sips=[f.__getstate__() for f in self.sips],
)
def __setstate__(self, state):
"""
Deserialize deposition from state stored in BibWorkflowObject
"""
self.type = DepositionType.get(state['type'])
self.title = state['title']
self.files = [
DepositionFile.factory(
f_state,
uuid=f_state['id'],
backend=DepositionStorage(self.id),
)
for f_state in state['files']
]
self.drafts = dict(
[(d_id, DepositionDraft.factory(d_state, d_id,
deposition_ref=self))
for d_id, d_state in state['drafts'].items()]
)
self.sips = [
SubmissionInformationPackage.factory(s_state, uuid=s_state['id'])
for s_state in state.get('sips', [])
]
#
# Persistence related methods
#
def update(self):
"""
Update workflow object with latest data.
"""
data = self.__getstate__()
# BibWorkflow calls get_data() before executing any workflow task,
# and calls set_data() after. Hence, unless we update the data
# attribute it will be overwritten.
try:
self.workflow_object.data = data
except AttributeError:
pass
self.workflow_object.set_data(data)
def reload(self):
"""
Get latest data from workflow object
"""
self.__setstate__(self.workflow_object.get_data())
def save(self):
"""
Save the state of the deposition.
Uses the __getstate__ method to make a JSON-serializable
representation, sets it as data on the workflow object
and saves it.
"""
self.update()
self.workflow_object.save()
def delete(self):
"""
Delete the current deposition
"""
if not self.authorize('delete'):
raise DepositionNotDeletable(self)
for f in self.files:
f.delete()
if self.workflow_object.id_workflow:
Workflow.delete(uuid=self.workflow_object.id_workflow)
BibWorkflowObject.query.filter_by(
id_workflow=self.workflow_object.id_workflow
).delete()
else:
db.session.delete(self.workflow_object)
db.session.commit()
#
# Workflow execution
#
def run_workflow(self, headless=False):
"""
Execute the underlying workflow
If you made modifications to the deposition you must save it before
running the workflow, using the save() method.
"""
if self.workflow_object.workflow is not None:
current_status = self.workflow_object.workflow.status
if current_status == WorkflowStatus.COMPLETED:
return self.type.api_final(self) if headless \
else self.type.render_final(self)
self.update()
self.engine = self.type.run_workflow(self)
self.reload()
status = self.engine.status
if status == WorkflowStatus.ERROR:
return self.type.api_error(self) if headless else \
self.type.render_error(self)
elif status != WorkflowStatus.COMPLETED:
return self.type.api_step(self) if headless else \
self.type.render_step(self)
elif status == WorkflowStatus.COMPLETED:
return self.type.api_completed(self) if headless else \
self.type.render_completed(self)
def reinitialize_workflow(self):
"""
Reinitialize a workflow object (i.e. prepare it for editing)
"""
if self.state != 'done':
raise InvalidDepositionAction("Action only allowed for "
"depositions in state 'done'.")
if not self.authorize('reinitialize'):
raise ForbiddenAction('reinitialize', self)
self.type.reinitialize_workflow(self)
def stop_workflow(self):
"""
Stop a running workflow object (e.g. discard changes while editing).
"""
if self.state != 'inprogress' or not self.submitted:
raise InvalidDepositionAction("Action only allowed for "
"depositions in state 'inprogress'.")
if not self.authorize('stop'):
raise ForbiddenAction('stop', self)
self.type.stop_workflow(self)
def set_render_context(self, ctx):
"""
Set rendering context - used in workflow tasks to set what is to be
rendered (either by API or UI)
"""
self.workflow_object.deposition_context = ctx
def get_render_context(self):
"""
Get rendering context - used by DepositionType.render_step/api_step
"""
return getattr(self.workflow_object, 'deposition_context', {})
@property
def state(self):
"""
Return simplified workflow state - inprogress, done or error
"""
try:
status = self.workflow_object.workflow.status
if status == WorkflowStatus.ERROR:
return "error"
elif status == WorkflowStatus.COMPLETED:
return "done"
except AttributeError:
pass
return "inprogress"
#
# Draft related methods
#
def get_draft(self, draft_id):
"""
Get draft
"""
if draft_id not in self.drafts:
raise DraftDoesNotExists(draft_id)
return self.drafts[draft_id]
def get_or_create_draft(self, draft_id):
"""
Get or create a draft for given draft_id
"""
if draft_id not in self.drafts:
if draft_id not in self.type.draft_definitions:
raise DraftDoesNotExists(draft_id)
if not self.authorize('add_draft'):
raise ForbiddenAction('add_draft', self)
self.drafts[draft_id] = DepositionDraft(
draft_id,
form_class=self.type.draft_definitions[draft_id],
deposition_ref=self,
)
return self.drafts[draft_id]
def get_default_draft_id(self):
"""
Get the default draft id for this deposition.
"""
return self.type.default_draft_id(self)
#
# Submission information package related methods
#
def get_latest_sip(self, sealed=None):
"""
Get the latest submission information package
:param sealed: Set to True to only return the latest sealed SIP. Set to
False to only return the latest unsealed SIP.
"""
if len(self.sips) > 0:
for sip in reversed(self.sips):
if sealed is None:
return sip
elif sealed and sip.is_sealed():
return sip
elif not sealed and not sip.is_sealed():
return sip
return None
def create_sip(self):
"""
Create a new submission information package (SIP) with metadata from
the drafts.
"""
metadata = DepositionDraft.merge_data(self.drafts.values())
metadata['files'] = map(
lambda x: dict(path=x.path, name=os.path.splitext(x.name)[0]),
self.files
)
sip = SubmissionInformationPackage(metadata=metadata)
self.sips.append(sip)
return sip
def has_sip(self, sealed=True):
"""
Determine if deposition has a sealed submission information package.
"""
for sip in self.sips:
if (sip.is_sealed() and sealed) or \
(not sealed and not sip.is_sealed()):
return True
return False
@property
def submitted(self):
return self.has_sip()
#
# File related methods
#
def get_file(self, file_id):
for f in self.files:
if f.uuid == file_id:
return f
return None
def add_file(self, deposition_file):
if not self.authorize('add_file'):
raise ForbiddenAction('add_file', self)
for f in self.files:
if f.name == deposition_file.name:
raise FilenameAlreadyExists(deposition_file.name)
self.files.append(deposition_file)
file_uploaded.send(
self.type.get_identifier(),
deposition=self,
deposition_file=deposition_file,
)
def remove_file(self, file_id):
if not self.authorize('remove_file'):
raise ForbiddenAction('remove_file', self)
idx = None
for i, f in enumerate(self.files):
if f.uuid == file_id:
idx = i
if idx is not None:
return self.files.pop(idx)
return None
def sort_files(self, file_id_list):
"""
Order the files according the list of ids provided to this function.
"""
if not self.authorize('sort_files'):
raise ForbiddenAction('sort_files', self)
search_dict = dict(
[(f, i) for i, f in enumerate(file_id_list)]
)
def _sort_files_cmp(f_x, f_y):
    # Files missing from the id list consistently sort last. The original
    # branches returned 1 for both orderings of a known vs. unknown id,
    # i.e. an inconsistent comparator.
    i_x = search_dict.get(f_x.uuid, None)
    i_y = search_dict.get(f_y.uuid, None)
    if i_x == i_y:
        return 0
    elif i_x is None:
        return 1
    elif i_y is None:
        return -1
    return 1 if i_x > i_y else -1
self.files = sorted(self.files, cmp=_sort_files_cmp)
#
# Class methods
#
@classmethod
def get_type(cls, type_or_id):
if type_or_id and isinstance(type_or_id, type) and \
issubclass(type_or_id, DepositionType):
return type_or_id
else:
return DepositionType.get(type_or_id) if type_or_id else \
DepositionType.get_default()
@classmethod
def create(cls, user, type=None):
"""
Create a new deposition object.
To persist the deposition, you must call save() on the created object.
If no type is defined, the default deposition type will be assigned.
@param user: The owner of the deposition
@param type: Deposition type identifier.
"""
t = cls.get_type(type)
if not t.authorize(None, 'create'):
raise ForbiddenAction('create')
# Note: it is correct to pass 'type' and not 't' below to constructor.
obj = cls(None, type=type, user_id=user.get_id())
return obj
@classmethod
def get(cls, object_id, user=None, type=None):
"""
Get the deposition with specified object id.
@param object_id: The BibWorkflowObject id.
@param user: Owner of the BibWorkflowObject
@param type: Deposition type identifier.
"""
if type:
type = DepositionType.get(type)
try:
workflow_object = BibWorkflowObject.query.filter(
BibWorkflowObject.id == object_id,
# id_user!=0 means current version, as opposed to some snapshot
# version.
BibWorkflowObject.id_user != 0,
).one()
except NoResultFound:
raise DepositionDoesNotExists(object_id)
if user and workflow_object.id_user != user.get_id() and not 'International Atomic Energy Agency (IAEA)' in user.info['group']:
raise DepositionDoesNotExists(object_id)
obj = cls(workflow_object)
if type and obj.type != type:
raise DepositionDoesNotExists(object_id, type)
return obj
@classmethod
def get_depositions(cls, user=None, type=None, date_from=None, date_to=None):
params = [
Workflow.module_name == 'webdeposit',
]
if user:
params.append(BibWorkflowObject.id_user == user.get_id())
else:
params.append(BibWorkflowObject.id_user != 0)
if type:
params.append(Workflow.name == type.get_identifier())
if date_from:
params.append(BibWorkflowObject.modified > date_from)
if date_to:
params.append(BibWorkflowObject.modified < date_to)
objects = BibWorkflowObject.query.join("workflow").options(
db.contains_eager('workflow')).filter(*params).order_by(
BibWorkflowObject.modified.desc()).all()
def _create_obj(o):
try:
obj = cls(o)
except InvalidDepositionType as err:
current_app.logger.exception(err)
return None
if type is None or obj.type == type:
return obj
return None
return filter(lambda x: x is not None, map(_create_obj, objects))
class SubmissionInformationPackage(FactoryMixin):
"""Submission information package (SIP).
:param uuid: Unique identifier for this SIP
:param metadata: Metadata in JSON for this submission information package
:param package: Full generated metadata for this package (i.e. normally
MARC for records, but could anything).
:param timestamp: UTC timestamp in ISO8601 format of when package was
sealed.
:param agents: List of agents for this package (e.g. creator, ...)
:param task_ids: List of task ids submitted to ingest this package (may be
appended to after SIP has been sealed).
"""
def __init__(self, uuid=None, metadata={}):
self.uuid = uuid or str(uuid4())
self.metadata = metadata
self.package = ""
self.timestamp = None
self.agents = []
self.task_ids = []
def __getstate__(self):
return dict(
id=self.uuid,
metadata=self.metadata,
package=self.package,
timestamp=self.timestamp,
task_ids=self.task_ids,
agents=[a.__getstate__() for a in self.agents],
)
def __setstate__(self, state):
self.uuid = state['id']
self._metadata = state.get('metadata', {})
self.package = state.get('package', None)
self.timestamp = state.get('timestamp', None)
self.agents = [Agent.factory(a_state)
for a_state in state.get('agents', [])]
self.task_ids = state.get('task_ids', [])
def seal(self):
self.timestamp = datetime.now(tzutc()).isoformat()
def is_sealed(self):
return self.timestamp is not None
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, value):
import datetime
import json
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime.datetime, datetime.date)):
encoded_object = obj.isoformat()
else:
encoded_object = json.JSONEncoder.default(self, obj)
return encoded_object
data = json.dumps(value, cls=DateTimeEncoder)
self._metadata = json.loads(data)
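# Illustrative sketch (not used by the module): the metadata setter above
# makes datetimes JSON-safe via a custom encoder; the self-contained
# round-trip below shows the equivalent behaviour.
def _example_metadata_roundtrip():
    import datetime
    import json
    value = {'when': datetime.date(2020, 1, 2)}
    data = json.dumps(value, default=lambda o: o.isoformat())
    return json.loads(data)  # -> {'when': '2020-01-02'}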
class Agent(FactoryMixin):
"""Agent."""
def __init__(self, role=None, from_request_context=False):
self.role = role
self.user_id = None
self.ip_address = None
self.email_address = None
if from_request_context:
self.from_request_context()
def __getstate__(self):
return dict(
role=self.role,
user_id=self.user_id,
ip_address=self.ip_address,
email_address=self.email_address,
)
def __setstate__(self, state):
self.role = state['role']
self.user_id = state['user_id']
self.ip_address = state['ip_address']
self.email_address = state['email_address']
def from_request_context(self):
from flask import request
from invenio.ext.login import current_user
self.ip_address = request.remote_addr
self.user_id = current_user.get_id()
self.email_address = current_user.info.get('email', '')
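# Illustrative lifecycle sketch (not called anywhere): the typical
# create -> draft -> save -> run_workflow sequence for a Deposition.
# Assumes a configured Invenio deposit module; the draft id '_default'
# is hypothetical and depends on the DepositionType in use.
def _example_deposition_lifecycle(user):
    dep = Deposition.create(user)
    dep.get_or_create_draft('_default')
    dep.save()
    return dep.run_workflow()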
|
SDSG-Invenio/invenio
|
invenio/modules/deposit/models.py
|
Python
|
gpl-2.0
| 47,636
|
[
"VisIt"
] |
ada5bd9e497553df783e45701698eb9177019007e8413b6fa6f36d48c41dfc7b
|
from DIRAC.Core.Base import Script
Script.parseCommandLine()
import unittest,types,time
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
#from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
testUser = 'atsareg'
testGroup = 'dirac_user'
testDir = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir'
testFile = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir/testfile'
class FileCatalogDBTestCase(unittest.TestCase):
""" Base class for the FileCatalogDB test cases
"""
def setUp(self):
print "Creating FileCatalog Client"
self.fc = FileCatalogClient()
class UserGroupCase(FileCatalogDBTestCase):
def test_userOperations(self):
result = self.fc.addUser( testUser )
self.assert_( result['OK'] )
result = self.fc.getUsers()
self.assert_( result['OK'] )
if result['OK']:
self.assert_( testUser in result['Value'] )
def test_groupOperations(self):
result = self.fc.addGroup( testGroup )
self.assert_( result['OK'] )
result = self.fc.getGroups()
self.assert_( result['OK'] )
if result['OK']:
self.assert_( testGroup in result['Value'] )
class DirectoryCase(FileCatalogDBTestCase):
def test_directoryOperations(self):
result = self.fc.createDirectory( testDir )
if not result['OK']:
print result
self.assert_( result['OK'] )
result = self.fc.changePathOwner( { testDir: {"Owner":testUser} }, False )
if not result['OK']:
print result
self.assert_( result['OK'] )
result = self.fc.changePathGroup( { testDir: {"Group":testGroup} }, False )
if not result['OK']:
print result
self.assert_( result['OK'] )
result = self.fc.isDirectory('/')
if not result['OK']:
print result
self.assert_( result['OK'])
class FileCase(FileCatalogDBTestCase):
def test_fileOperations(self):
"""
this test requires the SE to be properly defined in the CS
"""
from DIRAC import gConfig
testSE = 'testSE'
#rssClient = ResourceStatusClient()
#result = rssClient.getStorageElementsList( 'Read' )
#result = gConfig.getSections( '/Resources/StorageElements' )
#if result['OK'] and result['Value']:
# testSE = result['Value'][0]
#if result['Ok']:
# testSE = result['Value'][ 0 ]
result = self.fc.addFile( { testFile: { 'PFN': 'testfile',
'SE': testSE ,
'Size':0,
'GUID':0,
'Checksum':'0' } } )
self.assert_( result['OK'] )
if gConfig.getValue( '/Resources/StorageElements/%s/AccessProtocol.1/Host' % testSE, '' ):
result = self.fc.getReplicas( testFile )
self.assert_( result['OK'] )
self.assert_( testFile in result['Value']['Successful'] )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(UserGroupCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FileCase))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
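def example_catalog_calls():
    """Sketch only (assumes a working DIRAC install and a configured file
    catalog): the client returns the usual S_OK/S_ERROR result dicts, as
    exercised by the tests above."""
    fc = FileCatalogClient()
    result = fc.createDirectory( testDir )
    if not result['OK']:
        print result['Message']
    return result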
|
avedaee/DIRAC
|
DataManagementSystem/DB/test/TestFileCatalogDB.py
|
Python
|
gpl-3.0
| 3,226
|
[
"DIRAC"
] |
4838ac5c84c3e9f7e249b7955a2cf586132b2e0449de4ad7cd77a2b875a3f88e
|
#
# READBSC
#
# Routines to read and manipulate the Bright Star Catalog. These sources
# are used for optical pointing measurements, not radio observations.
#
# 2013-Jun-23 DEG
# Converted from original to use PyEphem routines
# 2015-May-29 DG
# Converted from using datime() to using Time() based on astropy
# 2015-Oct-14 DG
# Fixed a couple of bugs, including need to login with user/password
# 2015-Oct-18 DG
# Added "mount" flag to startracktable() to indicate if this is an
# azel mount (default, mount='azel'), or equatorial mount (mount=anything else)
# 2015-Oct-20 DG
# Strange. For some reason, eovsa_ha() did not work because the sources were
# not "computed" for the EOVSA array. Somehow, my earlier changes to some
# routines must have broken this. Now I just do the compute in starobs2dxeldel().
# I realize that I also needed an hadec switch in that routine, to write out
# HA and Dec coordinates for equatorial mounts (see eq_mountcal() in mountcal.py).
# 2016-Jan-09 DG
# Added analyze_stars() routine, to automatically upload image files to
# Astrometry.net, get the solutions, and create the starsolutions file.
# Really cool!
# 2016-Jan-26 DG
# Changed analyze_stars() to skip submission of image if wcs file already
# exists in the directory.
# 2021-May-29 DG
# A couple of small changes to analyze_stars() to account for differences with
# the ZWO software (relative to MaxIm DL). It should work for either. Another
# very important change is to add --crpix-center to the call to astrometry.net so
# that the wcs files returned are using the image center as the reference
# pixel! The whole scheme depends on that, so it was failing before...
# 2021-May-31 DG
# Added filter_images() routine, and two helper routines (chkimg and star_shape)
# to find the best images for each star and save them to another folder. It does
# this based on a 2D Gaussian fit to the brightest star and finding the smallest
# area for all of the images of each pointing. One an optionally see the fit
# results.
# 2021-Jul-21 OG
# Added statement to Skip commented out lines in filename in procedure
# starobs2dxeldel
# Must be run from Dropbox/PythonCode/Current directory
from numpy import array, zeros, ones, arange, where, argsort, sort, pi, median, argmin
from util import *
import ephem
import datetime as dt
#from Coordinates import *
#from Astrometry import *
from eovsa_lst import *
from eovsa_array import *
from ftplib import FTP
def readbsc(filename=None):
''' Read entire Bright Star Catalog and return a list of PyEphem sources
'''
if filename is None:
filename = '../Star Pointing/BrightStarCatalog.txt'
try:
f = open(filename,'r')
except:
print 'readbsc: Could not open file',filename
return None
# Read two header lines (and ignore them)
line = f.readline()
line = f.readline()
srcs = []
# Loop over lines in file
for line in f.readlines():
srcs.append(ephem.readdb('HR'+line[1:5]+',f,'+line[6:17]+','+line[18:30]+','+line[53:57]+',2000'))
f.close()
# Do an initial compute so that name, ra, dec, etc. are accessible. Will override with compute for
# observer later.
for src in srcs:
src.compute()
return srcs
def selectbsc(t, srcs, magrange):
''' Given a list of star sources, select from the list based on hour angle,
elevation, and magnitude range provided in magrange as [maglow, maghi]
'''
# Pare down the list by selecting stars within +3 and -9 hours of HA.
# Pare down further by requiring stars within -30 and +45 degrees in Dec
# Finally, find stars with magnitudes within the given magrange
halow = Dec_Angle('-09:00:00','hms').radians
hahi = Dec_Angle('03:00:00','hms').radians
declow = Dec_Angle('-30:00:00','dms').radians
dechi = Dec_Angle('+45:00:00','dms').radians
maglow = magrange[0]
maghi = magrange[1]
# Make a list of good sources (those in the ranges specified)
sel = []
# Loop over sources
for i,src in enumerate(srcs):
# Make sure ha is in range -pi to pi
ha = eovsa_ha(src,t)
dec = src.dec
mag = float(src.mag)
good = (ha > halow) & (ha < hahi) & (dec > declow) & (dec < dechi) \
& (mag > maglow) & (mag < maghi)
if good:
sel.append(i)
# These selected stars should be reachable by the antennas, so now check them
# for angular separation.
nstars = len(sel) # Number of stars so far selected
widesep = zeros((nstars,nstars),bool) # Boolean nstars x nstars array
for i in range(nstars-1):
for j in range(i+1,nstars):
# True where pairs of stars are > 21.5 deg apart
widesep[i,j] = ephem.separation(srcs[sel[i]],srcs[sel[j]]) > (21.5*pi/180)
widesep[j,i] = widesep[i,j]
# Go through the widesep array row by row and mark stars with sep < 21.5 degrees for deletion
idx = ones((nstars),bool)
for i in range(nstars):
if idx[i]:
# Only look at rows for "True" columns
x = widesep[:,i]
x[:i+1] = True # Do not delete stars in lower part of array
idx = idx & x
# This should be a list of remaining good stars
ids = array(sel)[idx]
print len(ids),'stars selected for date/time starting at:',t.iso
print 'Number RA Dec Magnitude'
fmt = '{0:<4} {1:>12} {2:>12} {3:>6}'
for i in ids:
dec = srcs[i].a_dec
decstr = str(srcs[i].a_dec).zfill(10)
if dec < 0:
decstr = '-'+str(srcs[i].a_dec)[1:].zfill(10)
print fmt.format(srcs[i].name[2:],str(srcs[i].a_ra),decstr,srcs[i].mag)
return ids
def getbscnames(num=None, filename=None):
''' Looks up the common star name for each HR number in the list given in array num
'''
if filename is None:
filename = '../Star Pointing/bsc5.dat'
try:
f = open(filename,'r')
except:
print 'getbscnames: Could not open file', filename
return ''
if num is None:
print 'getbscnames: Must specify an ordered list of star numbers'
return ''
line = '9999'
name = []
for i in range(len(num)):
while int(line[0:4]) != num[i]:
line = f.readline()
# This should be the line that matches the star number i
n = line[4:7]
greek = line[7:10]
cnstl = line[10:14]
alt = line[14:24]
if greek == ' ':
# No greek letter
if n == ' ':
# No number, so use alt
name.append(alt) # e.g. 'BD-10 6177' or 'CD-4015285'
else:
# Number, but no greek letter
name.append(n+cnstl) # e.g. ' 80 Peg' or '108 Aqr'
else:
# Greek letter
name.append(greek+cnstl) # e.g. 'Phi Peg' or 'Gam1And'
f.close()
return array(name)
def startracktable(t, names, srcs, ids, npts=25, mount='azel'):
''' Generate an RA_Dec track table for observing this list of stars
'''
nstars = len(ids)
min2radians = RA_Angle('00:01:00','hms').radians
min2mjd = 60.0001/86400. # Slightly greater to avoid annoying times like 59.999.
# Convert radians to antenna controller "user units" of 1/10000th of a degree
r2u = 1800000./pi
ovsa = ephem.Observer()
ovsa.date = t.mjd - 15019.5 # Converts MJD to ephem date
ovsa.lon = '-118.286953'
ovsa.lat = '37.233170'
ovsa.elevation = 1200
ovsa.pressure = 0 # Eliminates refraction due to atmosphere
filename = '../Star Pointing/startracktable.radec'
try:
f = open(filename,'w')
except:
print 'startracktable: Could not open output file',filename
return
# Generate star table file name
datstr = t.iso[:10]
outfile = '../Star Pointing/startable-'+datstr+'.txt'
o = open(outfile,'w')
ha_start = zeros(nstars)
# Loop over sources
for i in range(nstars):
src = srcs[ids[i]]
# First calculate HA
ha_start[i] = eovsa_ha(src,t)
# We now have nstars coordinates of date, with RA converted to an
# initial HA. Now we have to increment over time in steps of ha_minutes
# for a specified number of pointings and identify the stars we want to observe based
# on their Az, El visibility.
# Find first star of interest (the one with the greatest hour angle)
idx = argsort(ha_start)
# Determine sky limits accounting for distance star can travel during
# observation
azlo = RA_Angle('45:00:00','dms').radians
azhi = RA_Angle('325:00:00','dms').radians
ello = Dec_Angle('15:00:00','dms').radians
elhi = Dec_Angle('80:00:00','dms').radians
# HA/Dec limits for equatorial mount antennas
halo = Angle('-55:00:00','dms').radians
hahi = Angle('55:00:00','dms').radians
declo = Dec_Angle('-23:00:00','dms').radians
dechi = Dec_Angle('44:00:00','dms').radians
written = False # Flag to say a star was written
j = idx[nstars-1] # Pointer to current star of interest, for cycling among the stars
nchecked = 0
ha_minutes = 0
# Print to screen
print 'Num Name RA(J2000) Dec(J2000) RA(Date) Dec(Date) Az(deg) El(deg) Mag Time'
print '==== ========== ============ ============ ============ ============ ============ =========== ===== ==========='
# And write to outfile
o.write('Num Name RA(J2000) Dec(J2000) RA(Date) Dec(Date) Az(deg) El(deg) Mag Time \n')
o.write('==== ========== ============ ============ ============ ============ ============ =========== ===== ===========\n')
dha_minutes = 4 # Length of time to stay on each star
firstline = True
# Entire duration is npts pointings, or npts*dha_minutes
while ha_minutes < dha_minutes*npts:
# print 'HA_Minutes is',ha_minutes
for i in range(j,nstars):
nchecked += 1
newt = Time(t.mjd+ha_minutes*min2mjd,format='mjd')
ovsa.date = newt.mjd - 15019.5 # Converts MJD to ephem date
src = srcs[ids[i]]
src.compute(ovsa)
ha = eovsa_ha(src,newt) # updates source to new time
dec = src.dec
# Get Az, El
az, el = src.az, src.alt
azgood = (az > azlo) & (az < azhi)
elgood = (el > ello) & (el < elhi)
# Case of equatorial mount--just use same "good" variables but now
# corresponding to HA and Dec
if mount != 'azel':
azgood = (ha > halo) & (ha < hahi)
elgood = (dec > declo) & (dec < dechi)
if azgood & elgood:
# This star is okay, so create entries for it and
# jump out of loop
rad = int(src.ra*r2u)
decd = int(src.dec*r2u)
mjd = t.mjd + ha_minutes*min2mjd # Time for this entry
mjdint = int(mjd) # Integer part of day
ms = int(round((mjd-mjdint)*86400*1000)) # Time of day as ms
if firstline:
line = str(rad)+' '+str(decd)+' '+str(mjdint)+' '+str(0)+'\n' # Start first line at 0 UT (because why not?)
else:
line = str(rad)+' '+str(decd)+' '+str(mjdint)+' '+str(ms)+'\n'
f.write(line)
mjd = t.mjd + (ha_minutes + 0.010/60.)*min2mjd # Add 10 msec
mjdint = int(mjd) # Integer part of day
ms = int(round((mjd-mjdint)*86400*1000)) # Time of day as ms
if firstline:
line = str(rad)+' '+str(decd)+' '+str(mjdint)+' '+str(10)+'\n'
firstline = False
else:
line = str(rad)+' '+str(decd)+' '+str(mjdint)+' '+str(ms)+'\n'
f.write(line)
mjd = t.mjd + (ha_minutes + dha_minutes - 0.020/60.)*min2mjd # Add dha_minutes - 20 ms
mjdint = int(mjd) # Integer part of day
ms = int(round((mjd-mjdint)*86400*1000)) # Time of day as ms
line = str(rad)+' '+str(decd)+' '+str(mjdint)+' '+str(ms)+'\n'
f.write(line)
mjd = t.mjd + (ha_minutes + dha_minutes - 0.010/60.)*min2mjd # Add dha_minutes - 10 ms
mjdint = int(mjd) # Integer part of day
ms = int(round((mjd-mjdint)*86400*1000)) # Time of day as ms
line = str(rad)+' '+str(decd)+' '+str(mjdint)+' '+str(ms)+'\n'
f.write(line)
dec = src.a_dec
adecstr = str(src.a_dec).zfill(10)
if dec < 0:
adecstr = '-'+str(src.a_dec)[1:].zfill(10)
dec = src.dec
decstr = str(src.dec).zfill(10)
if dec < 0:
decstr = '-'+str(src.dec)[1:].zfill(10)
# Print to screen
fmt = '{0:<4} {1:<10} {2:>12} {3:>12} {4:>12} {5:>12} {6:>12} {7:>11} {8:5.2f} {9:>12}'
print fmt.format(src.name[2:], names[i], str(src.a_ra), adecstr, str(src.ra), decstr, \
str(src.az), str(src.alt), src.mag, newt.iso[:19])
# And write to outfile
fmt += '\n'
o.write(fmt.format(src.name[2:], names[i], str(src.a_ra), adecstr, str(src.ra), decstr, \
str(src.az), str(src.alt), src.mag, newt.iso[:19]))
written = True
# print 'J written =',j
j = i+1 # advance to next star and next time
ha_minutes += dha_minutes
break
if j == nstars:  # '==' rather than 'is': identity checks on ints are unreliable
#print 'J is equal to NSTARS, so setting to zero. Nchecked is ',nchecked
j = 0
if nchecked >= nstars:
#print 'Checked all of the stars for', ha_minutes,'!'
ha_minutes += dha_minutes
nchecked = 0
if (i == nstars-1) and (not written):
#print 'Got through all the stars so far, so go back to beginning'
j = 0
else:
#print 'Got our star. Next J =',j
nchecked = 0
written = False # Reset the "written" flag
# All done, so repeat the last line twice replacing time for end of UT day so antenna will hold on source
f.write(line[:line.rfind(' ')]+' 86399989\n')
f.write(line[:line.rfind(' ')]+' 86399999')
f.close()
o.close()
def starobs2dxeldel(filename=None,hadec=False):
"""Takes the star observations (calculated RA,Dec and measured RA,Dec) and
calculates the differences in (Az, El). The expected file format has the
first three lines, below, and then a series of lines like the third:
Num Name RA(J2000) Dec(J2000) RA(Meas) Dec(Meas) Time(Meas)
==== ========== ============ ============= ============ ============= ==========
37 BD-18 14 0:12:10.000 -17:56:18.000 0:15:54.000 -18:35:23.000 02:38:00
4203 42 LMi 10:45:51.90 30:40:56.0 10:50:11.55 30:56:25.0 05:11:53.208
"""
if filename is None:
filename = '/home/dgary/Dropbox/Python/Star Pointing/2012Jan09_starobs.txt'
f = open(filename, 'r')
lines = f.readlines()
f.close()
idx = filename.find('solutions')
outfile = filename[0:idx]+'reduction'+filename[idx+9:]
o = open(outfile,'w')
line = lines[0]
didx = filename.find('.txt')
datstr = filename[didx-10:didx]
# Loop over the lines in the file
for line in lines[2:]:
if line == '': break
if line[0] == '#': continue
num = int(line[0:4])
name = line[5:15]
RA_J2000 = RA_Angle(line[16:28].strip(),'hms')
Dec_J2000 = Dec_Angle(line[29:42].strip(),'dms')
RA_Obs = RA_Angle(line[43:55].strip(),'hms')
Dec_Obs = Dec_Angle(line[56:69].strip(),'dms')
timstr = line[71:79]
t = Time(datstr+' '+timstr)
# Make a couple of fake sources, one for the nominal position, and one for the observed position
srcnom = ephem.readdb('Nominal'+',f,'+line[16:28].strip()+','+line[29:42].strip()+','+'1.0,2000')
srcobs = ephem.readdb('Observed'+',f,'+line[43:55].strip()+','+line[56:69].strip()+','+'1.0,2000')
# Compute sources for EOVSA array at current time
aa = eovsa_array()
aa.set_jultime(t.jd)
srcnom.compute(aa)
srcobs.compute(aa)
HA_Obs = eovsa_ha(srcnom,t) # Updates srcnom for OVRO location and time Time_Obs)
HA_Obs = eovsa_ha(srcobs,t) # Updates srcobs for OVRO location and time Time_Obs)
if hadec:
ha1 = HA_Obs
dec1 = Dec_Obs.get()
az0 = srcnom.az
el0 = srcnom.alt
az1 = srcobs.az
el1 = srcobs.alt
# Calculate differences (in radians)
d_xel = (az1 - az0) * cos(el1)
d_el = (el1 - el0)
# Write out a new file in the style of KSRBL (for now)
timesec = int((t.mjd-int(t.mjd))*86400)
dra = (srcobs.a_ra - srcnom.a_ra)*180/pi # Degrees
ddec = (srcobs.a_dec - srcnom.a_dec)*180/pi # Degrees
fmt = '{0:<8} {1:5d} {2:7.3f} {3:7.3f} {4:7.3f} {5:7.3f} {6:6.3f} {7:6.3f} {8:8.3f} {9:7.3f} {10:6.3f} {11:6.3f}'
if hadec:
# If the hadec switch is set, write out HA and Dec coordinates (ha1 and dec1)
print fmt.format(name[0:8],timesec,srcnom.a_ra*180/pi, srcnom.a_dec*180/pi,
srcobs.a_ra*180/pi, srcobs.a_dec*180/pi, dra, ddec,
ha1*180/pi, dec1*180/pi, d_xel*180/pi, d_el*180/pi)
o.write(fmt.format(name[0:8],timesec,srcnom.a_ra*180/pi, srcnom.a_dec*180/pi,
srcobs.a_ra*180/pi, srcobs.a_dec*180/pi, dra, ddec,
ha1*180/pi, dec1*180/pi, d_xel*180/pi, d_el*180/pi)+'\n')
else:
# Otherwise, write out AZ and EL coordinates (az1 and el1)
print fmt.format(name[0:8],timesec,srcnom.a_ra*180/pi, srcnom.a_dec*180/pi,
srcobs.a_ra*180/pi, srcobs.a_dec*180/pi, dra, ddec,
az1*180/pi, el1*180/pi, d_xel*180/pi, d_el*180/pi)
o.write(fmt.format(name[0:8],timesec,srcnom.a_ra*180/pi, srcnom.a_dec*180/pi,
srcobs.a_ra*180/pi, srcobs.a_dec*180/pi, dra, ddec,
az1*180/pi, el1*180/pi, d_xel*180/pi, d_el*180/pi)+'\n')
o.close()
def do_stars(yr, mo, da, hr, mn, npts=25, mount='azel'):
userpass = 'admin:observer@'
srcs = readbsc()
t = Time(dt.datetime(yr,mo,da,hr,mn),format='datetime')
ids = selectbsc(t, srcs, [5.2,5.3])
num = zeros(len(ids))
for i,idx in enumerate(ids):
num[i] = int(srcs[idx].name[2:])
names = getbscnames(num)
startracktable(t, names, srcs, ids, npts, mount)
# Connect to ACC /parm directory and transfer tracktable file
try:
acc = FTP('acc.solar.pvt')
acc.login('admin','observer')
acc.cwd('parm')
# Send tracktable file to ACC
filename = '../Star Pointing/startracktable.radec'
f = open(filename,'r')
acc.storlines('STOR startracktable.radec',f)
f.close()
except:
print 'Could not transfer startracktable.radec file. ACC is down?'
def analyze_stars(yr, mo, da, radius=3):
import os, glob, time
from astropy.io import fits
import subprocess
t = Time(dt.datetime(yr,mo,da,0,0),format='datetime')
fileloc = '/home/sched/Dropbox/PythonCode/Star Pointing/'
apikey = 'cqjhjsprirwttecb'
radeg = 180./pi
datestr = t.iso[:10]
os.chdir(fileloc+datestr)
# Read startable
f = open(fileloc+'startable-'+datestr+'.txt','r')
table = f.readlines()
f.close()
# Remove and save header lines of table
header = array([table[0].strip(),table[1].strip()])
table = table[2:]
# Read times, RA and Dec for each line of table
times = []
RA_J2000 = []
Dec_J2000 = []
for line in table:
times.append(line.strip()[-19:])
RA_J2000.append(RA_Angle(line[16:28].strip(),'hms'))
Dec_J2000.append(Dec_Angle(line[29:42].strip(),'dms'))
# Convert times to Time object
times = Time(array(times))
# Read image file list
# Try to find MaxIm DL extension, and if none, try ZWO
filelist = glob.glob('*.fts') # MaxIm DL extension
if filelist == []:
filelist = glob.glob('*.fit') # ZWO extension
filelist = sort(filelist)
wcslist = array(sort(glob.glob('*.fits')))
idxlist = [] # List of indexes into table that were processed
ftimes = [] # List of times for files that were processed
#import pdb; pdb.set_trace()
for file in filelist:
# Gather information from file header
hdulist = fits.open(file)
try:
# Case of MaxIm DL data header
f_timestr = hdulist[0].header['time-obs']
f_datestr = hdulist[0].header['date-obs']
except:
# Case of ZWO data header
f_datestr, f_timestr = hdulist[0].header['date-obs'].split('T')
f_time = Time(f_datestr+' '+f_timestr)
# Identify star from time
try:
idx = where(f_time > times)[0][-1]
skip = False
except:
print f_time.iso,'is before first time in file...skipping'
skip = True
if not skip:
# First check if this star is already done (indicated by existing wcs file)
wcsfound = False
for wcsfile in wcslist:
if (wcsfile == 'wcs_'+file.split('.fts')[0]+'.fits'):
wcsfound = True
print 'Star already done.'
idxlist.append(idx)
ftimes.append(f_timestr)
break
if not wcsfound:
# Submit this image to Astrometry.net, and wait for processing
# to complete.
command = ['python','/common/python/current/astronet.py','--apikey='+apikey,'--upload='+file,
'--ra='+str(RA_J2000[idx].radians*radeg),'--dec='+str(Dec_J2000[idx].radians*radeg),
#'--scale-lower=0.3','--scale-upper=1','--downsample=1',
'--radius='+str(radius),'--wcs=wcs_'+file[:-3]+'fits','--crpix-center']
# print 'Sending:',command
p = subprocess.Popen(command,stdout=subprocess.PIPE)
tstart = time.time()
lines = p.stdout.readlines()
print 'Result is',lines[-1].strip()
print 'Took',time.time() - tstart,'seconds.'
if lines[-1][:12] == 'Wrote to wcs':
idxlist.append(idx)
ftimes.append(f_timestr)
# Write output file
f = open(fileloc+'starsolutions-'+datestr+'.txt','w')
f.write('Num Name RA(J2000) Dec(J2000) RA(Meas) Dec(Meas) Time(Meas)\n')
f.write('==== ========== ============ ============= ============ ============= ==========\n')
wcslist = sort(glob.glob('*.fits'))
for i,file in enumerate(wcslist):
hdulist = fits.open(file)
ra = RA_Angle(hdulist[0].header['crval1']/radeg)
dec = Dec_Angle(hdulist[0].header['crval2']/radeg)
outline = table[idxlist[i]][:41]+' '+ra.get('hms')[:-1]+' '+dec.get('dms')[:-2]+' '+ftimes[i]
f.write(outline+'\n')
f.close()
def chkimg(p, sub, title):
import matplotlib.pylab as plt
from numpy import mgrid
plt.imshow(sub)
y, x = mgrid[:60,:60]
plt.contour(p(x,y))
plt.title(title)
plt.show()
ans = raw_input('Image Okay [y/n] ?')
plt.clf()
if ans.upper() == 'N':
return False
else:
return True
def filter_images(startable, imgfolder, showimgs=False):
''' Filters the >1000 images taken by ZWO camera down to only one
per star.
Input:
startable Path and filename of the original star table created
by do_stars.
imgfolder Path to the folder containing the >1000 star images
showimgs Option to display each selected image and wait for
confirmation. If rejected, star is skipped.
'''
import glob, os
from astropy.io import fits
from numpy import logical_and
fh = open(startable,'r')
lines = fh.readlines()
lines = lines[2:] # Remove two header lines
nlines = len(lines)
ti = []
for line in lines:
ti.append(Time(line.strip()[-19:]).mjd) # Time of each line in startable, in mjd
fh.close()
files = glob.glob(imgfolder+'/*.fit')
# Simplify filenames by removing all the crap before the time.
for file in files:
basename = os.path.basename(file)
if len(basename) > 31:
os.rename(file,imgfolder+'/'+basename[-31:])
files = glob.glob(imgfolder+'/*.fit')
files.sort()
# Sorted list of times of star image files, in mjd
ftimes = []
for file in files:
fp = file[-31:-14].replace('_',' ')
ftimes.append(Time(fp[:13]+':'+fp[13:15]+':'+fp[15:]).mjd)
ftimes = array(ftimes)
# Make new folder of date
datstr = Time(ti[0],format='mjd').iso[:10]
dirname = os.path.dirname(startable)
# Create output folder for images if it does not exist
outdir = dirname+'/'+datstr
if not os.path.exists(outdir):
os.makedirs(outdir)
for i in range(len(ti)):
if i == len(ti) - 1:  # last entry in the star table (was hard-coded as 49)
jfiles, = where(ftimes > ti[i]+30./86400)
else:
jfiles, = where(logical_and(ftimes > ti[i]+30./86400,ftimes < ti[i+1]))
if jfiles.size > 0:  # jfiles is an ndarray; comparing it to [] is unreliable
jbest = jfiles[0]
prev_area = 9999.
for j in jfiles:
img = fits.getdata(files[j])
p, sub, area = star_shape(img)
if area < prev_area:
jbest = j
prev_area = area
good = True
if showimgs:
img = fits.getdata(files[jbest])
print jbest,':',
p, sub, area = star_shape(img)
title = 'Star '+lines[i][4:15]+lines[i].strip()[-9:]+'/'+Time(ftimes[jbest],format='mjd').iso[10:19]
good = chkimg(p, sub, title)
if good:
fc = files[jbest] # Selects best file
os.rename(fc,outdir+'/'+os.path.basename(fc))
def star_shape(img):
from astropy.modeling import models, fitting
from numpy import argmax, mgrid
subsize = 30
pk = argmax(img[subsize:-subsize,subsize:-subsize])
xpk = pk % (1280 - 2*subsize) + subsize
ypk = pk/(1280 - 2*subsize) + subsize
sub = img[ypk-subsize:ypk+subsize,xpk-subsize:xpk+subsize]
ny, nx = sub.shape
fit_g = fitting.LevMarLSQFitter()
g_init = models.Gaussian2D(amplitude=1.0, x_mean=0., y_mean=0., x_stddev=1., y_stddev=1.)
y, x = mgrid[:ny,:nx]
try:
p = fit_g(g_init,x,y,sub)
ax = p.x_stddev.value
ay = p.y_stddev.value
area = ax*ay
print '{:6.2f}, {:6.2f}; {:7.2f}'.format(ax,ay,area)
return p, sub, area
except:
print 'No fit'
return None, None, 9999.
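if __name__ == '__main__':
    # Minimal sketch of the 2D Gaussian shape fit used by star_shape(),
    # run on a synthetic star image so no FITS files are needed
    # (assumes astropy is installed, as the routines above already require).
    from astropy.modeling import models, fitting
    from numpy import mgrid
    y, x = mgrid[:60, :60]
    truth = models.Gaussian2D(amplitude=100., x_mean=30., y_mean=30.,
                              x_stddev=3., y_stddev=2.)
    g_init = models.Gaussian2D(amplitude=1., x_mean=30., y_mean=30.,
                               x_stddev=1., y_stddev=1.)
    p = fitting.LevMarLSQFitter()(g_init, x, y, truth(x, y))
    print 'fitted sigmas: {:5.2f} {:5.2f}'.format(p.x_stddev.value, p.y_stddev.value)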
|
dgary50/eovsa
|
readbsc.py
|
Python
|
gpl-2.0
| 27,536
|
[
"Gaussian"
] |
6e2484fa81d85636e0b11b686e9a24b7d58ad1929b68737892d5a343c500ef30
|
# Copyright (c) 2018, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
import os
import numpy as np
from itertools import product
from netCDF4 import Dataset
from yambopy.tools.jsonencoder import JsonDumper, JsonLoader
from yambopy.lattice import vol_lat, rec_lat, car_red, red_car, vec_in_list, isbetween
from yambopy.units import atomic_mass
from yambopy.tools.string import marquee
from qepy.lattice import Path
class YamboLatticeDB(object):
"""
Class to read the lattice information from the netcdf file
"""
def __init__(self,lat=None,alat=None,sym_car=None,iku_kpoints=None,
car_atomic_positions=None,atomic_numbers=None,time_rev=None):
self.lat = np.array(lat)
self.alat = np.array(alat)
self.sym_car = np.array(sym_car)
self.iku_kpoints = np.array(iku_kpoints)
self.car_atomic_positions = np.array(car_atomic_positions)
self.atomic_numbers = np.array(atomic_numbers)
self.time_rev = time_rev
self.ibz_nkpoints = len(iku_kpoints)
@classmethod
def from_db(cls,filename='ns.db1',Expand=True):
return cls.from_db_file(filename,Expand)
@classmethod
def from_db_file(cls,filename='ns.db1',Expand=True):
""" Initialize YamboLattice from a local dbfile """
if not os.path.isfile(filename):
raise FileNotFoundError("error opening %s in YamboLatticeDB"%filename)
with Dataset(filename) as database:
dimensions = database.variables['DIMENSIONS'][:]
natoms_a = database.variables['N_ATOMS'][:].astype(int).T
tmp_an = database.variables['atomic_numbers'][:].astype(int)
tmp_apos = database.variables['ATOM_POS'][:,:]
flatten = lambda l: [item for sublist in l for item in sublist]
atomic_numbers = flatten([[tmp_an[n]]*na for n,na in enumerate(natoms_a)])
atomic_positions = np.vstack([[tmp_apos[n,ia] for ia in range(na)] for n,na in enumerate(natoms_a) ])
args = dict( atomic_numbers = atomic_numbers,
car_atomic_positions = atomic_positions,
sym_car = database.variables['SYMMETRY'][:],
iku_kpoints = database.variables['K-POINTS'][:].T,
lat = database.variables['LATTICE_VECTORS'][:].T,
alat = database.variables['LATTICE_PARAMETER'][:].T,
time_rev = dimensions[9] )
y = cls(**args)
if Expand: y.expand_kpoints()
return y
@classmethod
def from_dict(cls,data):
""" Initialize instance of the class from a dictionary
"""
lat = data["lat"]
alat = data["alat"]
sym_car = data["sym_car"]
iku_kpoints = data["iku_kpoints"]
atomic_positions = data["car_atomic_positions"]
atomic_numbers = data["atomic_numbers"]
time_rev = data["time_rev"]
y = cls(lat,alat,sym_car,iku_kpoints,atomic_positions,atomic_numbers,time_rev)
return y
@classmethod
def from_json_file(cls,filename):
data = JsonLoader(filename)
return cls.from_dict(data)
@property
def red_atomic_positions(self):
return car_red(self.car_atomic_positions,self.lat)
def as_dict(self):
""" get the information of this class as a dictionary
"""
data = {"lat" : self.lat,
"alat" : self.alat,
"sym_car" : self.sym_car,
"iku_kpoints" : self.iku_kpoints,
"car_atomic_positions" : self.car_atomic_positions,
"atomic_numbers" : self.atomic_numbers,
"time_rev": self.time_rev }
return data
def write_json(self,filename):
""" write a json file with the lattice information """
JsonDumper(self.as_dict(),filename)
@property
def iku_kpoints(self):
return self._iku_kpoints
@iku_kpoints.setter
def iku_kpoints(self,value):
if hasattr(self,"_red_kpoints"): delattr(self,"_red_kpoints")
if hasattr(self,"_car_kpoints"): delattr(self,"_car_kpoints")
self._iku_kpoints = value
@property
def nkpoints(self):
return len(self.iku_kpoints)
@property
def nsym(self):
return len(self.sym_car)
@property
def rlat(self):
"""caclulate the reciprocal lattice"""
if not hasattr(self,'_rlat'):
self._rlat = rec_lat(self.lat)
return self._rlat
@property
def rlat_vol(self):
return (2*np.pi)**3 * vol_lat(self.rlat)
@property
def lat_vol(self):
return vol_lat(self.lat)
@property
def car_kpoints(self):
"""convert form internal yambo units to cartesian lattice units"""
if not hasattr(self,"_car_kpoints"):
self._car_kpoints = np.array([ k/self.alat for k in self.iku_kpoints ])
return self._car_kpoints
@property
def red_kpoints(self):
"""convert from cartesian coordinates to reduced coordinates"""
if not hasattr(self,"_red_kpoints"):
self._red_kpoints = car_red(self.car_kpoints,self.rlat)
return self._red_kpoints
@property
def sym_red(self):
"""Convert cartesian transformations to reduced transformations"""
if not hasattr(self,"_sym_red"):
sym_red = np.zeros([self.nsym,3,3],dtype=int)
for n,s in enumerate(self.sym_car):
sym_red[n] = np.round(np.dot(np.dot(self.lat,s.T),np.linalg.inv(self.lat)))
self._sym_red = sym_red
return self._sym_red
@property
def sym_rec_red(self):
"""Convert reduced transformations to reduced reciprocal transformations"""
if not hasattr(self,"_sym_rec_red"):
sym_rec_red = np.zeros([self.nsym,3,3],dtype=int)
for n,s in enumerate(self.sym_red):
sym_rec_red[n] = np.linalg.inv(s).T
self._sym_rec_red = sym_rec_red
return self._sym_rec_red
@property
def sym_rec(self):
"""Convert cartesian transformations to reciprocal transformations"""
if not hasattr(self,"_sym_rec"):
sym_rec = np.zeros([self.nsym,3,3])
for n,s in enumerate(self.sym_car):
sym_rec[n] = np.linalg.inv(s).T
self._sym_rec = sym_rec
return self._sym_rec
@property
def time_rev_list(self):
"""get a list of symmetries with time reversal"""
time_rev_list = [False]*self.nsym
for i in range(self.nsym):
time_rev_list[i] = ( i >= self.nsym/(self.time_rev+1) )
return time_rev_list
def expand_kpoints(self,atol=1e-6,verbose=0):
"""
Take a list of qpoints and symmetry operations and return the full brillouin zone
with the corresponding index in the irreducible brillouin zone
"""
#check if the kpoints were already expanded
kpoints_indexes = []
kpoints_full = []
symmetry_indexes = []
#kpoints in the full brillouin zone organized per index
kpoints_full_i = {}
#expand using symmetries
for nk,k in enumerate(self.car_kpoints):
#if the index is not in the dictionary add a list
if nk not in kpoints_full_i:
kpoints_full_i[nk] = []
for ns,sym in enumerate(self.sym_car):
new_k = np.dot(sym,k)
#check if the point is inside the bounds
k_red = car_red([new_k],self.rlat)[0]
k_bz = (k_red+atol)%1
#if the vector is not in the list of this index add it
if not vec_in_list(k_bz,kpoints_full_i[nk]):
kpoints_full_i[nk].append(k_bz)
kpoints_full.append(new_k)
kpoints_indexes.append(nk)
symmetry_indexes.append(ns)
continue
#calculate the weights of each of the kpoints in the irreducible brillouin zone
nkpoints_full = len(kpoints_full)
weights = np.zeros([nkpoints_full])
for nk in kpoints_full_i:
weights[nk] = float(len(kpoints_full_i[nk]))/nkpoints_full
if verbose: print("%d kpoints expanded to %d"%(len(self.car_kpoints),len(kpoints_full)))
#set the variables
self.weights_ibz = np.array(weights)
self.kpoints_indexes = np.array(kpoints_indexes)
self.symmetry_indexes = np.array(symmetry_indexes)
self.iku_kpoints = [k*self.alat for k in kpoints_full]
def get_path(self,path,debug=False):
"""
Obtain a list of indexes and kpoints that belong to the regular mesh
"""
if isinstance(path,Path):
path = path.get_klist()
#points in cartesian coordinates
path_car = red_car(path, self.rlat)
#find the points along the high symmetry lines
distance = 0
bands_kpoints = []
bands_indexes = []
#for all the paths
for k in range(len(path)-1):
# store here all the points in the path
# key: has the coordinates of the kpoint rounded to 4 decimal places
# value: index of the kpoint
# distance to the starting kpoint
# the kpoint coordinate
kpoints_in_path = {}
start_kpt = path_car[k] #start point of the path
end_kpt = path_car[k+1] #end point of the path
#generate repetitions of the brillouin zone
for x,y,z in product(list(range(-1,2)),list(range(-1,2)),list(range(1))):
#shift the brillouin zone
shift = red_car([np.array([x,y,z])],self.rlat)[0]
#iterate over all the kpoints
for index, kpt in enumerate(self.car_kpoints):
kpt_shift = kpt+shift #shift the kpoint
#if the point is collinear we add it
if isbetween(start_kpt,end_kpt,kpt_shift):
key = tuple([round(kpt,4) for kpt in kpt_shift])
value = [ index, np.linalg.norm(start_kpt-kpt_shift), kpt_shift ]
kpoints_in_path[key] = value
#sort the points according to distance to the start of the path
kpoints_in_path = sorted(list(kpoints_in_path.values()),key=lambda i: i[1])
#for all the kpoints in the path
for index, disp, kpt in kpoints_in_path:
bands_kpoints.append( kpt )
bands_indexes.append( index )
if debug: print(("%12.8lf "*3)%tuple(kpt), index)
self.bands_kpoints = bands_kpoints
self.bands_indexes = bands_indexes
self.bands_highsym_qpts = path_car
return bands_kpoints, bands_indexes, path_car
def get_units_info(self):
info_string = \
" Yambo cartesian units [cc in yambo]: \n\
:: self.car_kpoints*2.*pi\n\
\n\
QE cartesian units [cart. coord. in units 2pi/alat in QE]: \n\
:: self.car_kpoints*self.alat[0]\n\
\n\
Internal yambo units [iku]: \n\
:: self.iku_kpoints\n\
\n\
Reduced coordinates [rlu in yambo, cryst. coord. in QE]: \n\
:: self.red_kpoints\n"
print(info_string)
def __str__(self):
lines = []; app = lines.append
app(marquee(self.__class__.__name__))
app("lattice:")
lines += [("%12.8lf " * 3) % tuple(vec) for vec in self.lat]
app("atom positions:")
for an, pos in zip(self.atomic_numbers, self.red_atomic_positions):
app( "%3d " % an + ("%12.8lf " * 3) % tuple(pos) )
return "\n".join(lines)
|
alexmoratalla/yambopy
|
yambopy/dbs/latticedb.py
|
Python
|
bsd-3-clause
| 12,165
|
[
"NetCDF",
"Yambo"
] |
2c416ebf8651cde140697a5f2f432883fa1f51cfbe28379f2365a80c35b52859
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import json
import os
from monty.json import MontyDecoder
from pymatgen.analysis.defects.dilute_solution_model import *
try:
import sympy
except ImportError:
sympy = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
with open(
os.path.join(test_dir, 'mp1048_defect_formation_energies.json')) as fp:
formation_energy_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, 'mp1048_raw_defect_energies.json')) as fp:
raw_energy_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, 'mp1487_raw_defect_energies.json')) as fp:
mp1487_raw_energy_dict = json.load(fp, cls=MontyDecoder)
@unittest.skipIf(not sympy, "sympy not present.")
class DiluteSolutionModelTest(unittest.TestCase):
def setUp(self):
"""
Setup mandatory inputs for dilute_solution_model
"""
self.e0 = raw_energy_dict['bulk_energy']
self.asites = raw_energy_dict['antisites']
self.vac = raw_energy_dict['vacancies']
self.struct = raw_energy_dict['structure']
self.T = 600
self.trial_mu = formation_energy_dict[str(self.T)]['chemical_potential']
def test_formation_energies_without_chem_pot(self):
"""
Should generate formation energies without input chempot
"""
energies, chem_pot = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
generate='energy')
self.assertIsNotNone(energies)
self.assertIsNotNone(chem_pot)
def test_formation_energies_with_chem_pot(self):
energies, chem_pot = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='energy')
self.assertIsNotNone(energies)
self.assertIsNotNone(chem_pot)
def test_plot_data_without_chem_pot(self):
conc_data, en_data, mu_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
generate='plot')
self.assertIsNotNone(conc_data)
self.assertIsNotNone(en_data)
self.assertIsNotNone(mu_data)
for key, value in conc_data.items():
self.assertIsNotNone(value)
for key, value in mu_data.items():
self.assertIsNotNone(value)
for key, value in en_data.items():
self.assertIsNotNone(value)
def test_plot_data_with_chem_pot(self):
conc_data, en_data, mu_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='plot')
self.assertIsNotNone(conc_data)
self.assertIsNotNone(en_data)
self.assertIsNotNone(mu_data)
for key, value in conc_data.items():
self.assertIsNotNone(value)
for key, value in mu_data.items():
self.assertIsNotNone(value)
for key, value in en_data.items():
self.assertIsNotNone(value)
# print(plot_data['y'])
@unittest.skipIf(not sympy, "sympy not present.")
class SoluteSiteFinderTest(unittest.TestCase):
def setUp(self):
"""
Setup mandatory inputs for dilute_solution_model
"""
self.e0 = mp1487_raw_energy_dict['bulk_energy']
self.asites = mp1487_raw_energy_dict['antisites']
self.vac = mp1487_raw_energy_dict['vacancies']
self.solutes = mp1487_raw_energy_dict['solutes']
self.struct = mp1487_raw_energy_dict['structure']
self.T = 1000
def test_plot_data_without_chem_pot(self):
plot_data = solute_site_preference_finder(
self.struct, self.e0, self.T, self.vac, self.asites, self.solutes,
solute_concen=0.01)
self.assertIsNotNone(plot_data)
def still_wait_plot_data_with_chem_pot(self):
plot_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='plot')
self.assertIsNotNone(plot_data)
for key, value in plot_data.items():
self.assertIsNotNone(value)
if __name__ == "__main__":
unittest.main()
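# Illustrative direct call (not a test): the same model invocation the
# tests above make, using the fixture data loaded at module import.
def _example_direct_call(T=600):
    return dilute_solution_model(
        raw_energy_dict['structure'], raw_energy_dict['bulk_energy'],
        raw_energy_dict['vacancies'], raw_energy_dict['antisites'],
        T, generate='energy')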
|
migueldiascosta/pymatgen
|
pymatgen/analysis/defects/tests/test_dilute_solution_model.py
|
Python
|
mit
| 4,419
|
[
"pymatgen"
] |
9149cef12caf74db90b92111c904df085b9667ba4b86b098927ffbcc4f855bd7
|
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{prefix}} {{last_name}}",
"{{first_name_male}} {{last_name}}-{{last_name}}",
)
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{prefix}} {{last_name}}",
"{{first_name_female}} {{last_name}}-{{last_name}}",
"{{first_name_female}}-{{first_name_female}} {{last_name}}",
)
formats = formats_male + formats_female
first_names_male = (
"Afonso",
"Alexandre",
"Álvaro",
"André",
"Ângelo",
"António",
"Artur",
"Benjamim",
"Bernardo",
"Brian",
"Bruno",
"Bryan",
"Carlos",
"Cláudio",
"Cristiano",
"César",
"Daniel",
"David",
"Denis",
"Diego",
"Dinis",
"Diogo",
"Duarte",
"Edgar",
"Eduardo",
"Emanuel",
"Enzo",
"Fernando",
"Filipe",
"Francisco",
"Frederico",
"Fábio",
"Gabriel",
"Gaspar",
"Gil",
"Gonçalo",
"Guilherme",
"Gustavo",
"Henrique",
"Hugo",
"Igor",
"Isaac",
"Ismael",
"Ivan",
"Ivo",
"Jaime",
"Joaquim",
"Joel",
"Jorge",
"José",
"João",
"Kevin",
"Kévim",
"Leandro",
"Leonardo",
"Lisandro",
"Lourenço",
"Luca",
"Lucas",
"Luís",
"Manuel",
"Marco",
"Marcos",
"Martim",
"Mateus",
"Matias",
"Mauro",
"Micael",
"Miguel",
"Márcio",
"Mário",
"Nelson",
"Noa",
"Noah",
"Nuno",
"Paulo",
"Pedro",
"Rafael",
"Renato",
"Ricardo",
"Rodrigo",
"Rui",
"Rúben",
"Salvador",
"Samuel",
"Sandro",
"Santiago",
"Sebastião",
"Simão",
"Sérgio",
"Tiago",
"Tomás",
"Tomé",
"Valentim",
"Vasco",
"Vicente",
"Vítor",
"William",
"Wilson",
"Xavier",
)
first_names_female = (
"Adriana",
"Alexandra",
"Alice",
"Alícia",
"Amélia",
"Ana",
"Andreia",
"Ângela",
"Anita",
"Ariana",
"Beatriz",
"Benedita",
"Bianca",
"Bruna",
"Bárbara",
"Caetana",
"Camila",
"Carlota",
"Carminho",
"Carolina",
"Catarina",
"Clara",
"Constança",
"Daniela",
"Diana",
"Débora",
"Eduarda",
"Ema",
"Emma",
"Emília",
"Erica",
"Érica",
"Erika",
"Eva",
"Fabiana",
"Filipa",
"Flor",
"Francisca",
"Gabriela",
"Helena",
"Iara",
"Inês",
"Irina",
"Íris",
"Isabel",
"Isabela",
"Joana",
"Juliana",
"Jéssica",
"Júlia",
"Kelly",
"Kyara",
"Lara",
"Larissa",
"Laura",
"Leonor",
"Letícia",
"Lia",
"Lorena",
"Luana",
"Luciana",
"Luna",
"Luísa",
"Lúcia",
"Madalena",
"Mafalda",
"Mara",
"Margarida",
"Maria",
"Mariana",
"Marta",
"Matilde",
"Melissa",
"Mia",
"Miriam",
"Mélanie",
"Naiara",
"Nair",
"Nicole",
"Nádia",
"Núria",
"Patrícia",
"Petra",
"Pilar",
"Rafaela",
"Raquel",
"Renata",
"Rita",
"Salomé",
"Sara",
"Sofia",
"Soraia",
"Tatiana",
"Teresa",
"Valentina",
"Vera",
"Victória",
"Violeta",
"Vitória",
"Yara",
"Yasmin",
)
first_names = first_names_male + first_names_female
last_names = (
"Abreu",
"Almeida",
"Alves",
"Amaral",
"Amorim",
"Andrade",
"Anjos",
"Antunes",
"Araújo",
"Assunção",
"Azevedo",
"Baptista",
"Barbosa",
"Barros",
"Batista",
"Borges",
"Branco",
"Brito",
"Campos",
"Cardoso",
"Carneiro",
"Carvalho",
"Castro",
"Coelho",
"Correia",
"Costa",
"Cruz",
"Cunha",
"Domingues",
"Esteves",
"Faria",
"Fernandes",
"Ferreira",
"Figueiredo",
"Fonseca",
"Freitas",
"Garcia",
"Gaspar",
"Gomes",
"Gonçalves",
"Guerreiro",
"Henriques",
"Jesus",
"Leal",
"Leite",
"Lima",
"Lopes",
"Loureiro",
"Lourenço",
"Macedo",
"Machado",
"Magalhães",
"Maia",
"Marques",
"Martins",
"Matias",
"Matos",
"Melo",
"Mendes",
"Miranda",
"Monteiro",
"Morais",
"Moreira",
"Mota",
"Moura",
"Nascimento",
"Neto",
"Neves",
"Nogueira",
"Nunes",
"Oliveira",
"Pacheco",
"Paiva",
"Pereira",
"Pinheiro",
"Pinho",
"Pinto",
"Pires",
"Ramos",
"Reis",
"Ribeiro",
"Rocha",
"Rodrigues",
"Santos",
"Silva",
"Simões",
"Soares",
"Sousa",
"Sá",
"Tavares",
"Teixeira",
"Torres",
"Valente",
"Vaz",
"Vicente",
"Vieira",
)
prefixes = ("de", "da", "do")
def prefix(self) -> str:
return self.random_element(self.prefixes)
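if __name__ == "__main__":
    # Minimal usage sketch (assumes the faker package is installed; this
    # provider is the one registered for the 'pt_PT' locale).
    from faker import Faker
    fake = Faker("pt_PT")
    print(fake.name())           # e.g. "Mariana do Nascimento"
    print(fake.prefix())         # one of: de, da, do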
|
joke2k/faker
|
faker/providers/person/pt_PT/__init__.py
|
Python
|
mit
| 6,791
|
[
"Brian"
] |
b528ad07bd8675dc269e5c3586036581558d5f3289a96bf234ccff676e2d33b5
|
from dsltools import Traversal
import types
import numpy as np
def transform_value(x):
"""
Replace arrays with their shapes,
and recursively replace any instances of arrays
in data structures like tuples also with their shapes
"""
if isinstance(x, np.ndarray):
return x.shape
elif isinstance(x, list):
return np.array(x).shape
elif isinstance(x, tuple):
return tuple(transform_value(elt) for elt in x)
elif isinstance(x, (int, long, float, complex, types.NoneType)):
return x
else:
return None
class EvalShape(Traversal):
def __init__(self, input_values):
self.inputs = []
for x in input_values:
y = transform_value(x)
if hasattr(y, '__iter__'):
self.inputs.extend(y)
else:
self.inputs.append(y)
def visit_Var(self, v):
return self.inputs[v.num]
def visit_Const(self, v):
return v.value
def visit_AnyScalar(self, v):
return None
def visit_Shape(self, v):
dims = self.visit_tuple(v.dims)
assert all(isinstance(d, int) for d in dims)
return dims
def visit_Dim(self, v):
return self.visit(v.array)[v.dim]
def visit_Tuple(self, v):
return self.visit_tuple(v.elts)
def visit_Sub(self, v):
return self.visit(v.x) - self.visit(v.y)
def visit_Add(self, v):
return self.visit(v.x) + self.visit(v.y)
def visit_Mult(self, v):
return self.visit(v.x) * self.visit(v.y)
def visit_Div(self, v):
return self.visit(v.x) / self.visit(v.y)
def visit_Mod(self, v):
return self.visit(v.x) % self.visit(v.y)
def visit_Closure(self, v):
return v.fn, self.visit_tuple(v.args)
def eval_shape(symbolic_shape, input_values):
evaluator = EvalShape(input_values)
result = evaluator.visit(symbolic_shape)
if not isinstance(result, tuple):
return ()
else:
assert all(isinstance(elt, int) for elt in result)
return result
def result_shape(typed_fn, input_values):
import shape_inference
symbolic_shape = shape_inference.call_shape_expr(typed_fn)
return eval_shape(symbolic_shape, input_values)
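if __name__ == '__main__':
    # Quick sketch of transform_value: arrays (including arrays nested in
    # tuples) are replaced by their shapes, while scalars pass through.
    print(transform_value(np.zeros((3, 4))))        # (3, 4)
    print(transform_value((np.ones(5), 2, None)))   # ((5,), 2, None)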
|
pombredanne/parakeet
|
parakeet/shape_inference/shape_eval.py
|
Python
|
bsd-3-clause
| 2,137
|
[
"VisIt"
] |
1b347ff4eb5395ecc05b8a9cf5294bb57622d008680313ecca2de344a1045832
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Adios(AutotoolsPackage):
"""The Adaptable IO System (ADIOS) provides a simple,
flexible way for scientists to describe the
data in their code that may need to be written,
read, or processed outside of the running simulation.
"""
homepage = "http://www.olcf.ornl.gov/center-projects/adios/"
url = "https://github.com/ornladios/ADIOS/archive/v1.12.0.tar.gz"
git = "https://github.com/ornladios/ADIOS.git"
maintainers = ['ax3l']
version('develop', branch='master')
version('1.13.1', sha256='b1c6949918f5e69f701cabfe5987c0b286793f1057d4690f04747852544e157b')
version('1.13.0', sha256='7b5ee8ff7a5f7215f157c484b20adb277ec0250f87510513edcc25d2c4739f50')
version('1.12.0', sha256='22bc22c157322abec2d1a0817a259efd9057f88c2113e67d918a9a5ebcb3d88d')
version('1.11.1', sha256='9f5c10b9471a721ba57d1cf6e5a55a7ad139a6c12da87b4dc128539e9eef370e')
version('1.11.0', sha256='e89d14ccbe7181777225e0ba6c272c0941539b8ccd440e72ed5a9457441dae83')
version('1.10.0', sha256='6713069259ee7bfd4d03f47640bf841874e9114bab24e7b0c58e310c42a0ec48')
version('1.9.0', sha256='23b2bb70540d51ab0855af0b205ca484fd1bd963c39580c29e3133f9e6fffd46')
variant('shared', default=True,
description='Builds a shared version of the library')
variant('fortran', default=False,
description='Enable Fortran bindings support')
variant('mpi', default=True,
description='Enable MPI support')
variant('infiniband', default=False,
description='Enable infiniband support')
# transforms
variant('zlib', default=True,
description='Enable zlib transform support')
variant('bzip2', default=False,
description='Enable bzip2 transform support')
variant('szip', default=False,
description='Enable szip transform support')
variant('zfp', default=True,
description='Enable ZFP transform support')
variant('sz', default=True,
description='Enable SZ transform support')
variant('lz4', default=True,
description='Enable LZ4 transform support')
variant('blosc', default=True,
description='Enable Blosc transform support')
# transports and serial file converters
variant('hdf5', default=False,
description='Enable parallel HDF5 transport and serial bp2h5 ' +
'converter')
variant('netcdf', default=False, description='Enable netcdf support')
variant(
'staging', values=any_combination_of('flexpath', 'dataspaces'),
description='Enable dataspaces and/or flexpath staging transports'
)
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('m4', type='build')
depends_on('libtool@:2.4.2', type='build')
depends_on('python', type='build')
depends_on('mpi', when='+mpi')
# optional transformations
depends_on('zlib', when='+zlib')
depends_on('bzip2', when='+bzip2')
depends_on('szip', when='+szip')
depends_on('sz@:1.4.10', when='@:1.12.0 +sz')
depends_on('sz@1.4.11.0:1.4.11.99', when='@1.13.0 +sz')
depends_on('sz@1.4.12.3:1.4.12.99', when='@1.13.1: +sz')
depends_on('zfp@0.5.1:0.5.99', when='+zfp')
depends_on('lz4', when='+lz4')
depends_on('c-blosc@1.12.0:', when='+blosc')
# optional transports & file converters
depends_on('hdf5@1.8:+hl+mpi', when='+hdf5')
depends_on('netcdf-c', when='+netcdf')
depends_on('libevpath', when='staging=flexpath')
depends_on('dataspaces+mpi', when='staging=dataspaces')
for p in ['+hdf5', '+netcdf', 'staging=flexpath', 'staging=dataspaces']:
conflicts(p, when='~mpi')
build_directory = 'spack-build'
# ADIOS uses the absolute Python path, which is too long and results in
# "bad interpreter" errors - but not applicable for 1.9.0
patch('python.patch', when='@1.10.0:')
# Fix ADIOS <=1.10.0 compile error on HDF5 1.10+
# https://github.com/ornladios/ADIOS/commit/3b21a8a41509
# https://github.com/spack/spack/issues/1683
patch('adios_1100.patch', when='@:1.10.0^hdf5@1.10:')
# ADIOS 1.13.1 is written for ZFP 0.5.0 interfaces
# https://github.com/ornladios/ADIOS/pull/204
patch('zfp051.patch', when='@1.11.0:1.13.1')
# Fix a bug in configure.ac that causes automake issues on RHEL 7.7
patch('https://github.com/ornladios/ADIOS/pull/207.patch', when='@1.12.0:',
sha256='01113e9efb929d71c28bf33cc8b7f215d85195ec700e99cb41164e2f8f830640')
def validate(self, spec):
"""Checks if incompatible variants have been activated at the same time
Args:
spec: spec of the package
Raises:
RuntimeError: in case of inconsistencies
"""
if '+fortran' in spec and not self.compiler.fc:
msg = 'cannot build a fortran variant without a fortran compiler'
raise RuntimeError(msg)
def with_or_without_hdf5(self, activated):
if activated:
return '--with-phdf5={0}'.format(
self.spec['hdf5'].prefix
)
return '--without-phdf5'
def setup_build_environment(self, env):
# https://github.com/ornladios/ADIOS/issues/206
if self.spec.satisfies('%gcc@10: +fortran'):
env.set('FCFLAGS', '-fallow-argument-mismatch')
def configure_args(self):
spec = self.spec
self.validate(spec)
extra_args = [
# required, otherwise building its python bindings will fail
'CFLAGS={0}'.format(self.compiler.cc_pic_flag)
]
extra_args += self.enable_or_disable('shared')
extra_args += self.enable_or_disable('fortran')
if '+mpi' in spec:
env['MPICC'] = spec['mpi'].mpicc
env['MPICXX'] = spec['mpi'].mpicxx
extra_args += self.with_or_without('mpi', activation_value='prefix')
extra_args += self.with_or_without('infiniband')
# Transforms
variants = ['zlib', 'bzip2', 'szip']
if spec.satisfies('@1.11.0:'):
variants += ['zfp']
if spec.satisfies('@1.12.0:'):
variants += ['sz', 'lz4']
if spec.satisfies('@1.13.0:'):
extra_args += self.with_or_without(
'blosc',
activation_value=lambda x: spec['c-blosc'].prefix
)
# External I/O libraries
variants += ['hdf5', 'netcdf']
for x in variants:
extra_args += self.with_or_without(x, activation_value='prefix')
# Staging transports
def with_staging(name):
if name == 'flexpath':
return spec['libevpath'].prefix
return spec[name].prefix
extra_args += self.with_or_without(
'staging',
activation_value=with_staging
)
return extra_args
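# Illustrative sketch (not part of the upstream package): Spack's helper
# methods translate variants into configure flags, so a spec like
# 'adios +shared +mpi +zfp ~szip' would yield roughly
#   --enable-shared --with-mpi=<mpi prefix> --with-zfp=<zfp prefix> --without-szip
# with each prefix taken from the corresponding dependency's install path.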
|
rspavel/spack
|
var/spack/repos/builtin/packages/adios/package.py
|
Python
|
lgpl-2.1
| 7,136
|
[
"NetCDF"
] |
cd51c02b60267067347d3f148c6a9d05edf9b15a867e2da205d09aa48ee927c6
|
# -*- test-case-name: twisted.trial.test.test_runner -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A miscellany of code used to run Trial tests.
Maintainer: Jonathan Lange
"""
__all__ = [
'suiteVisit', 'TestSuite',
'DestructiveTestSuite', 'DocTestCase', 'DryRunVisitor',
'ErrorHolder', 'LoggedSuite', 'PyUnitTestCase',
'TestHolder', 'TestLoader', 'TrialRunner', 'TrialSuite',
'filenameToModule', 'isPackage', 'isPackageDirectory', 'isTestCase',
'name', 'samefile', 'NOT_IN_TEST',
]
import pdb
import os, types, warnings, sys, inspect, imp
import doctest, time
from twisted.python import reflect, log, failure, modules, filepath
from twisted.python.compat import set
from twisted.internet import defer
from twisted.trial import util, unittest
from twisted.trial.itrial import ITestCase
from twisted.trial.reporter import UncleanWarningsReporterWrapper
# These are imported so that they remain in the public API for t.trial.runner
from twisted.trial.unittest import suiteVisit, TestSuite
from zope.interface import implements
pyunit = __import__('unittest')
def isPackage(module):
"""Given an object return True if the object looks like a package"""
if not isinstance(module, types.ModuleType):
return False
basename = os.path.splitext(os.path.basename(module.__file__))[0]
return basename == '__init__'
def isPackageDirectory(dirname):
"""Is the directory at path 'dirname' a Python package directory?
Returns the name of the __init__ file (it may have a weird extension)
if dirname is a package directory. Otherwise, returns False"""
for ext in zip(*imp.get_suffixes())[0]:
initFile = '__init__' + ext
if os.path.exists(os.path.join(dirname, initFile)):
return initFile
return False
def samefile(filename1, filename2):
"""
A hacky implementation of C{os.path.samefile}. Used by L{filenameToModule}
when the platform doesn't provide C{os.path.samefile}. Do not use this.
"""
return os.path.abspath(filename1) == os.path.abspath(filename2)
def filenameToModule(fn):
"""
Given a filename, do whatever possible to return a module object matching
that file.
If the file in question is importable from the Python path, import and
return that module. Otherwise, load the source manually.
@param fn: A filename.
@return: A module object.
@raise ValueError: If C{fn} does not exist.
"""
if not os.path.exists(fn):
raise ValueError("%r doesn't exist" % (fn,))
try:
ret = reflect.namedAny(reflect.filenameToModuleName(fn))
except (ValueError, AttributeError):
# Couldn't find module. The file 'fn' is not in PYTHONPATH
return _importFromFile(fn)
# ensure that the loaded module matches the file
retFile = os.path.splitext(ret.__file__)[0] + '.py'
# not all platforms (e.g. win32) have os.path.samefile
same = getattr(os.path, 'samefile', samefile)
if os.path.isfile(fn) and not same(fn, retFile):
del sys.modules[ret.__name__]
ret = _importFromFile(fn)
return ret
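# Usage sketch (not from the original source; the path is hypothetical):
#
#   mod = filenameToModule('foo/bar/test_baz.py')
#   # if 'foo/bar' is on sys.path this is the normally imported module,
#   # otherwise the source is loaded directly via _importFromFile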
def _importFromFile(fn, moduleName=None):
fn = _resolveDirectory(fn)
if not moduleName:
moduleName = os.path.splitext(os.path.split(fn)[-1])[0]
if moduleName in sys.modules:
return sys.modules[moduleName]
fd = open(fn, 'r')
try:
module = imp.load_source(moduleName, fn, fd)
finally:
fd.close()
return module
def _resolveDirectory(fn):
if os.path.isdir(fn):
initFile = isPackageDirectory(fn)
if initFile:
fn = os.path.join(fn, initFile)
else:
raise ValueError('%r is not a package directory' % (fn,))
return fn
def _getMethodNameInClass(method):
"""
Find the attribute name on the method's class which refers to the method.
For some methods, notably decorators which have not had __name__ set correctly:
getattr(method.im_class, method.__name__) != method
"""
if getattr(method.im_class, method.__name__, object()) != method:
for alias in dir(method.im_class):
if getattr(method.im_class, alias, object()) == method:
return alias
return method.__name__
class DestructiveTestSuite(TestSuite):
"""
A test suite which removes its tests once they are run, to minimize memory
usage.
"""
def run(self, result):
"""
Almost the same as L{TestSuite.run}, but with C{self._tests} being
empty at the end.
"""
while self._tests:
if result.shouldStop:
break
test = self._tests.pop(0)
test(result)
return result
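# Behaviour sketch (test_a, test_b are hypothetical tests): after running,
# the suite is empty, so the test objects can be garbage-collected:
#
#   suite = DestructiveTestSuite([test_a, test_b])
#   suite.run(result)
#   assert suite.countTestCases() == 0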
# When an error occurs outside of any test, the user will see this string
# in place of a test's name.
NOT_IN_TEST = "<not in test>"
class LoggedSuite(TestSuite):
"""
Any errors logged in this suite will be reported to the L{TestResult}
object.
"""
def run(self, result):
"""
Run the suite, storing all errors in C{result}. If an error is logged
while no tests are running, then it will be added as an error to
C{result}.
@param result: A L{TestResult} object.
"""
observer = unittest._logObserver
observer._add()
super(LoggedSuite, self).run(result)
observer._remove()
for error in observer.getErrors():
result.addError(TestHolder(NOT_IN_TEST), error)
observer.flushErrors()
class PyUnitTestCase(object):
"""
DEPRECATED in Twisted 8.0.
This class decorates the pyunit.TestCase class, mainly to work around the
differences between unittest in Python 2.3, 2.4, and 2.5. These
differences are::
- The way doctest unittests describe themselves
- Where the implementation of TestCase.run is (used to be in __call__)
- Where the test method name is kept (mangled-private or non-mangled
private variable)
It also implements visit, which we like.
"""
def __init__(self, test):
warnings.warn("Deprecated in Twisted 8.0.",
category=DeprecationWarning)
self._test = test
test.id = self.id
def id(self):
cls = self._test.__class__
tmn = getattr(self._test, '_TestCase__testMethodName', None)
if tmn is None:
# python2.5's 'unittest' module is more sensible; but different.
tmn = self._test._testMethodName
return (cls.__module__ + '.' + cls.__name__ + '.' +
tmn)
def __repr__(self):
return 'PyUnitTestCase<%r>'%(self.id(),)
def __call__(self, results):
return self._test(results)
def visit(self, visitor):
"""
Call the given visitor with the original, standard library, test case
that C{self} wraps. See L{unittest.TestCase.visit}.
Deprecated in Twisted 8.0.
"""
warnings.warn("Test visitors deprecated in Twisted 8.0",
category=DeprecationWarning)
visitor(self._test)
def __getattr__(self, name):
return getattr(self._test, name)
class DocTestCase(PyUnitTestCase):
"""
DEPRECATED in Twisted 8.0.
"""
def id(self):
"""
In Python 2.4, doctests have correct id() behaviour. In Python 2.3,
id() returns 'runit'.
Here we override id() so that at least it will always contain the
fully qualified Python name of the doctest.
"""
return self._test.shortDescription()
class TrialSuite(TestSuite):
"""
Suite to wrap around every single test in a C{trial} run. Used internally
by Trial to set up things necessary for Trial tests to work, regardless of
what context they are run in.
"""
def __init__(self, tests=()):
suite = LoggedSuite(tests)
super(TrialSuite, self).__init__([suite])
def _bail(self):
from twisted.internet import reactor
d = defer.Deferred()
reactor.addSystemEventTrigger('after', 'shutdown',
lambda: d.callback(None))
reactor.fireSystemEvent('shutdown') # radix's suggestion
# As long as TestCase does crap stuff with the reactor we need to
# manually shutdown the reactor here, and that requires util.wait
# :(
# so that the shutdown event completes
unittest.TestCase('mktemp')._wait(d)
def run(self, result):
try:
TestSuite.run(self, result)
finally:
self._bail()
def name(thing):
"""
@param thing: an object from modules (instance of PythonModule,
PythonAttribute), a TestCase subclass, or an instance of a TestCase.
"""
if isTestCase(thing):
# TestCase subclass
theName = reflect.qual(thing)
else:
# thing from trial, or thing from modules.
# this monstrosity exists so that modules' objects do not have to
# implement id(). -jml
try:
theName = thing.id()
except AttributeError:
theName = thing.name
return theName
def isTestCase(obj):
"""
@return: C{True} if C{obj} is a class that contains test cases, C{False}
otherwise. Used to find all the tests in a module.
"""
try:
return issubclass(obj, pyunit.TestCase)
except TypeError:
return False
class TestHolder(object):
"""
Placeholder for a L{TestCase} inside a reporter. As far as a L{TestResult}
is concerned, this looks exactly like a unit test.
"""
implements(ITestCase)
failureException = None
def __init__(self, description):
"""
@param description: A string to be displayed by the L{TestResult}.
"""
self.description = description
def __call__(self, result):
return self.run(result)
def id(self):
return self.description
def countTestCases(self):
return 0
def run(self, result):
"""
This test is just a placeholder. Run the test successfully.
@param result: The C{TestResult} to store the results in.
@type result: L{twisted.trial.itrial.ITestResult}.
"""
result.startTest(self)
result.addSuccess(self)
result.stopTest(self)
def shortDescription(self):
return self.description
class ErrorHolder(TestHolder):
"""
Used to insert arbitrary errors into a test suite run. Provides enough
methods to look like a C{TestCase}; when it is run, however, it simply adds
an error to the C{TestResult}. The most common use-case is for when a
module fails to import.
"""
def __init__(self, description, error):
"""
@param description: A string used by C{TestResult}s to identify this
error. Generally, this is the name of a module that failed to import.
@param error: The error to be added to the result. Can be an `exc_info`
tuple or a L{twisted.python.failure.Failure}.
"""
super(ErrorHolder, self).__init__(description)
self.error = util.excInfoOrFailureToExcInfo(error)
def __repr__(self):
return "<ErrorHolder description=%r error=%s%s>" % (
# Format the exception type and arguments explicitly, as exception
# objects do not have nice looking string formats on Python 2.4.
self.description, self.error[0].__name__, self.error[1].args)
def run(self, result):
"""
Run the test, reporting the error.
@param result: The C{TestResult} to store the results in.
@type result: L{twisted.trial.itrial.ITestResult}.
"""
result.startTest(self)
result.addError(self, self.error)
result.stopTest(self)
def visit(self, visitor):
"""
See L{unittest.TestCase.visit}.
"""
visitor(self)
class TestLoader(object):
"""
I find tests inside functions, modules, files -- whatever -- then return
them wrapped inside a Test (either a L{TestSuite} or a L{TestCase}).
@ivar methodPrefix: A string prefix. C{TestLoader} will assume that all the
methods in a class that begin with C{methodPrefix} are test cases.
@ivar modulePrefix: A string prefix. Every module in a package that begins
with C{modulePrefix} is considered a module full of tests.
@ivar forceGarbageCollection: A flag applied to each C{TestCase} loaded.
See L{unittest.TestCase} for more information.
@ivar sorter: A key function used to sort C{TestCase}s, test classes,
modules and packages.
@ivar suiteFactory: A callable which is passed a list of tests (which
themselves may be suites of tests). Must return a test suite.
"""
methodPrefix = 'test'
modulePrefix = 'test_'
def __init__(self):
self.suiteFactory = TestSuite
self.sorter = name
self._importErrors = []
def sort(self, xs):
"""
Sort the given things using L{sorter}.
@param xs: A list of test cases, class or modules.
"""
return sorted(xs, key=self.sorter)
def findTestClasses(self, module):
"""Given a module, return all Trial test classes"""
classes = []
for name, val in inspect.getmembers(module):
if isTestCase(val):
classes.append(val)
return self.sort(classes)
def findByName(self, name):
"""
Return a Python object given a string describing it.
@param name: a string which may be either a filename or a
fully-qualified Python name.
@return: If C{name} is a filename, return the module. If C{name} is a
fully-qualified Python name, return the object it refers to.
"""
if os.path.exists(name):
return filenameToModule(name)
return reflect.namedAny(name)
def loadModule(self, module):
"""
Return a test suite with all the tests from a module.
Included are TestCase subclasses and doctests listed in the module's
__doctests__ module. If that's not good for you, put a function named
either C{testSuite} or C{test_suite} in your module that returns a
TestSuite, and I'll use the results of that instead.
If C{testSuite} and C{test_suite} are both present, then I'll use
C{testSuite}.
"""
## XXX - should I add an optional parameter to disable the check for
## a custom suite.
## OR, should I add another method
if not isinstance(module, types.ModuleType):
raise TypeError("%r is not a module" % (module,))
if hasattr(module, 'testSuite'):
return module.testSuite()
elif hasattr(module, 'test_suite'):
return module.test_suite()
suite = self.suiteFactory()
for testClass in self.findTestClasses(module):
suite.addTest(self.loadClass(testClass))
if not hasattr(module, '__doctests__'):
return suite
docSuite = self.suiteFactory()
for doctest in module.__doctests__:
docSuite.addTest(self.loadDoctests(doctest))
return self.suiteFactory([suite, docSuite])
loadTestsFromModule = loadModule
def loadClass(self, klass):
"""
Given a class which contains test cases, return a sorted list of
C{TestCase} instances.
"""
if not (isinstance(klass, type) or isinstance(klass, types.ClassType)):
raise TypeError("%r is not a class" % (klass,))
if not isTestCase(klass):
raise ValueError("%r is not a test case" % (klass,))
names = self.getTestCaseNames(klass)
tests = self.sort([self._makeCase(klass, self.methodPrefix+name)
for name in names])
return self.suiteFactory(tests)
loadTestsFromTestCase = loadClass
def getTestCaseNames(self, klass):
"""
Given a class that contains C{TestCase}s, return a list of names of
methods that probably contain tests.
"""
return reflect.prefixedMethodNames(klass, self.methodPrefix)
def loadMethod(self, method):
"""
Given a method of a C{TestCase} that represents a test, return a
C{TestCase} instance for that test.
"""
if not isinstance(method, types.MethodType):
raise TypeError("%r not a method" % (method,))
return self._makeCase(method.im_class, _getMethodNameInClass(method))
def _makeCase(self, klass, methodName):
return klass(methodName)
def loadPackage(self, package, recurse=False):
"""
Load tests from a module object representing a package, and return a
TestSuite containing those tests.
Tests are only loaded from modules whose name begins with 'test_'
(or whatever C{modulePrefix} is set to).
@param package: a types.ModuleType object (or reasonable facsimile
obtained by importing) which may contain tests.
@param recurse: A boolean. If True, inspect modules within packages
within the given package (and so on), otherwise, only inspect modules
in the package itself.
@raise: TypeError if 'package' is not a package.
@return: a TestSuite created with my suiteFactory, containing all the
tests.
"""
if not isPackage(package):
raise TypeError("%r is not a package" % (package,))
pkgobj = modules.getModule(package.__name__)
if recurse:
discovery = pkgobj.walkModules()
else:
discovery = pkgobj.iterModules()
discovered = []
for disco in discovery:
if disco.name.split(".")[-1].startswith(self.modulePrefix):
discovered.append(disco)
suite = self.suiteFactory()
for modinfo in self.sort(discovered):
try:
module = modinfo.load()
except:
thingToAdd = ErrorHolder(modinfo.name, failure.Failure())
else:
thingToAdd = self.loadModule(module)
suite.addTest(thingToAdd)
return suite
def loadDoctests(self, module):
"""
Return a suite of tests for all the doctests defined in C{module}.
@param module: A module object or a module name.
"""
if isinstance(module, str):
try:
module = reflect.namedAny(module)
except:
return ErrorHolder(module, failure.Failure())
if not inspect.ismodule(module):
warnings.warn("trial only supports doctesting modules")
return
extraArgs = {}
if sys.version_info > (2, 4):
# Work around Python issue2604: DocTestCase.tearDown clobbers globs
def saveGlobals(test):
"""
Save C{test.globs} and replace it with a copy so that if
necessary, the original will be available for the next test
run.
"""
test._savedGlobals = getattr(test, '_savedGlobals', test.globs)
test.globs = test._savedGlobals.copy()
extraArgs['setUp'] = saveGlobals
return doctest.DocTestSuite(module, **extraArgs)
def loadAnything(self, thing, recurse=False):
"""
Given a Python object, return whatever tests are in it. Whatever
'in' might mean.
@param thing: A Python object. A module, method, class or package.
@param recurse: Whether or not to look in subpackages of packages.
Defaults to False.
@return: A C{TestCase} or C{TestSuite}.
"""
if isinstance(thing, types.ModuleType):
if isPackage(thing):
return self.loadPackage(thing, recurse)
return self.loadModule(thing)
elif isinstance(thing, types.ClassType):
return self.loadClass(thing)
elif isinstance(thing, type):
return self.loadClass(thing)
elif isinstance(thing, types.MethodType):
return self.loadMethod(thing)
raise TypeError("No loader for %r. Unrecognized type" % (thing,))
def loadByName(self, name, recurse=False):
"""
Given a string representing a Python object, return whatever tests
are in that object.
If C{name} is somehow inaccessible (e.g. the module can't be imported,
there is no Python object with that name etc) then return an
L{ErrorHolder}.
@param name: The fully-qualified name of a Python object.
"""
try:
thing = self.findByName(name)
except:
return ErrorHolder(name, failure.Failure())
return self.loadAnything(thing, recurse)
loadTestsFromName = loadByName
def loadByNames(self, names, recurse=False):
"""
Construct a TestSuite containing all the tests found in 'names', where
names is a list of fully qualified python names and/or filenames. The
suite returned will have no duplicate tests, even if the same object
is named twice.
"""
things = []
errors = []
for name in names:
try:
things.append(self.findByName(name))
except:
errors.append(ErrorHolder(name, failure.Failure()))
suites = [self.loadAnything(thing, recurse)
for thing in self._uniqueTests(things)]
suites.extend(errors)
return self.suiteFactory(suites)
def _uniqueTests(self, things):
"""
Gather unique suite objects from loaded things. This will guarantee
uniqueness of inherited methods on TestCases which would otherwise hash
to the same value and collapse into one test unexpectedly if simpler
means (e.g. a plain set()) were used.
"""
entries = []
for thing in things:
if isinstance(thing, types.MethodType):
entries.append((thing, thing.im_class))
else:
entries.append((thing,))
return [entry[0] for entry in set(entries)]
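# Illustration (Base and Child are hypothetical classes): if Child inherits
# test_x from Base unchanged, the unbound methods Base.test_x and
# Child.test_x compare equal in Python 2, so a plain set() would collapse
# them to one test. Pairing each method with its im_class keeps both:
#
#   set([(Base.test_x, Base), (Child.test_x, Child)])   # -> two entries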
class DryRunVisitor(object):
"""
A visitor that makes a reporter think that every test visited has run
successfully.
"""
def __init__(self, reporter):
"""
@param reporter: A C{TestResult} object.
"""
self.reporter = reporter
def markSuccessful(self, testCase):
"""
Convince the reporter that this test has been run successfully.
"""
self.reporter.startTest(testCase)
self.reporter.addSuccess(testCase)
self.reporter.stopTest(testCase)
class TrialRunner(object):
"""
A specialised runner that the trial front end uses.
"""
DEBUG = 'debug'
DRY_RUN = 'dry-run'
def _getDebugger(self):
dbg = pdb.Pdb()
try:
import readline
except ImportError:
print "readline module not available"
sys.exc_clear()
for path in ('.pdbrc', 'pdbrc'):
if os.path.exists(path):
try:
rcFile = file(path, 'r')
except IOError:
sys.exc_clear()
else:
dbg.rcLines.extend(rcFile.readlines())
return dbg
def _setUpTestdir(self):
self._tearDownLogFile()
currentDir = os.getcwd()
base = filepath.FilePath(self.workingDirectory)
testdir, self._testDirLock = util._unusedTestDirectory(base)
os.chdir(testdir.path)
return currentDir
def _tearDownTestdir(self, oldDir):
os.chdir(oldDir)
self._testDirLock.unlock()
_log = log
def _makeResult(self):
reporter = self.reporterFactory(self.stream, self.tbformat,
self.rterrors, self._log)
if self.uncleanWarnings:
reporter = UncleanWarningsReporterWrapper(reporter)
return reporter
def __init__(self, reporterFactory,
mode=None,
logfile='test.log',
stream=sys.stdout,
profile=False,
tracebackFormat='default',
realTimeErrors=False,
uncleanWarnings=False,
workingDirectory=None,
forceGarbageCollection=False):
self.reporterFactory = reporterFactory
self.logfile = logfile
self.mode = mode
self.stream = stream
self.tbformat = tracebackFormat
self.rterrors = realTimeErrors
self.uncleanWarnings = uncleanWarnings
self._result = None
self.workingDirectory = workingDirectory or '_trial_temp'
self._logFileObserver = None
self._logFileObject = None
self._forceGarbageCollection = forceGarbageCollection
if profile:
self.run = util.profiled(self.run, 'profile.data')
def _tearDownLogFile(self):
if self._logFileObserver is not None:
log.removeObserver(self._logFileObserver.emit)
self._logFileObserver = None
if self._logFileObject is not None:
self._logFileObject.close()
self._logFileObject = None
def _setUpLogFile(self):
self._tearDownLogFile()
if self.logfile == '-':
logFile = sys.stdout
else:
logFile = file(self.logfile, 'a')
self._logFileObject = logFile
self._logFileObserver = log.FileLogObserver(logFile)
log.startLoggingWithObserver(self._logFileObserver.emit, 0)
def run(self, test):
"""
Run the test or suite and return a result object.
"""
test = unittest.decorate(test, ITestCase)
if self._forceGarbageCollection:
test = unittest.decorate(
test, unittest._ForceGarbageCollectionDecorator)
return self._runWithoutDecoration(test)
def _runWithoutDecoration(self, test):
"""
Private helper that runs the given test but doesn't decorate it.
"""
result = self._makeResult()
# decorate the suite with reactor cleanup and log starting
# This should move out of the runner and be presumed to be
# present
suite = TrialSuite([test])
startTime = time.time()
if self.mode == self.DRY_RUN:
for single in unittest._iterateTests(suite):
result.startTest(single)
result.addSuccess(single)
result.stopTest(single)
else:
if self.mode == self.DEBUG:
# open question - should this be self.debug() instead.
debugger = self._getDebugger()
run = lambda: debugger.runcall(suite.run, result)
else:
run = lambda: suite.run(result)
oldDir = self._setUpTestdir()
try:
self._setUpLogFile()
run()
finally:
self._tearDownLogFile()
self._tearDownTestdir(oldDir)
endTime = time.time()
done = getattr(result, 'done', None)
if done is None:
warnings.warn(
"%s should implement done() but doesn't. Falling back to "
"printErrors() and friends." % reflect.qual(result.__class__),
category=DeprecationWarning, stacklevel=3)
result.printErrors()
result.writeln(result.separator)
result.writeln('Ran %d tests in %.3fs', result.testsRun,
endTime - startTime)
result.write('\n')
result.printSummary()
else:
result.done()
return result
def runUntilFailure(self, test):
"""
Repeatedly run C{test} until it fails.
"""
count = 0
while True:
count += 1
self.stream.write("Test Pass %d\n" % (count,))
if count == 1:
result = self.run(test)
else:
result = self._runWithoutDecoration(test)
if result.testsRun == 0:
break
if not result.wasSuccessful():
break
return result
|
Varriount/Colliberation
|
libs/twisted/trial/runner.py
|
Python
|
mit
| 28,496
|
[
"VisIt"
] |
9a96c55ca7985abaaec214096520e32e70fda326bbfb1f72d978589521756d8a
|
class SynErr(Exception):
def __init__(self, *args, **info):
self.errinfo = info
Exception.__init__(self, self._getExcMsg())
def _getExcMsg(self):
props = sorted(self.errinfo.items())
displ = ' '.join(['%s=%r' % (p, v) for (p, v) in props])
return '%s: %s' % (self.__class__.__name__, displ)
def items(self):
return self.errinfo.items()
def get(self, name):
'''
Return a value from the errinfo dict.
Example:
try:
foothing()
except SynErr as e:
blah = e.get('blah')
'''
return self.errinfo.get(name)
class CliFini(SynErr):
'''
Raised when the CLI is to exit.
'''
pass
class Retry(SynErr): pass
class TxFull(Retry): pass
class NotReady(Retry): pass
class AuthDeny(SynErr): pass
class NoSuchMod(SynErr): pass
class NoModIden(SynErr): pass
class NoCertKey(SynErr): pass
class NoSuchAct(SynErr): pass
class NoSuchOpt(SynErr): pass
class NoSuchDir(SynErr): pass
class NoSuchDyn(SynErr): pass
class NoSuchSeq(SynErr): pass
class NoRevPath(SynErr): pass
class NoRevAllow(SynErr): pass
class NoSuchAlgo(SynErr): pass
class NoSuchConf(SynErr): pass
class NoSuchCtor(SynErr): pass
class NoSuchFifo(SynErr): pass
class NoSuchForm(SynErr): pass
class NoSuchHash(SynErr): pass
class NoSuchPath(SynErr): pass
class NoSuchStat(SynErr): pass
class NoSuchImpl(SynErr): pass
class NoSuchName(SynErr): pass
class NoSuchTufo(SynErr): pass
class NoSuchType(SynErr): pass
class NoSuchProp(SynErr): pass
class NoSuchOper(SynErr): pass
class NoSuchCmpr(SynErr): pass
class NoSuchCore(SynErr): pass
class NoSuchRule(SynErr): pass
class NoSuchIndx(SynErr): pass
class NoSuchGetBy(SynErr): pass
class NoSuchMembrane(SynErr): pass
class MembraneExists(SynErr): pass
class NoSuchDecoder(SynErr): pass
class NoSuchEncoder(SynErr): pass
class BadOperArg(SynErr): pass
class ReqConfOpt(SynErr): pass
class BadConfValu(SynErr):
'''
The configuration value provided is not valid.
This should contain the config name, valu and mesg.
'''
pass
class BadRevValu(SynErr): pass
class BadFifoSeq(SynErr): pass
class BadTypeValu(SynErr): pass
class DupTypeName(SynErr): pass
class DupPropName(SynErr): pass
class DupFileName(SynErr): pass
class DupIndx(SynErr): pass
class BadFileExt(SynErr): pass
class BadPropName(SynErr): pass
class BadCoreName(SynErr): pass
class BadCtorType(SynErr): pass
class BadMesgVers(SynErr): pass
class BadInfoValu(SynErr): pass
class BadStorValu(SynErr): pass
class BadRuleValu(SynErr): pass
class BadPropConf(SynErr):
'''
The configuration for the property is invalid.
'''
pass
class BadCoreStore(SynErr):
'''The storage layer has encountered an error'''
pass
class CantDelProp(SynErr): pass
class CantSetProp(SynErr): pass
class MustBeLocal(SynErr): pass
class MustBeProxy(SynErr): pass
class NoAuthUser(SynErr): pass
class WebAppErr(SynErr): pass
class BadUrl(Exception): pass
class BadJson(Exception): pass
class BadMesgResp(Exception): pass
class BadSpliceMesg(SynErr):
'''The splice message was invalid'''
pass
class BadPropValu(SynErr): pass
class BadPySource(Exception): pass
class BadRuleSyntax(SynErr): pass
class BadSyntaxError(SynErr): pass
class TeleClientSide(SynErr): pass
class HitStormLimit(SynErr): pass
class DupOpt(Exception): pass
class DupUserName(SynErr): pass
class DupRoleName(SynErr): pass
class IsRuntProp(SynErr): pass
class NoSuch(Exception): pass
class NoSuchJob(Exception): pass
class NoSuchObj(SynErr): pass
class NoSuchFile(SynErr): pass
class NoSuchIden(Exception): pass
class NoSuchMeth(SynErr): pass
class NoSuchFunc(SynErr): pass
class NoSuchPerm(SynErr): pass
class NoSuchPeer(Exception): pass
class NoSuchSess(Exception): pass
class NoSuchUser(SynErr): pass
class NoSuchRole(SynErr): pass
class NoSuchProto(Exception): pass
class NoInitCore(Exception): pass # API disabled because no cortex
class NoCurrSess(Exception): pass # API requires a current session
class SidNotFound(Exception): pass
class PropNotFound(SynErr): pass
class HitMaxTime(SynErr): pass
class HitMaxRetry(SynErr): pass
class HitCoreLimit(SynErr):
''' You've reached some limit of the storage layer.'''
pass
class NotEnoughFree(SynErr):
'''
There is not enough disk space free for the required operation.
'''
pass
class AxonErr(SynErr): pass
class AxonIsRo(AxonErr): pass
class AxonIsClone(AxonErr): pass
class AxonNotClone(AxonErr): pass
class AxonBadChunk(AxonErr): pass
class NoWritableAxons(SynErr):
'''
There are no writable axons available for the required operation.
'''
pass
class MustNotWait(Exception): pass # blocking function called by no-wait thread
class NoSuchEntity(SynErr): pass
class NoSuchData(SynErr): pass
class FileExists(SynErr): pass
class NotEmpty(SynErr): pass
class NotSupported(SynErr): pass
# NOTE: redefines the bare NoCertKey declared earlier; this documented
# definition is the one that takes effect.
class NoCertKey(SynErr):
'''
Raised when a Cert object requires a RSA Private Key
to perform an operation and the key is not present.
'''
pass
class CellUserErr(SynErr):
'''
Exception raised by a CellUser
'''
pass
class BadAtomFile(SynErr):
'''
Raised when there is a internal issue with an atomfile.
'''
pass
class IsFini(SynErr): pass
class TimeOut(SynErr): pass
class CryptoErr(SynErr):
'''
Raised when there is a synapse.lib.crypto error.
'''
pass
class BadEccExchange(CryptoErr):
'''
Raised when there is an issue doing a ECC Key Exchange
'''
pass
class RetnErr(SynErr):
'''
Raised when a call using the retn convention has failed.
'''
def __init__(self, retn):
SynErr.__init__(self, excn=retn[0], **retn[1])
class StepTimeout(SynErr):
'''
Raised when a TestStep.wait() call times out.
'''
pass
class JobErr(Exception):
'''
Used for remote exception propagation.
'''
def __init__(self, job):
self.job = job
err = job[1].get('err')
errmsg = job[1].get('errmsg')
errfile = job[1].get('errfile')
errline = job[1].get('errline')
Exception.__init__(self, '%s: %s (%s:%s)' % (err, errmsg, errfile, errline))
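# Sketch of the job structure JobErr expects (values are illustrative):
#
#   job = ('c0ffee', {'err': 'NoSuchObj', 'errmsg': 'no object by that iden',
#                     'errfile': 'synapse/telepath.py', 'errline': 123})
#   raise JobErr(job)
#   # -> "NoSuchObj: no object by that iden (synapse/telepath.py:123)"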
class LinkTimeOut(SynErr): pass
# TODO: steal these names back for synapse/lib/net.py (and deprecate old users)
class LinkErr(SynErr):
retry = False
def __init__(self, link, mesg=''):
self.link = link
Exception.__init__(self, '%s %s' % (link[1].get('url'), mesg))
class LinkRefused(LinkErr):
retry = True
class LinkNotAuth(LinkErr): pass
class ProtoErr(SynErr):
'''
There's a network protocol failure (in neuron.Sess)
'''
pass
class CorruptDatabase(SynErr): pass
|
vivisect/synapse
|
synapse/exc.py
|
Python
|
apache-2.0
| 6,744
|
[
"NEURON"
] |
ef9625b0e7b96cf7f138afbc2ed4049aa67fd609d5ad0485c0a4fd8d806a1295
|
#copyright Jesse van Dam
import vcf
import inspect
import re
accessionMap = {}
mappingfile = open("mappingfile2accession.csv",'r')
first = True
for line in mappingfile:
if first:
first = False
continue
temp = line.strip().split('\t')
if len(temp) == 2:
accessionMap[temp[0]] = temp[1]
mappingfile.close()
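# The mapping file is expected to be tab-separated with a header row that is
# skipped, e.g. (the accession value here is hypothetical):
#
#   sample<TAB>accession
#   SZAXPI009284-57<TAB>LA0123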
rdf = open("snps.n3","w")
vcf_reader = vcf.Reader(open('tomatoExample.vcf', 'r'))
prefix = "http://pbr.wur.nl/VCF/"
refVersion = "Tomatov1"
isType = "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>"
uniqId = {}
chromdone = {}
refId = "<" + prefix + refVersion + ">"
rdf.write(refId + " " + isType + " <" + prefix + "Reference> .\n")
rdf.write(refId + " <http://www.w3.org/1999/02/22-rdf-syntax-ns#comment> \"tomato reference assembly version 2.40(aka 2.31)\" .\n")
variant={}
subClassDef={}
firstRecord = True
for record in vcf_reader:
SNPidRaw = "http://pbr.wur.nl/VCF/" + str(refVersion) + "/chrom" + str(record.CHROM) + "/" + str(record.POS) + "/"
if not SNPidRaw in uniqId:
uniqId[SNPidRaw] = 0;
uniqId[SNPidRaw] = uniqId[SNPidRaw] + 1
SNPidRaw = SNPidRaw + str(uniqId[SNPidRaw])
SNPid = "<" + SNPidRaw + ">"
rdf.write(SNPid + " " + isType + " " + "<" + prefix + "SNP> . \n")
rdf.write(SNPid + " <" + prefix + "refSNP> \"" + record.REF + "\" . \n")
skip = True
for allel in record.alleles:
if skip:
skip = False
continue
rdf.write(SNPid + " <" + prefix + "allelySNP> \"" + str(allel) + "\" . \n")
# duplicate position objects are automatically merged
posId = "<" + prefix + str(refVersion) + "/chrom" + str(record.CHROM) + "/" + str(record.POS) + ">"
rdf.write(SNPid + " <" + prefix + "position> " + posId + " . \n")
rdf.write(posId + " " + isType + " " + "<" + prefix + "Position> . \n")
rdf.write(posId + " <" + prefix + "location> \"" + str(record.POS) + "\"^^<http://www.w3.org/2001/XMLSchema#integer> . \n")
chromId = "<" + prefix + str(refVersion) + "/chrom" + str(record.CHROM) + ">"
rdf.write(posId + " <" + prefix + "chromoson> " + chromId + " . \n")
if not chromId in chromdone:
chromdone[chromId] = 1
rdf.write(chromId + " " + isType + " " + "<" + prefix + "Chromoson> . \n")
scaffold = "<http://pbr.wur.nl/SCAFFOLD#SL2.31ch"
if len(str(record.CHROM)) == 1:
scaffold = scaffold + "0"
#<http://pbr.wur.nl/SCAFFOLD#SL2.31ch00>
scaffold = scaffold + str(record.CHROM) + ">"
rdf.write(chromId + " <" + prefix + "scaffold> " + scaffold + " . \n")
rdf.write(chromId + " <" + prefix + "reference> " + refId + " . \n")
rdf.write("")
for sample in record.samples:
sampleId = re.search(".*/(.*)",sample.sample).group(1)
sampleIri = "<" + prefix + "sample/" + sampleId + ">"
SampleSNPId = SNPidRaw + sampleId
SampleSNPIri= "<" + SampleSNPId + ">"
if firstRecord:
rdf.write(sampleIri + " " + isType + " " + "<" + prefix + "Sample> . \n")
rdf.write(sampleIri + " <" + prefix + "accession> <http://purl.org/cgngenis/accenumb/" + accessionMap[sampleId] +"> . \n")
rdf.write(SNPid + " <" + prefix + "genotype> " + SampleSNPIri + " . \n")
rdf.write(SampleSNPIri + " " + isType + " " + "<" + prefix + "SNPGenotype> . \n")
rdf.write(SampleSNPIri + " <" + prefix + "sample> " + sampleIri + " . \n")
rdf.write(SampleSNPIri + " <" + prefix + "phased> \"" + str(sample.phased).lower() + "\"^^<http://www.w3.org/2001/XMLSchema#boolean> . \n")
#print(record)
if sample.gt_type:
count = 1;
for allel in sample.gt_alleles:
allelIri = "<" + SampleSNPId + "/allel_" + str(count) + ">"
rdf.write(SampleSNPIri + " <" + prefix + "allel> " + allelIri + " . \n")
rdf.write(allelIri + " " + isType + " " + "<" + prefix + "Allel> . \n")
rdf.write(allelIri + " <" + prefix + "allelNumber> \"" + str(count) + "\"^^<http://www.w3.org/2001/XMLSchema#integer> . \n")
rdf.write(allelIri + " <" + prefix + "variantAllel> \"" + str(record.alleles[int(allel)]) + "\" . \n")
count = count + 1
# rdf.write(SNPid + " <" + prefix + "genotype> " + SampleSNPId + " . \n")
firstRecord = False
count = 0
for info in record.INFO['EFF']:
# parse the current EFF entry (the original always re-parsed entry [0])
match = re.search("(.*)\((.*\))", info)
variantType = match.group(1)
data = match.group(2).split('|')
#print(match.group(1))
#if variantType == "missense_variant":
# <http://pbr.wur.nl/GENE#
variantClassType = "<" + prefix + variantType + "Annotation>"
if not variantType in subClassDef:
subClassDef[variantType] = False
rdf.write(variantClassType + " <http://www.w3.org/2000/01/rdf-schema#subClassOf> <" + prefix + "Annotation> . \n")
rdf.write(variantClassType + " " + isType + " <http://www.w3.org/2002/07/owl#Class> . \n")
infoIRI = "<" + SNPidRaw + "/annot_" + str(count) + ">"
rdf.write(infoIRI + " " + isType + " " + variantClassType +" . \n")
#rdf.write(SampleSNPIri + " <" + prefix + "allel> " + allelIri + " . \n")
rdf.write(SNPid + " <" + prefix + "annotation> " + infoIRI + " . \n")
print(variantType + str(data) + data[3])
geneName = data[8]
if geneName != "":
rdf.write(infoIRI + " <" + prefix + "gene> <http://pbr.wur.nl/GENE#" + geneName + "> . \n")
hgvs_P = data[3]
if hgvs_P != "":
rdf.write(infoIRI + " <" + prefix + "hgvs_P> \"" + hgvs_P + "\" . \n")
rdf.write(infoIRI + " <" + prefix + "importance> \"" + data[0] + "\" . \n")
count = count + 1
#print(dir(record.INFO['EFF'][0]))
# re-emitting the refSNP triple is redundant but harmless: duplicate RDF triples merge
rdf.write(SNPid + " <" + prefix + "refSNP> \"" + record.REF + "\" . \n")
# break
rdf.close()
print("done")
#for record in vcf_reader:
# print(record)
# fields = dir(record)
# for field,value in inspect.getmembers(record):
# if field.startswith("__"):
# continue;
# print(field + " : " + str(value))
# break
#ALT : [C]
#CHROM : 0
#FILTER : None
#FORMAT : GT:AD:DP:GQ:OG:PL
#ID : None
#INFO : {'NumGenotypesChanged': 0, 'BaseQRankSum': 0.874, 'MLEAC': [103], 'AN': 6, 'R2': 1.0, 'FS': 26.136, 'MLEAF': [0.725], 'HaplotypeScore': 2.4948, 'Dels': 0.0, 'MQRankSum': 2.235, 'MQ': 58.12, 'MQ0': 0, 'DP': 2520, 'AF': [0.667], 'EFF': ['intergenic_region(MODIFIER||||||||||1)'], 'ReadPosRankSum': -0.107, 'InbreedingCoeff': 0.9768, 'AC': [4], 'QD': 34.25}
#POS : 100944
#QUAL : 72819.21
#REF : T
#_sample_indexes : {'/ifshk5/PC_PA_EU/PMO/Tomato_reseq/01.BWA/SZAXPI009286-74': 2, '/ifshk5/PC_PA_EU/PMO/Tomato_reseq/01.BWA/SZAXPI009285-62': 1, '/ifshk5/PC_PA_EU/PMO/Tomato_reseq/01.BWA/SZAXPI009284-57': 0}
#aaf : [0.6666666666666666]
#add_filter : <bound method _Record.add_filter of <vcf.model._Record object at 0x7ffd3e401278>>
#add_format : <bound method _Record.add_format of <vcf.model._Record object at 0x7ffd3e401278>>
#add_info : <bound method _Record.add_info of <vcf.model._Record object at 0x7ffd3e401278>>
#alleles : ['T', C]
#call_rate : 1.0
#end : 100944
#genotype : <bound method _Record.genotype of <vcf.model._Record object at 0x7ffd3e401278>>
#get_hets : <bound method _Record.get_hets of <vcf.model._Record object at 0x7ffd3e401278>>
#get_hom_alts : <bound method _Record.get_hom_alts of <vcf.model._Record object at 0x7ffd3e401278>>
#get_hom_refs : <bound method _Record.get_hom_refs of <vcf.model._Record object at 0x7ffd3e401278>>
#get_unknowns : <bound method _Record.get_unknowns of <vcf.model._Record object at 0x7ffd3e401278>>
#heterozygosity : 0.4444444444444444
#is_deletion : False
##is_indel : False
#is_monomorphic : False
#is_snp : True
#is_sv : False
#is_sv_precise : False
#is_transition : True
#nucl_diversity : 0.5333333333333333
#num_called : 3
#num_het : 0
#num_hom_alt : 2
#num_hom_ref : 1
#num_unknown : 0
#samples : [Call(sample=/ifshk5/PC_PA_EU/PMO/Tomato_reseq/01.BWA/SZAXPI009284-57, CallData(GT=1|1, AD=[0, 32], DP=32, GQ=60, OG=None, PL=[1222, 96, 0])), Call(sample=/ifshk5/PC_PA_EU/PMO/Tomato_reseq/01.BWA/SZAXPI009285-62, CallData(GT=0|0, AD=[47, 0], DP=47, GQ=60, OG=None, PL=[0, 141, 1827])), Call(sample=/ifshk5/PC_PA_EU/PMO/Tomato_reseq/01.BWA/SZAXPI009286-74, CallData(GT=1|1, AD=[0, 31], DP=31, GQ=60, OG=None, PL=[1260, 93, 0]))]
#start : 100943
#sv_end : None
#var_subtype : ts
#var_type : snp
|
jessevdam/PBR_BYOD_WUR
|
main.py
|
Python
|
gpl-2.0
| 8,566
|
[
"BWA"
] |
6565440a21a2fb542999eed2be1f879b2fa062c7917f80d27013cbf6ace498d3
|
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from nose.plugins.skip import SkipTest
from rootpy.utils.silence import silence_sout
try:
with silence_sout():
from ROOT import (RooFit, RooRealVar, RooGaussian, RooArgusBG,
RooAddPdf, RooArgList, RooArgSet)
from rootpy.stats import mute_roostats; mute_roostats()
from rootpy.stats import Workspace
except ImportError:
raise SkipTest("ROOT is not compiled with RooFit and RooStats enabled")
from rootpy.io import TemporaryFile
from nose.tools import assert_false
def test_correlated_values():
try:
import uncertainties
except ImportError:
raise SkipTest("uncertainties package is not installed")
from rootpy.stats.correlated_values import correlated_values
# construct pdf and toy data following example at
# http://root.cern.ch/drupal/content/roofit
# --- Observable ---
mes = RooRealVar("mes", "m_{ES} (GeV)", 5.20, 5.30)
# --- Parameters ---
sigmean = RooRealVar("sigmean", "B^{#pm} mass", 5.28, 5.20, 5.30)
sigwidth = RooRealVar("sigwidth", "B^{#pm} width", 0.0027, 0.001, 1.)
# --- Build Gaussian PDF ---
signal = RooGaussian("signal", "signal PDF", mes, sigmean, sigwidth)
# --- Build Argus background PDF ---
argpar = RooRealVar("argpar", "argus shape parameter", -20.0, -100., -1.)
background = RooArgusBG("background", "Argus PDF",
mes, RooFit.RooConst(5.291), argpar)
# --- Construct signal+background PDF ---
nsig = RooRealVar("nsig", "#signal events", 200, 0., 10000)
nbkg = RooRealVar("nbkg", "#background events", 800, 0., 10000)
model = RooAddPdf("model", "g+a",
RooArgList(signal,background),
RooArgList(nsig,nbkg))
# --- Generate a toyMC sample from composite PDF ---
data = model.generate(RooArgSet(mes), 2000)
# --- Perform extended ML fit of composite PDF to toy data ---
fitresult = model.fitTo(data, RooFit.Save(), RooFit.PrintLevel(-1))
nsig, nbkg = correlated_values(["nsig", "nbkg"], fitresult)
# Arbitrary math expression according to what the `uncertainties`
# package supports, automatically computes correct error propagation
sum_value = nsig + nbkg
value, error = sum_value.nominal_value, sum_value.std_dev
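# The uncertainties package supports arbitrary expressions of correlated
# values; a hedged illustration (not part of the original test):
#
#   purity = nsig / (nsig + nbkg)
#   print(purity.nominal_value, purity.std_dev)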
workspace = Workspace(name='workspace')
# import the data
assert_false(workspace(data))
with TemporaryFile():
workspace.Write()
|
cms-btv-pog/rootpy
|
rootpy/stats/tests/test_correlated_values.py
|
Python
|
gpl-3.0
| 2,558
|
[
"Gaussian"
] |
a62d0a4c0441f58e0aea4c017c84e500515185e51a0c50a19bcb2bf2e76ae02a
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test for a specific race condition that used to stop a build cold when
a Node's ref_count would get decremented past 0 and "disappear" from
the Taskmaster's walk of the dependency graph.
Note that this test does not fail every time, but it failed at least
60%-70% of the time on the system(s) I tested.
The rather complicated set up here creates a condition where,
after building four "object files" simultaneously while running with
-j4, sets up a race condition amongst the three dependencies of the
c6416/cpumanf.out file, where two of the dependencies are built at the
same time (that is, by the same command) and one is built separately.
We used to detect Nodes that had been started but not finished building
and just set the waiting ref_count to the number of Nodes. In this case,
if we got unlucky, we'd re-visit the Nodes for the two files first and
set the ref_count to two *before* the command that built individual node
completed and decremented the ref_count from two to one. Then when the
two files completed, we'd update the ref_count to 1 - 2 = -1, which would
cause the Taskmaster to *not* "wake up" the Node because it's ref_count
hadn't actually reached 0.
(The solution was to not set the ref_count, but to add to it only the
Nodes that were, in fact, added to the waiting_parents lists of various
child Nodes.)
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('build.py', """\
import sys
import time
args = sys.argv[1:]
outputs = []
while args:
if args[0][0] != '-':
break
arg = args.pop(0)
if arg == '-o':
outputs.append(args.pop(0))
continue
if arg == '-s':
time.sleep(int(args.pop(0)))
contents = ''
for ifile in args:
contents = contents + open(ifile, 'rb').read()
for ofile in outputs:
ofp = open(ofile, 'wb')
ofp.write('%s: building from %s\\n' % (ofile, " ".join(args)))
ofp.write(contents)
ofp.close()
""")
#
#
#
test.write('SConstruct', """\
env = Environment()
cmd = r'%(_python_)s build.py -o $TARGET $SOURCES'
f1 = env.Command('c6416/cpumanf/file1.obj', 'file1.c', cmd)
f2 = env.Command('c6416/cpumanf/file2.obj', 'file2.c', cmd)
f3 = env.Command('c6416/cpumanf/file3.obj', 'file3.c', cmd)
f4 = env.Command('c6416/cpumanf/file4.obj', 'file4.c', cmd)
f5 = env.Command('c6416/cpumanf/file5.obj', 'file5.c', cmd)
f6 = env.Command('c6416/cpumanf/file6.obj', 'file6.c', cmd)
objs = f1 + f2 + f3 + f4 + f5 + f6
btc = env.Command('build/target/cpumanf.out', 'c6416/cpumanf.out', cmd)
addcheck_obj = env.Command('addcheck.obj', 'addcheck.c', cmd)
addcheck_exe = env.Command('addcheck.exe', addcheck_obj, cmd)
cmd2 = r'%(_python_)s build.py -s 2 -o ${TARGETS[0]} -o ${TARGETS[1]} $SOURCES'
cksums = env.Command(['c6416/cpumanf_pre_cksum.out',
'c6416/cpumanf_pre_cksum.map'],
objs,
cmd2)
cpumanf_out = env.Command('c6416/cpumanf.out',
cksums + addcheck_exe,
cmd)
cpumanf = env.Alias('cpumanf', objs + btc)
env.Command('after.out', cpumanf, r'%(_python_)s build.py -o $TARGET after.in')
""" % locals())
test.write('file1.c', "file1.c\n")
test.write('file2.c', "file2.c\n")
test.write('file3.c', "file3.c\n")
test.write('file4.c', "file4.c\n")
test.write('file5.c', "file5.c\n")
test.write('file6.c', "file6.c\n")
test.write('addcheck.c', "addcheck.c\n")
test.write('after.in', "after.in\n")
test.run(arguments = '-j4 after.out')
test.must_match('after.out', """\
after.out: building from after.in
after.in
""")
test.write('file5.c', "file5.c modified\n")
test.write('after.in', "after.in modified\n")
test.run(arguments = '-j4 after.out')
test.must_match('after.out', """\
after.out: building from after.in
after.in modified
""")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
timj/scons
|
test/Parallel/ref_count.py
|
Python
|
mit
| 5,115
|
[
"VisIt"
] |
817eaa72c3ce044a2d4791d4909ef31c3450bb6ea94c9aee22717fdc8a6e9a44
|
# -*- coding: cp1252 -*-
# Copyright (C) 2005 by Alex Duesel <alex@alex-duesel.de>
# homepage: http://www.mandarine.tv
# See file license.txt for licensing issues
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
slotmachine_main.py - Simulation of a state-of-the-art slotmachine
If you have ever wondered how a State-of-the-Art slotmachine works,
then this Python program is for you.
It features a complete state machine, bonus scheme and special rules of a commercial slotmachine.
Well actually I modified the rules in order to get a better payout rate :-]
I wrote this simulation after I quit my job in the casino games industry back in 2005.
It features various parameter options, even support for macros, see comments in source code.
Options:
-h : this help text
-r : TOTAL RISK Mode
-b : BLIND GAME
-a : Accept 40TS and Total Risk
-i : interactive mode; do not use the -b and -t options
-l : turn logging on in conjunction with -b, -r and -t
-n : number of games
-m : MACRO mode
Default option -i
Examples:
python slotmachine_main.py -r -l -n 3400 > output.txt
python slotmachine_main.py -b -n 1000
python slotmachine_main.py -i
python slotmachine_main.py -m a5[10]1 > ausgabe.txt
python slotmachine_main.py -t -l > test.txt
python slotmachine_main.py -t
slotmachine_main.py mini.py -l -z -r -n 100000 > test.txt
Hint: Hold 'Enter' key in interactive mode in order to speed run games
'''
import random
import string
import time
import os
import getopt
import sys
import math
from slotmachine_tables import *
# constants and variables
NUM_GAMES = 3400000
DEPOSIT = 20
EXTRAGAME_BOUNDS = 4
FOUR_ROW_MODE_BOUNDS = 100
INTERACTIVE = 0
LOGGING = 0
MACRO = 0
BLIND = 0
TOTAL = 0
TEST = 0
RECEIVING_40 = 0
ADVANCED_LOGGING = 1
TEXT_MODE = 0
RISK_FINISHED = 12
UNDEFINED = -1
COLUMN_WIN_COMBINATIONS = 0
COLUMN_NEXT_STATE = 1
COLUMN_MONEY = 2
COLUMN_SUPER = 3
COLUMN_TURBO = 4
COLUMN_FOUR_ROW = 5
COLUMN_EXTRA = 6
COLUMN_JACKPOT = 7
COLUMN_RANK = 8
COLUMN_SYMBOL = 9
DEFAULT = 255
ROLLERS = 'Normal Rotor-Game'
MONEY_TOTAL = "Sum of all money prizes:"
SUPER_TOTAL = "Sum of all Super Games:"
TURBO_TOTAL = "Sum of all Turbo Games:"
FOUR_ROW_TOTAL = "Sum of all Four-Row Games:"
EXTRA_TOTAL = "Sum of all Extra Points:"
JPTOTAL = "Sum of all Jackpot Points:"
POWER_RUN_COUNTER = "Power Run Counter"
POWER_RUN_HITS = "Power Run Hits"
TURBO_RUN = "Turbo Run"
SUPER_RUN = "Super Run"
NO_HITS = 'Fail!'
LOG = 'Logging procedure'
END_OF_GAME = 'Game Over!'
ROWS = 'Winning Rows'
PROCESS = "Key Handling"
counter = {}
stats = {}
carry = {}
counter[EXTRA_POINT] = 0
counter[TURBO] = 0
counter[FOUR_ROW] = 0
counter[SUPER] = 0
counter[JP] = 1
counter[MONEY] = 2000
counter[MONEY_TOTAL] = 0
counter[EXTRA_TOTAL] = 0
counter[TURBO_TOTAL] = 0
counter[FOUR_ROW_TOTAL] = 0
counter[SUPER_TOTAL] = 0
counter[JPTOTAL] = 0
counter[POWER_RUN_COUNTER] = 0
counter[POWER_RUN_HITS] = 0
counter[EXTRA_PLUS_LEFT] = 0
counter[EXTRA_PLUS_RIGHT] = 0
counter[TURBO_RUN] = 0
counter[SUPER_RUN] = 0
counter[NO_HITS] = 0
carry[MONEY] = 0
carry[SUPER] = 0
carry[TURBO] = 0
carry[FOUR_ROW] = 0
carry[EXTRA_POINT] = 0
carry[JP] = 0
game_mode = NORMAL
winning_combinations = []
winning_combinations_stack = []
extra_double_jump = 0
in_run = 0
free_flg = 0
four_row_flg = 0
risk_accept_flg = 0
risk_start_flg = 0
num_games = 0
num_won = 0
num_risk_steps = 0
num_wheel = 0
num_draws = 0
num_jackpot = 0
risk_fail_flg = 0
risk_win_flg = 0
cheatrun = 0
cheatcode = 0
macro_list = []
picture = [ S20C, S20C, S20C,\
S20C, S20C, S20C ]
next_combination = ()
def get_turbo_plan():
return SUPER_PLAN
def get_normal_plan():
return NORMAL_PLAN
def get_super_plan():
# while at most 19 Super games remain, the richer Turbo plan is used
if game_mode == SUPER and counter[SUPER] <= 19:
return TURBO_PLAN
else:
return SUPER_PLAN
BONUS_SCHEME = {
NORMAL : get_normal_plan,
TURBO : get_turbo_plan,
SUPER : get_super_plan,
FOUR_ROW: get_normal_plan
}
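# dispatch sketch (illustrative; the actual call site lives elsewhere):
#   plan = BONUS_SCHEME[game_mode]()   # e.g. NORMAL -> NORMAL_PLAN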
# cheatcodes
def get_cheatrun(cheatcode, picture):
'''... this procedure should be rather self-explanatory :-] ...'''
if cheatcode == 'a': return (S30C,S30C,S30C, \
S50C,S80C,S20C)
if cheatcode == 'b': return (S20C,S20C,S20C, \
S50C,S80C,S30C)
if cheatcode == 'c': return (JACK,JACK,JACK, \
S50C,S80C,S30C)
if cheatcode == 'd': return (S50C,S80C,S30C, \
S50C,S80C,S30C)
if cheatcode == 'e': return (S50C,S50C,S50C, \
S50C,S80C,S30C)
if cheatcode == 'f': return (S160C,S160C,S160C, \
S50C, S80C, S30C)
if cheatcode == 'g': return (S50C,S80C,S160C, \
S50C,S80C,S30C)
if cheatcode == 'h': return (S50C,S80C,JACK, \
S50C,S80C,S160)
if cheatcode == 'i': return (S50C,S80C,S3BAR, \
S80C,S50C,S1BAR)
if cheatcode == 'j': return (JACK,JACK,JACK, \
S80C,S50C,S1BAR)
if cheatcode == 'k': return (S50C,S80C,S7, \
S80C,S50C,S30C)
if cheatcode == 'l': return (S50C,S80C,S7, \
S50C,S80C,S30C)
if cheatcode == 'm': return (S80C,S50C,S80C, \
S50C,S80C,S30C)
if cheatcode == 'n': return (S50C, S80C, S30C, \
S160C,S160C,S160C)
if cheatcode == 'o': return (S2BAR, S2BAR, S2BAR, \
S160C,S160C,S160C)
if cheatcode == 'p': return (S7, S7, S7, \
S160C,S160C,S160C)
return picture
# rule base
def pre_rules(game_state, index):
'''... these special rules will be evaluated before a state change triggered by the state table ...'''
global extra_double_jump
global game_mode
global winning_combinations
global in_run
global free_flg
global four_row_flg
global risk_start_flg
global winning_line
# set game_mode { NORMAL | SUPER | TURBO | FOUR_ROW }
if counter[FOUR_ROW] >= FOUR_ROW_MODE_BOUNDS or four_row_flg:
game_mode = FOUR_ROW
four_row_flg = 1
if counter[TURBO] > 0:
game_mode = TURBO
counter[SUPER] = 0
elif counter[SUPER] > 0:
game_mode = SUPER
elif not four_row_flg:
game_mode = NORMAL
# reset free games mode
if len(winning_combinations) > 0:
counter[NO_HITS] = 0
free_flg = 0
# if we reach TURBO game mode we will change our SUPER games into TURBO games 1:1
if game_state == WIN and game_mode == TURBO and carry[SUPER] > 0:
if LOGGING or INTERACTIVE: print "Rule: Change Super-Games 1:1 into Turbo-Games"
carry[TURBO] = carry[SUPER]
carry[SUPER] = 0
# we let our Success-Run begin if we win SUPER or TURBO games for the first time
if game_state == WIN and (carry[TURBO] > 0 or carry[SUPER] > 0) \
and not in_run and counter[TURBO] == 0 and counter[SUPER] == 0:
if LOGGING or INTERACTIVE: print "Rule: Success-Run started!"
in_run = 1
counter[POWER_RUN_HITS] = 0
counter[SUPER_RUN] = 0
counter[TURBO_RUN] = 0
# restart Success-Run if we win TURBO games in SUPER games mode
if in_run and game_mode == SUPER and carry[TURBO] > 0 \
and game_state == WIN:
if LOGGING or INTERACTIVE: print "Rule: Restart Success-Run!"
counter[POWER_RUN_COUNTER] = 0
in_run = 1
counter[POWER_RUN_HITS] = 0
counter[SUPER_RUN] = 0
counter[TURBO_RUN] = 0
# count SUPER and TURBO games if we are in a Success-RUN
if game_state == WIN and in_run:
if carry[TURBO] > 0:
counter[TURBO_RUN]+=carry[TURBO]
elif carry[SUPER] > 0:
counter[SUPER_RUN]+=carry[SUPER]
# if not in FREE games mode charge money for one game
if counter[MONEY] > 20 and game_state == ROLLERS and not free_flg:
counter[MONEY]-=DEPOSIT
# set free games mode (at the moment you can get free games only when you do not win anything in 100 games)
# be careful this has a big impact on the payout rate
if counter[NO_HITS] >= 100:
if LOGGING or INTERACTIVE: print "Rule: Free Games running!"
free_flg = 1
# decrement FOUR-ROW games
if counter[FOUR_ROW] > 0 and game_state == ROLLERS and game_mode == FOUR_ROW:
counter[FOUR_ROW]-=1
# exit FOUR-ROW mode
if counter[FOUR_ROW]==0:
four_row_flg = 0
# decrement SUPER games
if counter[SUPER] > 0 and game_state == ROLLERS and game_mode == SUPER:
counter[SUPER]-=1
# exit Success-Run
if in_run and counter[SUPER] == 0:
in_run = 0
if counter[POWER_RUN_COUNTER] > 0:
counter[POWER_RUN_COUNTER] = 0
# decrement TURBO games
if counter[TURBO] > 0 and game_state == ROLLERS and game_mode == TURBO:
counter[TURBO]-=1
# exit Success-Run
if in_run and counter[TURBO] == 0:
in_run = 0
if counter[POWER_RUN_COUNTER] > 0:
counter[POWER_RUN_COUNTER] = 0
# calculate POWER-RUN when in SUPER games mode
if counter[SUPER] > 0 and game_mode == SUPER and in_run:
counter[POWER_RUN_COUNTER] = counter[POWER_RUN_HITS]-math.ceil(counter[SUPER_RUN]*0.5)
# calculate POWER-RUN when in TURBO games mode
if counter[TURBO] > 0 and game_mode == TURBO and in_run:
counter[POWER_RUN_COUNTER] = counter[POWER_RUN_HITS]-math.ceil(counter[TURBO_RUN]*0.75)
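# worked example (illustrative numbers): with POWER_RUN_HITS = 5 and
# TURBO_RUN = 4, the counter is 5 - ceil(4 * 0.75) = 5 - 3 = 2; if it
# ever drops below zero, the rules below grant a free extra game.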
# win another TURBO game if POWER-RUN < 0
if game_mode == TURBO and counter[POWER_RUN_COUNTER] < 0 and counter[TURBO]==0:
if LOGGING or INTERACTIVE: print "Rule: Won another Turbo-Game for free"
counter[TURBO] = 1
# win another SUPER game if POWER-RUN < 0
if game_mode == SUPER and counter[POWER_RUN_COUNTER] < 0 and counter[SUPER]==0:
if LOGGING or INTERACTIVE: print "Rule: Won another Super-Game for free"
counter[SUPER] = 1
# win a Wheel of Fortune 60 draw
if counter[JP]==7 and (S50C, S50C, S50C) in winning_combinations:
if LOGGING or INTERACTIVE: print "Rule: Jackpot-Win a Wheel of Fortune 60 draw!"
counter[JP]=1
counter[EXTRA_POINT]+=3
counter[EXTRA_TOTAL]+=3
winning_combinations = []
return WHEEL60
# win a Wheel of Fortune 80 draw
elif counter[JP]==8 and (S160C, S160C, S160C) in winning_combinations:
if LOGGING or INTERACTIVE: print "Rule: Jackpot-Win a Wheel of Fortune 80 draw!"
counter[JP]=1
counter[EXTRA_POINT]+=4
counter[EXTRA_TOTAL]+=4
winning_combinations = []
return WHEEL80
## RISK-LINES
# no RISK lines when in blind game mode
if (game_state == RISK_L or game_state == RISK_R) and BLIND:
if LOGGING or INTERACTIVE: print "Rule: BLIND -> No risk line!"
if carry[SUPER] > 0 and counter[TURBO]>0:
if LOGGING or INTERACTIVE: print "Rule: Change Super-Games 1:1 into Turbo-Games"
carry[TURBO] = carry[SUPER]
carry[SUPER] = 0
return WIN
# no RISK lines when TURBO games or SUPER games counter > 200
if (counter[TURBO] > 200 or counter[SUPER] > 200) \
and (game_state == RISK_L or game_state == RISK_R):
if LOGGING or INTERACTIVE: print "Regel: Sonder > 150 -> Kein Risiko!"
if carry[SUPER] > 0:
if LOGGING or INTERACTIVE: print "Rule: Change Super-Games 1:1 into Turbo-Games"
carry[TURBO] = carry[SUPER]
carry[SUPER] = 0
return WIN
# if risk line right step 4 then execute a DRAW200 game
if index == 4 and game_state == RISK_R:
return DRAW200
# if risk line left step 4 then execute a DRAW300 game
elif index == 4 and game_state == RISK_L:
return DRAW300
    # execute a double jump in risk line (steps 5, 7 and 9, either side)
    elif counter[EXTRA_POINT] > 0 and index in (5, 7, 9) \
        and (game_state == RISK_L or game_state == RISK_R):
        if LOGGING or INTERACTIVE: print "Rule: Double-Jump!"
        extra_double_jump = 1
    # execute a start draw when on a risk line step (steps 6-10)
    start_draws = {(RISK_L, 6): START6S, (RISK_L, 7): START12S,
                   (RISK_L, 8): START12T, (RISK_L, 9): START25T,
                   (RISK_L, 10): START50T, (RISK_R, 6): START4S,
                   (RISK_R, 7): START8S, (RISK_R, 8): START8T,
                   (RISK_R, 9): START20T, (RISK_R, 10): START40T}
    if risk_start_flg and (game_state, index) in start_draws:
        risk_start_flg = 0
        return start_draws[(game_state, index)]
# restart state
if counter[FOUR_ROW] >= FOUR_ROW_MODE_BOUNDS or four_row_flg:
game_mode = FOUR_ROW
four_row_flg = 1
if counter[TURBO] > 0:
game_mode = TURBO
counter[SUPER] = 0
elif counter[SUPER] > 0:
game_mode = SUPER
elif not four_row_flg:
game_mode = NORMAL
# default
return game_state
def post_rules(game_state, next_state, index):
'''... these special rules will be evaluated after a state change triggered by the state table ...'''
global winning_combinations_stack
global winning_combinations
global winning_line
# increment Jackpot-Points if RISK step accepted
if risk_accept_flg and game_state == RISK_R and next_state==WIN and index == 10:
if LOGGING or INTERACTIVE: print "Rule: Win Jackpot Points!"
counter[JP]+=1
counter[JPTOTAL]+=1
# if RISK step failed then evaluate next winning line (only in FOUR-ROW and NORMAL games)
if (game_state == RISK_L or game_state == RISK_R) and next_state == ROLLERS and \
len(winning_combinations_stack)>0 and \
(game_mode == NORMAL or game_mode==FOUR_ROW):
return NEXT_ROW
# another winning rule for Jackpot points
if game_state == DRAW3BAR and carry[TURBO] == 150 and next_state == WIN:
if LOGGING or INTERACTIVE: print "Rule: Win Jackpot Points!"
counter[JP]+=1
counter[JPTOTAL]+=1
# count FREE-GAMES mode games
if game_state == ROLLERS and len(winning_combinations)==0:
counter[NO_HITS]+=1
# split 150 SUPER games into 50 TURBO games + 1EXTRA Point
if (game_state==RISK_L or game_state==RISK_R or game_state.startswith("START") or \
game_state.startswith("FAIL")) \
and next_state == WIN and carry[TURBO] == 150:
if LOGGING or INTERACTIVE: print "Rule: Split into 50 TURBO games + 1EXTRA Point"
carry[TURBO] = 50
carry[EXTRA_POINT] = 1
# default
return next_state
def compare_picture_with_plan(picture, winning_lines, winning_plan):
'''... check if win in standard draw ...'''
global winning_combinations
i = 0
for row in winning_plan:
found = 1
j = 0
for symbol in row[COLUMN_WIN_COMBINATIONS]:
if(picture[winning_lines[j]]!=symbol and \
symbol!=JOKER):
found = 0
i+=1
break
j+=1
if found:
# hit!
return i, row[COLUMN_WIN_COMBINATIONS]
# no hit!
return UNDEFINED, UNDEFINED
def compare_draw_table(drawtable, rand_value):
'''... check if win in risk draw ...'''
prob = 0
index = 0
for wert in drawtable[PROPABILITY]:
prob += wert
if prob >= rand_value:
break
index+=1
if index < len(drawtable[PROPABILITY]):
# win!
return index
# no win!?
print "ERROR! No match in table found"
sys.exit(1)
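# A minimal lookup sketch for compare_draw_table (hypothetical weights; the
# PROPABILITY column is assumed to sum to 1000, matching the
# random.randint(1, 1000) draw in draw_process): with weights (700, 250, 50),
# rand_value 700 hits index 0, 701 hits index 1 and 951 hits index 2.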
def put_into_account(xtable, index, plan_row=None):
'''... put prize onto player's account ...'''
if plan_row==None:
# handle drawtable
prize = xtable[PRIZE][index]
typ = xtable[PRIZE_TYPE][index]
carry[typ] = prize
if prize > 0:
symbol = xtable[SYMBOLS][index]
carry[symbol] = 1
else:
# handle winning_plan put onto carry over
carry[MONEY] = plan_row[COLUMN_MONEY]
carry[SUPER] = plan_row[COLUMN_SUPER]
carry[TURBO] = plan_row[COLUMN_TURBO]
# instant win
counter[FOUR_ROW] += plan_row[COLUMN_FOUR_ROW]
counter[FOUR_ROW_TOTAL] += plan_row[COLUMN_FOUR_ROW]
counter[EXTRA_POINT] += plan_row[COLUMN_EXTRA]
counter[EXTRA_TOTAL] += plan_row[COLUMN_EXTRA]
counter[JP] += plan_row[COLUMN_JACKPOT]
counter[JPTOTAL] += plan_row[COLUMN_JACKPOT]
def win_route(statedict,name,index):
'''... puts carry overs onto counters ...'''
for z in carry.keys():
try:
counter[z] += carry[z]
except KeyError:
counter[z] = 0
counter[z] += carry[z]
counter[MONEY_TOTAL] += carry[MONEY]
counter[SUPER_TOTAL] += carry[SUPER]
counter[TURBO_TOTAL] += carry[TURBO]
counter[FOUR_ROW_TOTAL] += carry[FOUR_ROW]
counter[EXTRA_TOTAL] += carry[EXTRA_POINT]
counter[JPTOTAL] += carry[JP]
return ROLLERS, DEFAULT
def draw_process(statedict, name,index):
'''... random normal draw ...'''
rand_value = random.randint(1, 1000)
index = compare_draw_table(statedict[name][TABLE],rand_value)
if index != UNDEFINED:
if (MACRO or TEST) and (name == DRAW200 or name == DRAW300 \
or name == DRAW20 or name == DRAW30):
index= 0
# prize
put_into_account(statedict[name][TABLE], index)
return statedict[name][TABLE][NEXT_STATE][index], index
# should not happen
print "ERROR! no matchin value found"
sys.exit(1)
def risk_process(statedict, name,index):
'''... random risk draw ...'''
global risk_win_flg
global risk_fail_flg
global risk_accept_flg
if risk_accept_flg:
# accept risk step
put_into_account(statedict[name][TABLE],index)
return WIN, index
# step up
index+=1
if (random.randint(0, 1) or risk_win_flg) \
and not risk_fail_flg:
# prize
put_into_account(statedict[name][TABLE],index)
if index >= RISK_FINISHED:
# reached final step
return WIN, DEFAULT
# next step
return name, index
# risk failed!
if game_mode == NORMAL and index>=9 and index<=12:
# failed in top area win FOUR-ROW game
counter[FOUR_ROW]+=1
counter[FOUR_ROW_TOTAL]+=1
if index >= RISK_FINISHED:
# fail safe
if name == RISK_L:
return FAIL50, DEFAULT
else:
return FAIL40, DEFAULT
# normal fail
return ROLLERS, DEFAULT
def extra_risk_process(statedict, name,index):
'''... risk draw when EXTRA points on player's account ...'''
global risk_win_flg
global risk_fail_flg
global risk_accept_flg
if risk_accept_flg:
# accept
put_into_account(statedict[name][TABLE],index)
return WIN, index
    # double step-up
index +=2
if(random.randint(1, 100)>30 or risk_win_flg) \
and not risk_fail_flg:
# prize
put_into_account(statedict[name][TABLE],index)
if index >= RISK_FINISHED-1:
# decrement EXTRA_POINT s
counter[EXTRA_POINT]-=1
return WIN, DEFAULT
# next step
return name, index
# fail
if game_mode == NORMAL and index>=9 and index<=12:
# failed in top area win FOUR-ROW game
counter[FOUR_ROW]+=1
counter[FOUR_ROW_TOTAL]+=1
return ROLLERS, DEFAULT
def extra_plus_risk_process(statedict, name,index):
'''... risk draw when extra_plus counters > 0 ...'''
global risk_win_flg
global risk_fail_flg
global risk_accept_flg
if risk_accept_flg:
# accept
put_into_account(statedict[name][TABLE],index)
return WIN, index
# stepup
index +=1
if(random.randint(1, 100)>30 or risk_win_flg) \
and not risk_fail_flg:
# prize
put_into_account(statedict[name][TABLE],index)
if name == RISK_L:
# decrement extra_plus counter
if index == counter[EXTRA_PLUS_LEFT]:
counter[EXTRA_PLUS_LEFT]-=1
else:
# decrement extra_plus counter
if index == counter[EXTRA_PLUS_RIGHT]:
counter[EXTRA_PLUS_RIGHT]-=1
# next step
return name, index
# fail
return ROLLERS, DEFAULT
def risk_dispatch_process(statedict, name,index):
'''... dispatches into various risk draw modes ...'''
global extra_double_jump
if counter[EXTRA_PLUS_LEFT] and name == RISK_L \
and index < counter[EXTRA_PLUS_LEFT]:
if (LOGGING or INTERACTIVE) and ADVANCED_LOGGING:
print "70% Risk Chance!"
return extra_plus_risk_process(statedict, name,index)
elif counter[EXTRA_PLUS_RIGHT] and name == RISK_R \
and index < counter[EXTRA_PLUS_RIGHT]:
if (LOGGING or INTERACTIVE) and ADVANCED_LOGGING:
print "70% Risk Chance!"
return extra_plus_risk_process(statedict, name,index)
elif extra_double_jump:
extra_double_jump = 0
if (LOGGING or INTERACTIVE) and ADVANCED_LOGGING:
print "70% Risk Chance!"
return extra_risk_process(statedict, name,index)
else:
if (LOGGING or INTERACTIVE) and ADVANCED_LOGGING:
print "50% Risk Chance!"
return risk_process(statedict, name,index)
def normal_process(statedict, name,index):
'''... normal rotor game ...'''
global winning_combinations
global winning_combinations_stack
global in_run
global picture
global restart_flg
global cheatrun
global winning_line
# draw
rotorpos = [0,0,0,0,0,0]
for i in range(len(rotorpos)):
rotorpos[i] = random.randint( 0, len(CYLINDER[i%3])-1)
picture = [ CYLINDER[0][rotorpos[0]], CYLINDER[1][rotorpos[1]], CYLINDER[2][rotorpos[2]],
CYLINDER[0][rotorpos[3]], CYLINDER[1][rotorpos[4]], CYLINDER[2][rotorpos[5]]]
# cheating
if cheatrun:
picture = get_cheatrun(cheatcode, picture)
rotorpos[0] = list(CYLINDER[0]).index(picture[0])
rotorpos[1] = list(CYLINDER[1]).index(picture[1])
rotorpos[2] = list(CYLINDER[2]).index(picture[2])
rotorpos[3] = list(CYLINDER[0]).index(picture[3])
rotorpos[4] = list(CYLINDER[1]).index(picture[4])
rotorpos[5] = list(CYLINDER[2]).index(picture[5])
# restart rotors
winsymbole = (S1BAR, S2BAR,S3BAR,S7,JACK)
if picture[TOP_LEFT] not in winsymbole and \
picture[BOTTOM_LEFT] not in winsymbole and not cheatrun:
n1 = random.randint( 0, len(CYLINDER[0])-1)
n2 = random.randint( 0, len(CYLINDER[0])-1)
picture[TOP_LEFT] = CYLINDER[0][n1]
picture[BOTTOM_LEFT] = CYLINDER[0][n2]
restart_flg = 1
cheatrun = 0
# evaluation
winning_plan = statedict[name][TABLE][game_mode]()
    win_rows = statedict[name][ROWS][game_mode]
max_rang = UNDEFINED
winning_combinations = []
winning_combinations_stack = []
idx = 0
    for row in win_rows:
i, kombi = compare_picture_with_plan(picture, row, winning_plan)
rang = UNDEFINED
if i != UNDEFINED:
rang = winning_plan[i][COLUMN_RANK]
winning_combinations_stack.append((rang,i,row))
winning_combinations.append(kombi)
if rang > max_rang:
max_rang = rang
winning_line = row
idx = i
# hit!
if len(winning_combinations)>0:
# Success-Run!
if in_run:
counter[POWER_RUN_HITS]+=1
if counter[POWER_RUN_COUNTER] < 0:
counter[POWER_RUN_COUNTER]+=1
elif counter[POWER_RUN_COUNTER] == 0:
in_run = 0
counter[POWER_RUN_HITS] = 0
counter[SUPER_RUN] = 0
counter[TURBO_RUN] = 0
# sort winning combinations
winning_combinations_stack.sort()
winning_combinations_stack.reverse()
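        # (highest rank first, e.g. [(3, i1, row1), (1, i2, row2)]; the top
        # entry is paid out below, the rest are consumed via NEXT_ROW)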
# put_into_account
if max_rang != UNDEFINED:
# pop stack
winning_combinations_stack = winning_combinations_stack[1:]
# put_into_account and set next step
put_into_account(statedict[name][TABLE], idx, winning_plan[idx])
return winning_plan[idx][COLUMN_NEXT_STATE], idx
# no hit
return ROLLERS, DEFAULT
def jackpot_route(statedict, name,index):
'''... won jackpot: 2 EXTRA_POINT points and wheel of fortune 40'''
counter[JP]=1
counter[EXTRA_POINT]+=2
counter[EXTRA_TOTAL]+=2
return WHEEL40, DEFAULT
def extra_plus_process(statedict, name,index):
'''... increment extra plus counters and win 10C ...'''
if counter[EXTRA_PLUS_LEFT] < EXTRAGAME_BOUNDS or \
counter[EXTRA_PLUS_RIGHT] < EXTRAGAME_BOUNDS:
if counter[EXTRA_PLUS_LEFT] > counter[EXTRA_PLUS_RIGHT]:
counter[EXTRA_PLUS_RIGHT]+=1
else:
counter[EXTRA_PLUS_LEFT]+=1
carry[MONEY] = 10
return WIN, DEFAULT
def next_row_process(statedict, name,index):
    '''... if more than one winning row, check for the next one ...'''
global winning_combinations_stack
global next_combination
global naechste_linie
if len(winning_combinations_stack) > 0:
idx = winning_combinations_stack[0][1]
naechste_linie = winning_combinations_stack[0][2]
winning_combinations_stack = winning_combinations_stack[1:]
winning_plan = statedict[name][TABLE][game_mode]()
next_combination = winning_plan[idx][COLUMN_WIN_COMBINATIONS]
put_into_account(statedict[name][TABLE], idx, winning_plan[idx])
return winning_plan[idx][COLUMN_NEXT_STATE], idx
else:
# should not happen
print "ERROR: There is no next winning row!!!"
sys.exit(1)
# logging
def print_head():
'''... print counters ...'''
print "-------------------------------------------------------------"
print "Counters:"
c = (JP, TURBO, SUPER, FOUR_ROW, MONEY, EXTRA_POINT, EXTRA_PLUS_LEFT, EXTRA_PLUS_RIGHT, POWER_RUN_COUNTER)
for z in counter.keys():
if counter[z]!=0 and z in c:
print z, counter[z]
print "-------------------------------------------------------------"
def print_bottom():
''' ... print carry overs ...'''
for z in carry.keys():
if carry[z]>0:
print z, carry[z]
def log_normal(frame_nr, statedict, name, index):
'''... normal rotor game has been finished print result ...'''
global num_games
global picture
num_games+=1
if LOGGING or INTERACTIVE:
print "#############################################################"
print "Game No.: %s is a %s" % (str(frame_nr),name)
print "Mode: ",game_mode
if ADVANCED_LOGGING:
print "Rotor Picture:"
print picture[:3]
print picture[3:]
if len(winning_combinations)>0:
print "Winning combinations:"
for prize in winning_combinations:
print prize
print_head()
print_bottom()
def log_draw(frame_nr, statedict, name, index):
'''... risk draw has been finished ...'''
global num_draws
num_draws+=1
stats[name][statedict[name][TABLE][SYMBOLS][index]]+=1
if LOGGING or INTERACTIVE:
print "Game No. %s features a %s:" % (str(frame_nr), name)
print_bottom()
def log_wheel(frame_nr, statedict, name, index):
    '''... wheel of fortune has been finished ...'''
global num_wheel
num_wheel+=1
stats[name][statedict[name][TABLE][SYMBOLS][index]]+=1
if LOGGING or INTERACTIVE:
print "Game No.: %s features a %s:" % (str(frame_nr), name)
print_bottom()
def log_risiko(frame_nr, statedict, name, index):
'''... risk step has been evaluated ...'''
global num_risk_steps
num_risk_steps+=1
if LOGGING or INTERACTIVE:
if not risk_accept_flg:
print "Risk accepted"
else:
print "Risk declined, price accepted!"
print_bottom()
def log_win(frame_nr, statedict, name, index):
'''... win!!!! ....'''
global num_won
num_won+=1
if LOGGING or INTERACTIVE:
print "Game No. %s wins! Prize will be put onto account!" % str(frame_nr)
def log_jackpot(frame_nr, statedict, name, index):
'''... Jackpot win!!! ...'''
global num_jackpot
num_jackpot+=1
if LOGGING or INTERACTIVE:
print "Game No. %s features a JACKPOT!!!" % (str(frame_nr))
print_bottom()
def log_srplus(frame_nr, statedict, name, index):
'''... extra point plus has been won ...'''
if LOGGING or INTERACTIVE:
print "EXTRA_POINT-Plus won!!!"
print_bottom()
def log_next_row(frame_nr, statedict, name, index):
'''... risk failed check next row ...'''
global next_combination
if LOGGING or INTERACTIVE:
print "There is another winning row:"
print next_combination
print_bottom()
# key handling and print game ro console
def handle_keys_textmode(state, index):
global risk_fail_flg
global risk_win_flg
global risk_accept_flg
global cheatrun
global cheatcode
global macro_list
global gfx_toggle
global TEXT_MODE
risk_fail_flg = 0
risk_win_flg = 0
risk_accept_flg = 0
cheatrun = 0
if state != ROLLERS and state != RISK_L and state != RISK_R:
return state
if (state==RISK_L or state==RISK_R) and index==4:
return state
if INTERACTIVE:
# interactive user input
strg = "Next State: %s \nInput: " % state
xinput = raw_input(strg)
if state == ROLLERS:
try:
if xinput.startswith("SUPER "):
i = int(xinput[7:])
carry[SUPER]=i
return WIN
elif xinput.startswith("TURBO "):
i = int(xinput[4:])
carry[TURBO]=i
return WIN
elif xinput.startswith("EXTRA "):
i = int(xinput[3:])
carry[EXTRA_POINT]=i
return WIN
elif xinput.startswith("JACKS "):
i = int(xinput[3:])
carry[JP]=i
return WIN
elif xinput.startswith("MONEY "):
i = int(xinput[5:])
carry[MONEY]=i
return WIN
except:
pass
print " "
if xinput == "q":
sys.exit()
if state == RISK_L or state == RISK_R:
if xinput == "1": risk_fail_flg = 1
elif xinput == "5": risk_win_flg = 1
elif xinput == "4": risk_accept_flg = 1
elif state == ROLLERS:
cheatrun = 1
cheatcode = xinput
elif MACRO:
# handle macro string
if len(macro_list)==0: return END_OF_GAME
xinput = macro_list[0]
if LOGGING:
strg = "Next state: %s \nInput: %s " % (state, xinput)
print strg
macro_list = macro_list[1:]
if state == RISK_L or state == RISK_R:
if xinput == "1": risk_fail_flg = 1
elif xinput == "5": risk_win_flg = 1
elif xinput == "4": risk_accept_flg = 1
elif state == ROLLERS:
cheatrun = 1
cheatcode = xinput
else:
        # non-interactive game loop
if state == RISK_R and index == 10 and RECEIVING_40:
# always decline risk draw
risk_accept_flg = 1
return state
# state dispatch table
STATES = {
ROLLERS :
{ TABLE : BONUS_SCHEME,
ROWS : WINNING_ROWS,
ROUTE : normal_process,
LOG : log_normal
},
DRAW20 :
{ TABLE : DRAW_20,
ROUTE : draw_process,
LOG : log_draw
},
DRAW30 :
{ TABLE : DRAW_30,
ROUTE : draw_process,
LOG : log_draw
},
DRAW200 :
{ TABLE : DRAW_200,
ROUTE : draw_process,
LOG : log_draw
},
DRAW300 :
{ TABLE : DRAW_300,
ROUTE : draw_process,
LOG : log_draw
},
DRAW2S :
{ TABLE : DRAWS2S,
ROUTE : draw_process,
LOG : log_draw
},
DRAW3S :
{ TABLE : DRAWS3S,
ROUTE : draw_process,
LOG : log_draw
},
FAIL40 :
{ TABLE : DRAW_FAIL_40,
ROUTE : draw_process,
LOG : log_wheel
},
FAIL50 :
{ TABLE : DRAW_FAIL_50,
ROUTE : draw_process,
LOG : log_wheel
},
WHEEL40 :
{ TABLE : WHEEL_40,
ROUTE : draw_process,
LOG : log_wheel
},
WHEEL60 :
{ TABLE : WHEEL_60,
ROUTE : draw_process,
LOG : log_wheel
},
WHEEL80 :
{ TABLE : WHEEL_80,
ROUTE : draw_process,
LOG : log_wheel
},
DRAW3BAR :
{ TABLE : DRAWS3BAR,
ROUTE : draw_process,
LOG : log_wheel
},
DRAWGOLD :
{ TABLE : DRAW_GOLD,
ROUTE : draw_process,
LOG : log_wheel
},
DRAW3X7 :
{ TABLE : DRAW_TRIPEL7,
ROUTE : draw_process,
LOG : log_wheel
},
RISK_R :
{ TABLE : RISK_RIGHT,
ROUTE : risk_dispatch_process,
LOG : log_risiko
},
RISK_L :
{ TABLE : RISK_LEFT,
ROUTE : risk_dispatch_process,
LOG : log_risiko
},
JACKPOT :
{ ROUTE : jackpot_route,
LOG : log_jackpot
},
WIN :
{ ROUTE : win_route,
LOG : log_win
},
EXTRA_PLUS :
{ ROUTE : extra_plus_process,
LOG : log_srplus
},
NEXT_ROW :
{ TABLE : BONUS_SCHEME,
ROWS : WINNING_ROWS,
ROUTE : next_row_process,
LOG : log_next_row
},
START4S :
{ TABLE : WHEEL_START4S,
ROUTE : draw_process,
LOG : log_wheel
},
START8S :
{ TABLE : WHEEL_START8S,
ROUTE : draw_process,
LOG : log_wheel
},
START8T :
{ TABLE : WHEEL_START8T,
ROUTE : draw_process,
LOG : log_wheel
},
START20T :
{ TABLE : WHEEL_START20T,
ROUTE : draw_process,
LOG : log_wheel
},
START40T :
{ TABLE : WHEEL_START40T,
ROUTE : draw_process,
LOG : log_wheel
},
START6S :
{ TABLE : WHEEL_START6S,
ROUTE : draw_process,
LOG : log_wheel
},
START12S :
{ TABLE : WHEEL_START12S,
ROUTE : draw_process,
LOG : log_wheel
},
START12T :
{ TABLE : WHEEL_START12T,
ROUTE : draw_process,
LOG : log_wheel
},
START25T :
{ TABLE : WHEEL_START25T,
ROUTE : draw_process,
LOG : log_wheel
},
START50T :
{ TABLE : WHEEL_START50T,
ROUTE : draw_process,
LOG : log_wheel
}
}
def get_risk_step(state, next_state, win_index):
    '''... determines the current risk step ...'''
if (next_state == RISK_L):
if state == ROLLERS or state == NEXT_ROW:
symbol = STATES[state][TABLE][game_mode]()\
[win_index][COLUMN_SYMBOL]
if symbol != NOPE:
return RISK_LEFT[SYMBOLS].index(symbol)
else:
return win_index
else:
return RISK_LEFT[SYMBOLS].\
index(STATES[state][TABLE][SYMBOLS][win_index])
elif (next_state == RISK_R):
if state == ROLLERS or state == NEXT_ROW:
symbol = STATES[state][TABLE][game_mode]()\
[win_index][COLUMN_SYMBOL]
if symbol != NOPE:
return RISK_RIGHT[SYMBOLS].index(symbol)
else:
return win_index
else:
return RISK_RIGHT[SYMBOLS].\
index(STATES[state][TABLE][SYMBOLS][win_index])
else:
return win_index
def main():
'''game loop'''
global di
global do
global winning_line
global anim_einlauf_flg
global anim_risiko_anbieten_flg
global rad_done
global tick_mark
global anim_abbruch
state = ROLLERS
state_old = ROLLERS
tick = 1
ze = 0
game_cnt = 1
flg = 1
index = UNDEFINED
old_index = UNDEFINED
events = []
while (state != END_OF_GAME and game_cnt < NUM_GAMES+1):
# process pre rules
state = pre_rules(state, index)
# handle keys
state = handle_keys_textmode(state, index)
# game over
if state == END_OF_GAME:
break
# delete carry over counters
if state != WIN:
for z in carry.keys():
carry[z] = 0
# determine next state
next_state, index = STATES[state][ROUTE] \
(STATES,state,index)
# process post rules
next_state = post_rules \
(state, next_state, index)
# logging
STATES[state][LOG](game_cnt,STATES,state,index)
# set risk step
old_step = index
index = get_risk_step(state, next_state, index)
# count games
if next_state == ROLLERS:
game_cnt+=1
# set next state
old_state = state
old_index = old_step
state = next_state
tick+=1
def print_stats():
'''... prints statistics ...'''
quote = counter[MONEY_TOTAL]*100.0/(NUM_GAMES*DEPOSIT)
print "--------------------------------------------------------"
print "Rotor games: ", num_games
print "--------------------------------------------------------"
print "--------------------------------------------------------"
print "Payout Rate: %5.2f" % quote
print "--------------------------------------------------------"
print "--------------------------------------------------------"
print "Occurrence of symbols:"
print "--------------------------------------------------------"
print " "
sortiert = stats.keys()
sortiert.sort()
for name in sortiert:
print name
print "--------------------------------------------------------"
cash = 0
top = 0
master = 0
for symbol in stats[name]:
i = STATES[name][TABLE][SYMBOLS].index(symbol)
a = stats[name][symbol]
summe = a*STATES[name][TABLE][PRIZE][i]
wahr = STATES[name][TABLE][PROPABILITY][i]
            print symbol+(20-len(symbol))*" ", "times", \
                str(a)+(10-len(str(a)))*" ","Prob.", wahr
if symbol[-1]=="C": cash+=summe
if symbol[-1]=="T": top+=summe
if symbol[-1]=="M": master+=summe
print "--------------------------------------------------------"
print "Sum of Turbo-Games:",top," Super-Games:",master," Money:",cash
print " "
print "--------------------------------------------------------"
print "--------------------------------------------------------"
print "Sum of all draws: ", num_draws
print "--------------------------------------------------------"
print "--------------------------------------------------------"
print "Sum of all prizes: ", num_won
print "--------------------------------------------------------"
print "--------------------------------------------------------"
print "Sum of all wheels of forutune: ", num_wheel
print "--------------------------------------------------------"
print "--------------------------------------------------------"
print "All counters:"
print "--------------------------------------------------------"
summen = []
sortiert = counter.keys()
sortiert.sort()
for z in sortiert:
if not z.startswith("Sum"):
print z+(20-len(z))*" ", "Value:", counter[z]
print "--------------------------------------------------------"
else:
summen.append(z)
print "--------------------------------------------------------"
print "Sum of all prizes"
print "--------------------------------------------------------"
for s in summen:
print s, counter[s]
print "--------------------------------------------------------"
print "--------------------------------------------------------"
print "Payout Rate: %5.2f" % quote
print "--------------------------------------------------------"
def create_inner_string(strg):
inner = ''
int_puff = ""
i = 0
do_int = 0
c = ''
while i < len(strg):
if (strg[i] != '[' and strg[i] != ']'\
and not do_int):
c = strg[i]
if strg[i] == '[':
do_int = 1
elif strg[i] == ']':
do_int = 0
x = int(int_puff)-1
inner+=x*c
int_puff = ''
c = ''
elif do_int:
int_puff+=strg[i]
elif not do_int:
inner+=c
i+=1
return inner
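# Expansion sketch (hypothetical input): 'c[N]' repeats the character c
# N times, so create_inner_string('a[3]b') yields 'aaab'.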
def create_macro(strg):
'''... creates code for macro mode ...'''
    tokens = strg.split('X')
i = 0
result = ''
try:
if len(tokens) > 1:
if len(tokens)%2==0:
maxi = len(tokens)-1
else:
maxi = len(tokens)-2
while i < maxi:
result+=create_inner_string(tokens[i])*int(tokens[i+1])
i+=2
if maxi == len(tokens)-2:
result+=create_inner_string(tokens[-1])
else:
result = create_inner_string(tokens[0])
except:
print "ERROR: Macro string does not fit!!!"
sys.exit(1)
return result
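# Macro sketch (hypothetical input): 'X' pairs a group with a repeat count,
# e.g. create_macro('a[3]bX2') expands to 'aaabaaab'; the result is consumed
# one character per game step by handle_keys_textmode.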
exclude = (ROLLERS, WIN, JACKPOT, RISK_R, RISK_L, EXTRA_PLUS, NEXT_ROW)
for name in STATES.keys():
if name not in exclude:
stats[name] = {}
for name in stats.keys():
for symbol in STATES[name][TABLE][SYMBOLS]:
stats[name][symbol] = 0
# command line parameters
if __name__ == '__main__':
try:
opts,args = getopt.getopt(sys.argv[1:],"abrtlim:n:xz")
for o,v in opts:
if o == '-b':
if MACRO or TOTAL or RECEIVING_40 or INTERACTIVE or TEST:
print "ERROR: Option conflict!!!"
sys.exit(1)
BLIND = 1
TEXT_MODE=1
print "Blind game mode active...\n"
elif o == '-r':
if BLIND or MACRO or INTERACTIVE or RECEIVING_40 or TEST:
print "ERROR: Option conflict!!!"
sys.exit(1)
TOTAL = 1
TEXT_MODE=1
print "Total Risk Mode activated...\n"
elif o == '-a':
if BLIND or TOTAL or MACRO or INTERACTIVE or TEST:
print "ERROR: Option conflict!!!"
sys.exit(1)
RECEIVING_40 = 1
TEXT_MODE=1
print "Accept 40TS Mode activated...\n"
elif o == '-l':
if MACRO or INTERACTIVE:
print "ERROR: Option conflict!!!"
sys.exit(1)
LOGGING = 1
print "Logging activated...\n"
elif o == '-i':
if MACRO or TOTAL or BLIND or RECEIVING_40 or TEST:
print "ERROR: Option conflict!!!"
sys.exit(1)
print "Interactive mode...\n"
INTERACTIVE = 1
LOGGING = 1
elif o == '-m':
if BLIND or TOTAL or INTERACTIVE or RECEIVING_40 or TEST:
print "ERROR: Option conflict!!!"
sys.exit(1)
MACRO = 1
LOGGING = 1
letters = '[]abcdefghijklmnopqrstuvwxyz0123456789X'
for c in v:
if c not in letters:
print "ERROR: Macro Option has to be in "+letters+" sein!!!"
sys.exit(1)
print "Macro Modus..."
print "Code: ", v
print "\n"
macro_list = create_macro(v)
elif o == '-n':
NUM_GAMES = int(v)
elif o == "-t":
TEST = 1
elif o == "-z":
ADVANCED_LOGGING = 0
elif o == "-x":
TEXT_MODE = 1
except:
print __doc__
sys.exit(1)
if len(opts)==0: INTERACTIVE = 1
ti = time.time()
if not TEST:
main()
print "\n"
print "*************************"
print "* RESULT *"
print "*************************\n"
print "Time:", time.time()-ti
print_stats()
    sys.exit(0)
|
alexdd/SLOTMACHINE_PY
|
slotmachine_main.py
|
Python
|
gpl-3.0
| 44,733
|
[
"CASINO"
] |
6ef1cf10318fa6a1db183c3e0dfffe4cc3dac13d3c095fa8fbeb0584d37c8d49
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Michael Rabbitt.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Warning: URL formats of "raw" gists are undocumented and subject to change.
# See also: http://developer.github.com/v3/gists/
#
# Inspired by "[Python] reStructuredText GitHub Gist directive"
# (https://gist.github.com/brianhsu/1407759), public domain by Brian Hsu
'''
Extension to Python Markdown for Embedded Gists (gist.github.com)
Basic Example:
>>> import markdown
>>> text = """
... Text of the gist:
... [:gist: 4747847]
... """
>>> html = markdown.markdown(text, [GistExtension()])
>>> print(html)
<p>Text of the gist:
<div class="gist">
<script src="https://gist.github.com/4747847.js"></script>
<noscript>
<pre>import this</pre>
</noscript>
</div>
</p>
Example with filename:
>>> import markdown
>>> text = """
... Text of the gist:
... [:gist: 4747847 zen.py]
... """
>>> html = markdown.markdown(text, [GistExtension()])
>>> print(html)
<p>Text of the gist:
<div class="gist">
<script src="https://gist.github.com/4747847.js?file=zen.py"></script>
<noscript>
<pre>import this</pre>
</noscript>
</div>
</p>
Example using reStructuredText syntax:
>>> import markdown
>>> text = """
... Text of the gist:
... .. gist:: 4747847 zen.py
... """
>>> html = markdown.markdown(text, [GistExtension()])
>>> print(html)
<p>Text of the gist:
<div class="gist">
<script src="https://gist.github.com/4747847.js?file=zen.py"></script>
<noscript>
<pre>import this</pre>
</noscript>
</div>
</p>
Error Case: non-existent Gist ID:
>>> import markdown
>>> text = """
... Text of the gist:
... [:gist: 0]
... """
>>> html = markdown.markdown(text, [GistExtension()])
>>> print(html)
<p>Text of the gist:
<div class="gist">
<script src="https://gist.github.com/0.js"></script>
<noscript><!-- WARNING: Received a 404 response from Gist URL: https://gist.github.com/raw/0 --></noscript>
</div>
</p>
Error Case: non-existent file:
>>> import markdown
>>> text = """
... Text of the gist:
... [:gist: 4747847 doesntexist.py]
... """
>>> html = markdown.markdown(text, [GistExtension()])
>>> print(html)
<p>Text of the gist:
<div class="gist">
<script src="https://gist.github.com/4747847.js?file=doesntexist.py"></script>
<noscript><!-- WARNING: Received a 404 response from Gist URL: https://gist.github.com/raw/4747847/doesntexist.py --></noscript>
</div>
</p>
'''
from __future__ import unicode_literals, print_function
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import AtomicString
from markdown.util import etree
from nikola.utils import get_logger, req_missing, STDERR_HANDLER
LOGGER = get_logger('compile_markdown.mdx_gist', STDERR_HANDLER)
try:
import requests
except ImportError:
requests = None # NOQA
GIST_JS_URL = "https://gist.github.com/{0}.js"
GIST_FILE_JS_URL = "https://gist.github.com/{0}.js?file={1}"
GIST_RAW_URL = "https://gist.github.com/raw/{0}"
GIST_FILE_RAW_URL = "https://gist.github.com/raw/{0}/{1}"
GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\d+)(?:\s*(?P<filename>.+?))?\s*\]'
GIST_RST_RE = r'(?m)^\.\.\s*gist::\s*(?P<gist_id>\d+)(?:\s*(?P<filename>.+))\s*$'
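# Illustrative matches for the patterns above (hypothetical gist id):
#   GIST_MD_RE matches '[:gist: 4747847]' and '[:gist: 4747847 zen.py]';
#   GIST_RST_RE matches a line of the form '.. gist:: 4747847 zen.py'.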
class GistFetchException(Exception):
'''Raised when attempt to fetch content of a Gist from github.com fails.'''
def __init__(self, url, status_code):
Exception.__init__(self)
self.message = 'Received a {0} response from Gist URL: {1}'.format(
status_code, url)
class GistPattern(Pattern):
""" InlinePattern for footnote markers in a document's body text. """
def __init__(self, pattern, configs):
Pattern.__init__(self, pattern)
def get_raw_gist_with_filename(self, gist_id, filename):
url = GIST_FILE_RAW_URL.format(gist_id, filename)
resp = requests.get(url)
if not resp.ok:
raise GistFetchException(url, resp.status_code)
return resp.text
def get_raw_gist(self, gist_id):
url = GIST_RAW_URL.format(gist_id)
resp = requests.get(url)
if not resp.ok:
raise GistFetchException(url, resp.status_code)
return resp.text
def handleMatch(self, m):
gist_id = m.group('gist_id')
gist_file = m.group('filename')
gist_elem = etree.Element('div')
gist_elem.set('class', 'gist')
script_elem = etree.SubElement(gist_elem, 'script')
if requests:
noscript_elem = etree.SubElement(gist_elem, 'noscript')
try:
if gist_file:
script_elem.set('src', GIST_FILE_JS_URL.format(
gist_id, gist_file))
raw_gist = (self.get_raw_gist_with_filename(
gist_id, gist_file))
else:
script_elem.set('src', GIST_JS_URL.format(
gist_id))
raw_gist = (self.get_raw_gist(gist_id))
# Insert source as <pre/> within <noscript>
pre_elem = etree.SubElement(noscript_elem, 'pre')
pre_elem.text = AtomicString(raw_gist)
except GistFetchException as e:
LOGGER.warn(e.message)
warning_comment = etree.Comment(' WARNING: {0} '.format(e.message))
noscript_elem.append(warning_comment)
else:
req_missing('requests', 'have inline gist source', optional=True)
return gist_elem
class GistExtension(Extension):
def __init__(self, configs={}):
# set extension defaults
self.config = {}
# Override defaults with user settings
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
gist_md_pattern = GistPattern(GIST_MD_RE, self.getConfigs())
gist_md_pattern.md = md
md.inlinePatterns.add('gist', gist_md_pattern, "<not_strong")
gist_rst_pattern = GistPattern(GIST_RST_RE, self.getConfigs())
gist_rst_pattern.md = md
md.inlinePatterns.add('gist-rst', gist_rst_pattern, ">gist")
md.registerExtension(self)
def makeExtension(configs=None):
return GistExtension(configs)
if __name__ == '__main__':
import doctest
# Silence user warnings thrown by tests:
doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE +
doctest.REPORT_NDIFF))
|
Proteus-tech/nikola
|
nikola/plugins/compile/markdown/mdx_gist.py
|
Python
|
mit
| 7,718
|
[
"Brian"
] |
e6d8749d6425cf5b5320c54a86d168caa0d3368ebef6e5c6c6e9ccec74178d16
|
from subprocess import check_call
from future.moves.subprocess import check_output
def test_redis_build(tmpdir):
dest = tmpdir.mkdir('default_configs')
check_call([
'env2config',
'build',
'redis',
'3.0.1',
str(dest),
])
subpaths = list(dest.visit())
assert dest / 'redis' / '3.0.1' / 'redis.conf' in subpaths
return dest
def test_redis_build_and_inject(tmpdir):
dest = test_redis_build(tmpdir)
redis_conf = str(check_output([
'env',
'ENV_INJECT=redis.conf:-',
'REDIS_APPENDONLY=yes',
'REDIS_ASDF=asdf',
'env2config',
'inject',
str(dest),
]))
assert 'replacing default' in redis_conf
assert 'appendonly yes' in redis_conf
assert 'not matching any default' in redis_conf
assert 'asdf asdf' in redis_conf
|
dacjames/env2config
|
tests/test_redis.py
|
Python
|
mit
| 863
|
[
"VisIt"
] |
e2b6cde81d06e7afe01c9fb44b2641492f43237e7c7ed8847c72ac35de42c3e8
|
# -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev, Roman Chernikov"
__date__ = "22 Jan 2016"
import numpy as np
import matplotlib.pyplot as plt
# path to xrt:
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import xrt.backends.raycing.materials as rm
crystal = rm.CrystalSi(hkl=(1, 1, 1))
E = 9000
dtheta = np.linspace(-20, 80, 501)
theta = crystal.get_Bragg_angle(E) + dtheta*1e-6
curS, curP = crystal.get_amplitude(E, np.sin(theta))
print(crystal.get_a())
print(crystal.get_F_chi(E, 0.5/crystal.d))
print(u'Darwin width at E={0:.0f} eV is {1:.5f} µrad for s-polarization'.
format(E, crystal.get_Darwin_width(E) * 1e6))
plt.plot(dtheta, abs(curS)**2, 'r', dtheta, abs(curP)**2, 'b')
plt.gca().set_xlabel(u'$\\theta - \\theta_{B}$ (µrad)')
plt.gca().set_ylabel(r'reflectivity')
plt.show()
|
kklmn/xrt
|
examples/withRaycing/00_xRayCalculator/calc_crystal.py
|
Python
|
mit
| 848
|
[
"CRYSTAL"
] |
1be26fc6715c861dbf3908cf33248495cb287326b76d17b6bf8728a242671efa
|
# ipython --pylab
# import some needed functions
from scipy.integrate import odeint
from numpy import *
from matplotlib.pyplot import *
###seterr(all='raise')
import pint
ureg = pint.UnitRegistry()
_0 = ureg['']
_prob = _0 # TODO: custom 'probability' unit?
_m0 = _0/1000
_m = ureg['meter']
_s = ureg['second']
_m_s = _m/_s
_m_s2 = _m_s/_s
_mV = ureg['mV']
_V = ureg['V']
_mM = ureg['millimol/liter']
_microM = ureg['micromol/liter']
_M = ureg['mol/liter']
_ms = _s/1000
_nm = ureg['nanometer']
_nS = ureg['nanomho']
_pF = ureg['picofarad']
_microN = ureg['micronewton']
_pN = ureg['piconewton']
_l = ureg['liter']
_S = ureg['siemens']
# constants
kB = 1.38064852e-23 * ureg['J/K'] # Boltzmann constant
F = 96485.3329 * ureg['s A / mol'] # Faraday constant
R = 8.3144598 * ureg['J / mol / K'] # universal gas constant
# parameters equation (1)
Cm = 10*_pF # cell capacitance
# parameters equation (2) (3) and (4)
Lambda = 2.8*_microN*_s/_m
X0 = 12*_nm # typical for the bullfrog
Z = 0.7*_pN # gating force typical for bullfrog
T = 295.15 * ureg['K'] # temperature
EMET = 0*_mV # reversal potential
### gMET = 0.65*_nS # maximum conductance Met channels according to Holton and Hudspeth
gMET = 0*_nS
K = 1350*_microN/_m # stiffness hair bundle
# parameters equation (8)
EK = -95*_mV # Potassium equilibrium potential
# parameters equation (9a)
gh = 2.2*_nS # maximum conductance
Eh = -45*_mV # reversal potential of h
# parameters equation (10)
PDRK = 2.4*pow(10,-14)*_l/_s # maximum permeability of DRK
Kin = 112*_mM #intracellular K+ concentration
Kex = 2*_mM #extracellular K+ concentration
# parameters equation (11a)
gCa = 1.2*_nS # the maximum Ca2+ conductance
ECa = 42.5*_mV # todo
# parameters equation (12)
PBKS = 2*pow(10,-13)*_l/_s # maximum permeability of BKS
PBKT = 14*pow(10,-13)*_l/_s # maximum permeability of BKT
# parameters equation (14)
K1_0 = 6*_microM
K2_0 = 45*_microM
K3_0 = 20*_microM
k_1 = 300/_s
k_2 = 5000/_s
k_3 = 1500/_s
delta1 = 0.2*_0
delta2 = 0*_0
delta3 = 0.2*_0
VA = 33*_mV
z = 2*_0
alphac0 = 450/_s
betac = 2500/_s
# for equation (A29) in [14], replacing the 0.00061 constant
U = 0.005*_0
epsilon = 3.4e-5*_0
Cvol = 1.2*ureg['picoliter']
point00061 = U/(z*F*Cvol*epsilon)
# parameters equation (17)
EL = 0*_mV #todo
# make explicit the unit for each ODE in our ODE system
STATE_UNITS = [_mV, _nm, _0, _0, _0, _0, _0, _prob, _prob, _prob, _prob, _mM, _0]
def state_to_quantities(state):
return [ mag * unit for (mag, unit) in zip(state, STATE_UNITS) ]
def state_to_numbers(state, unit_factor=ureg['']):
return [ val.to(unit/unit_factor).magnitude for (val, unit) in zip(state, STATE_UNITS) ]
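# Round-trip sketch (hypothetical values): state_to_quantities attaches the
# per-ODE units from STATE_UNITS and state_to_numbers strips them again, so
# state_to_numbers(state_to_quantities(s)) == s for a plain list of floats s.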
# define our ODE function
def hair_cell(state, t):
"""
Purpose: simulate TODO model for the action potential using
the equations from TODO.
Input: state (TODO),
t (time),
and the params (parameters of neuron; see paper).
Output: statep (state derivatives).
"""
(V, X, mK1f, mK1s, mh, mDRK, mCa, C1, C2, O2, O3, Ca, hBKT) = state_to_quantities(state)
FV = F*V
RT = R*T
#computation 8 computes the delayed rectifier current
IK1 = gK1*(V-EK)*(0.7*mK1f+0.3*mK1s) # equation 8a
mK1infinite = pow(1+ exp((V+110*_mV)/(11*_mV)),-1) # equation 8b (originally 8c)
tauK1f = 0.7*_ms*exp(-(V+120*_mV)/(43.8*_mV))+0.04*_ms # equation 8c (originally 8d)
tauK1s = 14.1*_ms*exp(-(V+120*_mV)/(28*_mV))+0.04*_ms # equation 8d (originally 8e)
dmK1f_dt = (mK1infinite-mK1f)/tauK1f # equation 8e1 (originally 8b1)
dmK1s_dt = (mK1infinite-mK1s)/tauK1s # equation 8e2 (originally 8b2)
#computation 9 computes the Cation h-current
Ih = gh*(V-Eh)*(3*pow(mh,2)*(1-mh)+pow(mh,3))# equation (9a)
mhinfinite = pow(1+exp((V+87*_mV)/(16.7*_mV)),-1) # equation 9b (originally 9c)
tauh = 63.7*_ms+135.7*_ms*exp(-pow((V+91.4*_mV)/(21.2*_mV),2)) # equation 9c (originally 9d)
dmh_dt = (mhinfinite-mh)/tauh # equation 9d (9b)
#computation 10 computes The DRK current
IDRK = PDRK * ((V*pow(F,2))/RT) * ((Kin - Kex * exp(-FV/RT))/(1-exp(-FV/RT))) * pow(mDRK,2) # equation (10a)
alphaDRK = pow(3.2*_ms*exp(-V/(20.9*_mV))+3*_ms, -1) # equation (10e)
betaDRK = pow(1467*_ms*exp(V/(5.96*_mV))+9*_ms, -1) # equation (10f)
tauDRK = pow(alphaDRK+betaDRK,-1) # equation (10d)
mDRKinfinite = pow(1+exp(-(V+48.3*_mV)/(4.19*_mV)),-0.5) # equation (10c)
dmDRK_dt = (mDRKinfinite-mDRK)/tauDRK # equation (10b)
# mdrkinf= 0.59296250511768533
# alfdrk= 25.327338763213305
# betdrk= 107.66080256193725
# dmdrk_dt= -22.357289277952258
# Idrk= 4.0827377021036806E-011
# t: 1.7311500000000002
#computation 11 computes Voltage-gated Ca2+current
ICa = gCa*pow(mCa,3)*(V-ECa) # equation (11a)
mCainfinite = pow(1+exp(-(V+55*_mV)/(12.2*_mV)),-1) # equation (11c)
tauCa = 0.046*_ms+0.325*_ms*exp(-pow((V+77*_mV)/(51.67*_mV), 2)) # equation (11d)
dmCa_dt = (mCainfinite-mCa)/tauCa # equation (11b)
#[14] equation A14 and A15
k1 = k_1/(K1_0*exp(-delta1*z*FV/RT))
k2 = k_2/(K2_0*exp(-delta2*z*FV/RT))
k3 = k_3/(K3_0*exp(-delta3*z*FV/RT))
alphac = alphac0*exp(V/VA)
#computation 14 computes The kinetics of Ca-activated BK currents
C0 = 1*_prob-(C1+C2+O2+O3) # equation (14e)
dC1_dt = k1*Ca*C0+k_2*C2-(k_1+k2*Ca)*C1 # equation (14a)
dC2_dt = k2*Ca*C1+(alphac)*(O2)-(k_2+betac)*C2 # equation (14b)
dO2_dt = betac*C2+k_3*O3-(alphac +k3*Ca)*O2 # equation (14c)
dO3_dt = k3*Ca*O2-k_3*O3 # equation (14d)
#computation 15 computes the dynamics of the Ca2+ concentration
dCa_dt = -point00061*ICa-2800/_s*Ca # equation (15)
hBKTinfinite = pow(1+exp((V+61.6*_mV)/(3.65*_mV)),-1) # equation (16b)
tauBKT = 2.1*_ms+9.4*_ms*exp(-pow(((V+66.9*_mV)/(17.7*_mV)),2)) # equation (16c)
dhBKT_dt = (hBKTinfinite-hBKT)/tauBKT # equation (16a)
IBKS = b*PBKS * ((V*pow(F,2))/RT) * ((Kin - Kex * exp(-FV/RT))/(1-exp(-FV/RT))) * (O2 + O3) # equation (12)
IBKT = b*PBKT * ((V*pow(F,2))/RT) * ((Kin - Kex * exp(-FV/RT))/(1-exp(-FV/RT))) * (O2 + O3)*hBKT # equation (13)
IL = gL*(V-EL) # equation (17)
dX_dt = (-K*X)/Lambda # equation (4)
POX = 1/(1+exp(-(Z*(X-X0)/(kB*T)))) # equation (3)
IMET = gMET*POX*(V-EMET) # equation (2)
dV_dt = (-IK1-Ih-IDRK-ICa-IBKS-IBKT-IL-IMET) / Cm # equation (1)
statep = [dV_dt, dX_dt, dmK1f_dt, dmK1s_dt, dmh_dt, dmDRK_dt, dmCa_dt, dC1_dt, dC2_dt, dO2_dt, dO3_dt, dCa_dt, dhBKT_dt]
return state_to_numbers(statep, unit_factor=_s)
# simulate
# control parameters
gK1 = 29.25*_nS # maximum conductance of the MET channels
b = 0.01*_0 # dimensionless
gL = 0.174*_nS # leak conductance
# set initial states and time vector
state0 = [-70.*_mV, 0*_nm, 0.1*_0, 0.1*_0, 0.1*_0, 0.1*_0, 0.1*_0, 0.1*_prob, 0.1*_prob, 0.1*_prob, 0.1*_prob, 3*_microM, 0.1*_0]
t = arange(0, 5, 0.0001)
#if False:
#t: 1.7311500000000002
#input_state=[-5.0864253782598942E-002*_V, 0*_nm, 4.4823959651917305E-003*_prob, 3.9063196933636409E-003*_prob, 0.22635496281413278*_prob, 0.76107741412517049*_prob, 0.59362705410106820*_prob, 0.14128592217145175*_prob, 1.8027339494674426E-002*_prob, 0.50349641569042014*_prob, 6.7372605859965212E-002*_prob, 5.3808312530758806E-006*_M , 2.0695504490729426E-002*_prob]
#print("input: ", input_state)
#new_state = hair_cell(state_to_numbers(input_state), 0)
#print("output:", state_to_quantities(new_state))
#expected output: -1.6258547663311815 0.66535236908952200 0.56641281952265254 -1.8347760854750577 -22.357289277952258 -32.525095972570099 -4.0997254733989053 -2.2264539369557497 6.3129513679814124 -9.7525609444206793 -7.7757874071416287E-004 4.7225883106837401
#import sys
#sys.exit()
#t: 1.7311500000000002
#input: -5.0863938611937526E-002 4.4825585706179064E-003 3.9064719696103754E-003 0.22635543716756984 0.76106021973173243 0.59363010900463153 0.14128685371835825 1.8028039962771136E-002 0.50348631821283085 6.7373132758083917E-002 5.3810333925573272E-006 2.0700830182175056E-002
#output: -1.6257312871138283 0.66376213616037072 0.56618909544822105 -1.8348145403769360 -22.353109221187687 -32.514442035503720 -4.0978859378079164 -2.2284867480385913 6.3139509674742840 -9.7512998346415856 -7.7797236871247379E-004 4.7211759592762359
# TODO: set some specific params here, perhaps
# run simulation
state = odeint(hair_cell, state_to_numbers(state0), t, args=())
# plot the results
print("Start plotting...")
"figure(figsize=(8,12))"
subplot(5,3,1)
plot(t, state[:,0])
title('V')
subplot(5,3,4)
plot(t, state[:,1])
title('X')
subplot(5,3,2)
plot(t, state[:,2])
title('mK1f')
subplot(5,3,5)
plot(t, state[:,3])
title('mK1s')
subplot(5,3,8)
plot(t, state[:,4])
title('mh')
subplot(5,3,11)
plot(t, state[:,5])
title('mDRK')
subplot(5,3,14)
plot(t, state[:,6])
title('mCa')
subplot(5,3,6)
plot(t, state[:,7])
title('C1')
subplot(5,3,9)
plot(t, state[:,8])
title('C2')
subplot(5,3,12)
plot(t, state[:,9])
title('O2')
subplot(5,3,15)
plot(t, state[:,10])
title('O3')
subplot(5,3,10)
plot(t, state[:,11])
title('[Ca]')
subplot(5,3,13)
plot(t, state[:,12])
title('hBKT')
xlabel('TIME (sec)')
show()
|
piethanegraaf/haircell-simulation
|
simulating_vestibular_hair_cells.py
|
Python
|
apache-2.0
| 9,978
|
[
"NEURON"
] |
1bb2f3e7846c229a54030487281b0960c6ca472abe9d67494f4a59054a46bb78
|
# -*- coding: utf-8 -*-
"""
functions.py - Miscellaneous functions with no other home
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more infomation.
"""
from __future__ import division
from .python2_3 import asUnicode
from .Qt import QtGui, QtCore, USE_PYSIDE
Colors = {
'b': QtGui.QColor(0,0,255,255),
'g': QtGui.QColor(0,255,0,255),
'r': QtGui.QColor(255,0,0,255),
'c': QtGui.QColor(0,255,255,255),
'm': QtGui.QColor(255,0,255,255),
'y': QtGui.QColor(255,255,0,255),
'k': QtGui.QColor(0,0,0,255),
'w': QtGui.QColor(255,255,255,255),
'd': QtGui.QColor(150,150,150,255),
'l': QtGui.QColor(200,200,200,255),
's': QtGui.QColor(100,100,150,255),
}
SI_PREFIXES = asUnicode('yzafpnµm kMGTPEZY')
SI_PREFIXES_ASCII = 'yzafpnum kMGTPEZY'
from . import getConfigOption, setConfigOptions
from .reload import getPreviousVersion
import numpy as np
import decimal, re
import ctypes
import sys, struct
from . import debug
def siScale(x, minVal=1e-25, allowUnicode=True):
"""
Return the recommended scale factor and SI prefix string for x.
Example::
siScale(0.0001) # returns (1e6, 'μ')
# This indicates that the number 0.0001 is best represented as 0.0001 * 1e6 = 100 μUnits
"""
if isinstance(x, decimal.Decimal):
x = float(x)
try:
if np.isnan(x) or np.isinf(x):
return(1, '')
except:
print(x, type(x))
raise
if abs(x) < minVal:
m = 0
x = 0
else:
m = int(np.clip(np.floor(np.log(abs(x))/np.log(1000)), -9.0, 9.0))
if m == 0:
pref = ''
elif m < -8 or m > 8:
pref = 'e%d' % (m*3)
else:
if allowUnicode:
pref = SI_PREFIXES[m+8]
else:
pref = SI_PREFIXES_ASCII[m+8]
p = .001**m
return (p, pref)
def siFormat(x, precision=3, suffix='', space=True, error=None, minVal=1e-25, allowUnicode=True):
"""
Return the number x formatted in engineering notation with SI prefix.
Example::
siFormat(0.0001, suffix='V') # returns "100 μV"
"""
if space is True:
space = ' '
if space is False:
space = ''
(p, pref) = siScale(x, minVal, allowUnicode)
if not (len(pref) > 0 and pref[0] == 'e'):
pref = space + pref
if error is None:
fmt = "%." + str(precision) + "g%s%s"
return fmt % (x*p, pref, suffix)
else:
if allowUnicode:
plusminus = space + asUnicode("±") + space
else:
plusminus = " +/- "
fmt = "%." + str(precision) + "g%s%s%s%s"
return fmt % (x*p, pref, suffix, plusminus, siFormat(error, precision=precision, suffix=suffix, space=space, minVal=minVal))
def siEval(s):
"""
Convert a value written in SI notation to its equivalent prefixless value
Example::
siEval("100 μV") # returns 0.0001
"""
s = asUnicode(s)
m = re.match(r'(-?((\d+(\.\d*)?)|(\.\d+))([eE]-?\d+)?)\s*([u' + SI_PREFIXES + r']?).*$', s)
if m is None:
raise Exception("Can't convert string '%s' to number." % s)
v = float(m.groups()[0])
p = m.groups()[6]
#if p not in SI_PREFIXES:
#raise Exception("Can't convert string '%s' to number--unknown prefix." % s)
if p == '':
n = 0
elif p == 'u':
n = -2
else:
n = SI_PREFIXES.index(p) - 8
return v * 1000**n
class Color(QtGui.QColor):
def __init__(self, *args):
QtGui.QColor.__init__(self, mkColor(*args))
def glColor(self):
"""Return (r,g,b,a) normalized for use in opengl"""
return (self.red()/255., self.green()/255., self.blue()/255., self.alpha()/255.)
def __getitem__(self, ind):
return (self.red, self.green, self.blue, self.alpha)[ind]()
def mkColor(*args):
"""
Convenience function for constructing QColor from a variety of argument types. Accepted arguments are:
================ ================================================
'c' one of: r, g, b, c, m, y, k, w
R, G, B, [A] integers 0-255
(R, G, B, [A]) tuple of integers 0-255
float greyscale, 0.0-1.0
int see :func:`intColor() <pyqtgraph.intColor>`
(int, hues) see :func:`intColor() <pyqtgraph.intColor>`
"RGB" hexadecimal strings; may begin with '#'
"RGBA"
"RRGGBB"
"RRGGBBAA"
QColor QColor instance; makes a copy.
================ ================================================
"""
err = 'Not sure how to make a color from "%s"' % str(args)
if len(args) == 1:
if isinstance(args[0], basestring):
c = args[0]
if c[0] == '#':
c = c[1:]
if len(c) == 1:
try:
return Colors[c]
except KeyError:
raise Exception('No color named "%s"' % c)
if len(c) == 3:
r = int(c[0]*2, 16)
g = int(c[1]*2, 16)
b = int(c[2]*2, 16)
a = 255
elif len(c) == 4:
r = int(c[0]*2, 16)
g = int(c[1]*2, 16)
b = int(c[2]*2, 16)
a = int(c[3]*2, 16)
elif len(c) == 6:
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
a = 255
elif len(c) == 8:
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
a = int(c[6:8], 16)
elif isinstance(args[0], QtGui.QColor):
return QtGui.QColor(args[0])
elif isinstance(args[0], float):
r = g = b = int(args[0] * 255)
a = 255
elif hasattr(args[0], '__len__'):
if len(args[0]) == 3:
(r, g, b) = args[0]
a = 255
elif len(args[0]) == 4:
(r, g, b, a) = args[0]
elif len(args[0]) == 2:
return intColor(*args[0])
else:
raise Exception(err)
elif type(args[0]) == int:
return intColor(args[0])
else:
raise Exception(err)
elif len(args) == 3:
(r, g, b) = args
a = 255
elif len(args) == 4:
(r, g, b, a) = args
else:
raise Exception(err)
args = [r,g,b,a]
args = [0 if np.isnan(a) or np.isinf(a) else a for a in args]
args = list(map(int, args))
return QtGui.QColor(*args)
def mkBrush(*args, **kwds):
"""
| Convenience function for constructing Brush.
| This function always constructs a solid brush and accepts the same arguments as :func:`mkColor() <pyqtgraph.mkColor>`
| Calling mkBrush(None) returns an invisible brush.
"""
if 'color' in kwds:
color = kwds['color']
elif len(args) == 1:
arg = args[0]
if arg is None:
return QtGui.QBrush(QtCore.Qt.NoBrush)
elif isinstance(arg, QtGui.QBrush):
return QtGui.QBrush(arg)
else:
color = arg
elif len(args) > 1:
color = args
return QtGui.QBrush(mkColor(color))
def mkPen(*args, **kargs):
"""
Convenience function for constructing QPen.
Examples::
mkPen(color)
mkPen(color, width=2)
mkPen(cosmetic=False, width=4.5, color='r')
mkPen({'color': "FF0", width: 2})
mkPen(None) # (no pen)
In these examples, *color* may be replaced with any arguments accepted by :func:`mkColor() <pyqtgraph.mkColor>` """
color = kargs.get('color', None)
width = kargs.get('width', 1)
style = kargs.get('style', None)
dash = kargs.get('dash', None)
cosmetic = kargs.get('cosmetic', True)
hsv = kargs.get('hsv', None)
if len(args) == 1:
arg = args[0]
if isinstance(arg, dict):
return mkPen(**arg)
if isinstance(arg, QtGui.QPen):
return QtGui.QPen(arg) ## return a copy of this pen
elif arg is None:
style = QtCore.Qt.NoPen
else:
color = arg
if len(args) > 1:
color = args
if color is None:
color = mkColor('l')
if hsv is not None:
color = hsvColor(*hsv)
else:
color = mkColor(color)
pen = QtGui.QPen(QtGui.QBrush(color), width)
pen.setCosmetic(cosmetic)
if style is not None:
pen.setStyle(style)
if dash is not None:
pen.setDashPattern(dash)
return pen
def hsvColor(hue, sat=1.0, val=1.0, alpha=1.0):
"""Generate a QColor from HSVa values. (all arguments are float 0.0-1.0)"""
c = QtGui.QColor()
c.setHsvF(hue, sat, val, alpha)
return c
def colorTuple(c):
"""Return a tuple (R,G,B,A) from a QColor"""
return (c.red(), c.green(), c.blue(), c.alpha())
def colorStr(c):
"""Generate a hex string code from a QColor"""
return ('%02x'*4) % colorTuple(c)
def intColor(index, hues=9, values=1, maxValue=255, minValue=150, maxHue=360, minHue=0, sat=255, alpha=255, **kargs):
"""
Creates a QColor from a single index. Useful for stepping through a predefined list of colors.
The argument *index* determines which color from the set will be returned. All other arguments determine what the set of predefined colors will be
Colors are chosen by cycling across hues while varying the value (brightness).
By default, this selects from a list of 9 hues."""
hues = int(hues)
values = int(values)
ind = int(index) % (hues * values)
indh = ind % hues
indv = ind / hues
if values > 1:
v = minValue + indv * ((maxValue-minValue) / (values-1))
else:
v = maxValue
h = minHue + (indh * (maxHue-minHue)) / hues
c = QtGui.QColor()
c.setHsv(h, sat, v)
c.setAlpha(alpha)
return c
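# Usage sketch: step through visually distinct pens for successive curves,
# e.g. pens = [mkPen(intColor(i, hues=9)) for i in range(9)]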
def glColor(*args, **kargs):
"""
Convert a color to OpenGL color format (r,g,b,a) floats 0.0-1.0
Accepts same arguments as :func:`mkColor <pyqtgraph.mkColor>`.
"""
c = mkColor(*args, **kargs)
return (c.red()/255., c.green()/255., c.blue()/255., c.alpha()/255.)
def makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):
"""
Construct a path outlining an arrow with the given dimensions.
The arrow points in the -x direction with tip positioned at 0,0.
    The head width is computed from *headLen* and *tipAngle* (in degrees).
If *tailLen* is None, no tail will be drawn.
"""
headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)
path = QtGui.QPainterPath()
path.moveTo(0,0)
path.lineTo(headLen, -headWidth)
if tailLen is None:
innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)
path.lineTo(innerY, 0)
else:
tailWidth *= 0.5
innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)
path.lineTo(innerY, -tailWidth)
path.lineTo(headLen + tailLen, -tailWidth)
path.lineTo(headLen + tailLen, tailWidth)
path.lineTo(innerY, tailWidth)
path.lineTo(headLen, headWidth)
path.lineTo(0,0)
return path
def affineSlice(data, shape, origin, vectors, axes, order=1, returnCoords=False, **kargs):
"""
Take a slice of any orientation through an array. This is useful for extracting sections of multi-dimensional arrays such as MRI images for viewing as 1D or 2D data.
    The slicing axes are arbitrary; they do not need to be orthogonal to the original data or even to each other. It is possible to use this function to extract arbitrary linear, rectangular, or parallelepiped shapes from within larger datasets. The original data is interpolated onto a new array of coordinates using scipy.ndimage.map_coordinates if it is available (see the scipy documentation for more information about this). If scipy is not available, then a slower implementation of map_coordinates is used.
For a graphical interface to this function, see :func:`ROI.getArrayRegion <pyqtgraph.ROI.getArrayRegion>`
============== ====================================================================================================
**Arguments:**
*data* (ndarray) the original dataset
*shape* the shape of the slice to take (Note the return value may have more dimensions than len(shape))
*origin* the location in the original dataset that will become the origin of the sliced data.
*vectors* list of unit vectors which point in the direction of the slice axes. Each vector must have the same
length as *axes*. If the vectors are not unit length, the result will be scaled relative to the
original data. If the vectors are not orthogonal, the result will be sheared relative to the
original data.
*axes* The axes in the original dataset which correspond to the slice *vectors*
*order* The order of spline interpolation. Default is 1 (linear). See scipy.ndimage.map_coordinates
for more information.
*returnCoords* If True, return a tuple (result, coords) where coords is the array of coordinates used to select
values from the original dataset.
*All extra keyword arguments are passed to scipy.ndimage.map_coordinates.*
--------------------------------------------------------------------------------------------------------------------
============== ====================================================================================================
Note the following must be true:
| len(shape) == len(vectors)
| len(origin) == len(axes) == len(vectors[i])
Example: start with a 4D fMRI data set, take a diagonal-planar slice out of the last 3 axes
* data = array with dims (time, x, y, z) = (100, 40, 40, 40)
* The plane to pull out is perpendicular to the vector (x,y,z) = (1,1,1)
* The origin of the slice will be at (x,y,z) = (40, 0, 0)
* We will slice a 20x20 plane from each timepoint, giving a final shape (100, 20, 20)
The call for this example would look like::
affineSlice(data, shape=(20,20), origin=(40,0,0), vectors=((-1, 1, 0), (-1, 0, 1)), axes=(1,2,3))
"""
try:
import scipy.ndimage
have_scipy = True
except ImportError:
have_scipy = False
# sanity check
if len(shape) != len(vectors):
raise Exception("shape and vectors must have same length.")
if len(origin) != len(axes):
raise Exception("origin and axes must have same length.")
for v in vectors:
if len(v) != len(axes):
raise Exception("each vector must be same length as axes.")
shape = list(map(np.ceil, shape))
## transpose data so slice axes come first
trAx = list(range(data.ndim))
for x in axes:
trAx.remove(x)
tr1 = tuple(axes) + tuple(trAx)
data = data.transpose(tr1)
#print "tr1:", tr1
## dims are now [(slice axes), (other axes)]
## make sure vectors are arrays
if not isinstance(vectors, np.ndarray):
vectors = np.array(vectors)
if not isinstance(origin, np.ndarray):
origin = np.array(origin)
origin.shape = (len(axes),) + (1,)*len(shape)
## Build array of sample locations.
grid = np.mgrid[tuple([slice(0,x) for x in shape])] ## mesh grid of indexes
x = (grid[np.newaxis,...] * vectors.transpose()[(Ellipsis,) + (np.newaxis,)*len(shape)]).sum(axis=1) ## magic
x += origin
## iterate manually over unused axes since map_coordinates won't do it for us
if have_scipy:
extraShape = data.shape[len(axes):]
output = np.empty(tuple(shape) + extraShape, dtype=data.dtype)
for inds in np.ndindex(*extraShape):
ind = (Ellipsis,) + inds
output[ind] = scipy.ndimage.map_coordinates(data[ind], x, order=order, **kargs)
else:
# map_coordinates expects the indexes as the first axis, whereas
# interpolateArray expects indexes at the last axis.
tr = tuple(range(1,x.ndim)) + (0,)
output = interpolateArray(data, x.transpose(tr))
tr = list(range(output.ndim))
trb = []
for i in range(min(axes)):
ind = tr1.index(i) + (len(shape)-len(axes))
tr.remove(ind)
trb.append(ind)
tr2 = tuple(trb+tr)
## Untranspose array before returning
output = output.transpose(tr2)
if returnCoords:
return (output, x)
else:
return output
def interpolateArray(data, x, default=0.0):
"""
N-dimensional interpolation similar to scipy.ndimage.map_coordinates.
This function returns linearly-interpolated values sampled from a regular
grid of data.
*data* is an array of any shape containing the values to be interpolated.
*x* is an array with (shape[-1] <= data.ndim) containing the locations
within *data* to interpolate.
Returns array of shape (x.shape[:-1] + data.shape[x.shape[-1]:])
For example, assume we have the following 2D image data::
>>> data = np.array([[1, 2, 4 ],
[10, 20, 40 ],
[100, 200, 400]])
To compute a single interpolated point from this data::
>>> x = np.array([(0.5, 0.5)])
>>> interpolateArray(data, x)
array([ 8.25])
To compute a 1D list of interpolated locations::
>>> x = np.array([(0.5, 0.5),
(1.0, 1.0),
(1.0, 2.0),
(1.5, 0.0)])
>>> interpolateArray(data, x)
array([ 8.25, 20. , 40. , 55. ])
To compute a 2D array of interpolated locations::
>>> x = np.array([[(0.5, 0.5), (1.0, 2.0)],
[(1.0, 1.0), (1.5, 0.0)]])
>>> interpolateArray(data, x)
array([[ 8.25, 40. ],
[ 20. , 55. ]])
..and so on. The *x* argument may have any shape as long as
```x.shape[-1] <= data.ndim```. In the case that
```x.shape[-1] < data.ndim```, then the remaining axes are simply
broadcasted as usual. For example, we can interpolate one location
from an entire row of the data::
>>> x = np.array([[0.5]])
>>> interpolateArray(data, x)
array([[ 5.5, 11. , 22. ]])
This is useful for interpolating from arrays of colors, vertexes, etc.
"""
prof = debug.Profiler()
nd = data.ndim
md = x.shape[-1]
if md > nd:
raise TypeError("x.shape[-1] must be less than or equal to data.ndim")
# First we generate arrays of indexes that are needed to
# extract the data surrounding each point
fields = np.mgrid[(slice(0,2),) * md]
xmin = np.floor(x).astype(int)
xmax = xmin + 1
indexes = np.concatenate([xmin[np.newaxis, ...], xmax[np.newaxis, ...]])
fieldInds = []
totalMask = np.ones(x.shape[:-1], dtype=bool) # keep track of out-of-bound indexes
for ax in range(md):
mask = (xmin[...,ax] >= 0) & (x[...,ax] <= data.shape[ax]-1)
# keep track of points that need to be set to default
totalMask &= mask
# ..and keep track of indexes that are out of bounds
# (note that when x[...,ax] == data.shape[ax], then xmax[...,ax] will be out
# of bounds, but the interpolation will work anyway)
mask &= (xmax[...,ax] < data.shape[ax])
axisIndex = indexes[...,ax][fields[ax]]
axisIndex[axisIndex < 0] = 0
axisIndex[axisIndex >= data.shape[ax]] = 0
fieldInds.append(axisIndex)
prof()
# Get data values surrounding each requested point
fieldData = data[tuple(fieldInds)]
prof()
## Interpolate
s = np.empty((md,) + fieldData.shape, dtype=float)
dx = x - xmin
# reshape fields for arithmetic against dx
for ax in range(md):
f1 = fields[ax].reshape(fields[ax].shape + (1,)*(dx.ndim-1))
sax = f1 * dx[...,ax] + (1-f1) * (1-dx[...,ax])
sax = sax.reshape(sax.shape + (1,) * (s.ndim-1-sax.ndim))
s[ax] = sax
    s = np.prod(s, axis=0)
result = fieldData * s
for i in range(md):
result = result.sum(axis=0)
prof()
if totalMask.ndim > 0:
result[~totalMask] = default
else:
        if not totalMask:  # totalMask is a 0-d array here; `is False` would never match an ndarray
result[:] = default
prof()
return result
def subArray(data, offset, shape, stride):
"""
Unpack a sub-array from *data* using the specified offset, shape, and stride.
Note that *stride* is specified in array elements, not bytes.
For example, we have a 2x3 array packed in a 1D array as follows::
data = [_, _, 00, 01, 02, _, 10, 11, 12, _]
Then we can unpack the sub-array with this call::
subArray(data, offset=2, shape=(2, 3), stride=(4, 1))
..which returns::
[[00, 01, 02],
[10, 11, 12]]
This function operates only on the first axis of *data*. So changing
the input in the example above to have shape (10, 7) would cause the
output to have shape (2, 3, 7).
"""
#data = data.flatten()
data = data[offset:]
shape = tuple(shape)
extraShape = data.shape[1:]
strides = list(data.strides[::-1])
itemsize = strides[-1]
for s in stride[1::-1]:
strides.append(itemsize * s)
strides = tuple(strides[::-1])
return np.ndarray(buffer=data, shape=shape+extraShape, strides=strides, dtype=data.dtype)
def transformToArray(tr):
"""
Given a QTransform, return a 3x3 numpy array.
Given a QMatrix4x4, return a 4x4 numpy array.
Example: map an array of x,y coordinates through a transform::
## coordinates to map are (1,5), (2,6), (3,7), and (4,8)
coords = np.array([[1,2,3,4], [5,6,7,8], [1,1,1,1]]) # the extra '1' coordinate is needed for translation to work
## Make an example transform
tr = QtGui.QTransform()
tr.translate(3,4)
tr.scale(2, 0.1)
## convert to array
        m = pg.transformToArray(tr)[:2] # ignore the perspective portion of the transformation
## map coordinates through transform
mapped = np.dot(m, coords)
"""
#return np.array([[tr.m11(), tr.m12(), tr.m13()],[tr.m21(), tr.m22(), tr.m23()],[tr.m31(), tr.m32(), tr.m33()]])
## The order of elements given by the method names m11..m33 is misleading--
## It is most common for x,y translation to occupy the positions 1,3 and 2,3 in
## a transformation matrix. However, with QTransform these values appear at m31 and m32.
## So the correct interpretation is transposed:
if isinstance(tr, QtGui.QTransform):
return np.array([[tr.m11(), tr.m21(), tr.m31()], [tr.m12(), tr.m22(), tr.m32()], [tr.m13(), tr.m23(), tr.m33()]])
elif isinstance(tr, QtGui.QMatrix4x4):
return np.array(tr.copyDataTo()).reshape(4,4)
else:
raise Exception("Transform argument must be either QTransform or QMatrix4x4.")
def transformCoordinates(tr, coords, transpose=False):
"""
Map a set of 2D or 3D coordinates through a QTransform or QMatrix4x4.
The shape of coords must be (2,...) or (3,...)
The mapping will _ignore_ any perspective transformations.
For coordinate arrays with ndim=2, this is basically equivalent to matrix multiplication.
Most arrays, however, prefer to put the coordinate axis at the end (eg. shape=(...,3)). To
allow this, use transpose=True.
"""
if transpose:
## move last axis to beginning. This transposition will be reversed before returning the mapped coordinates.
coords = coords.transpose((coords.ndim-1,) + tuple(range(0,coords.ndim-1)))
nd = coords.shape[0]
if isinstance(tr, np.ndarray):
m = tr
else:
m = transformToArray(tr)
m = m[:m.shape[0]-1] # remove perspective
## If coords are 3D and tr is 2D, assume no change for Z axis
if m.shape == (2,3) and nd == 3:
m2 = np.zeros((3,4))
m2[:2, :2] = m[:2,:2]
m2[:2, 3] = m[:2,2]
m2[2,2] = 1
m = m2
## if coords are 2D and tr is 3D, ignore Z axis
if m.shape == (3,4) and nd == 2:
m2 = np.empty((2,3))
m2[:,:2] = m[:2,:2]
m2[:,2] = m[:2,3]
m = m2
## reshape tr and coords to prepare for multiplication
m = m.reshape(m.shape + (1,)*(coords.ndim-1))
coords = coords[np.newaxis, ...]
# separate scale/rotate and translation
translate = m[:,-1]
m = m[:, :-1]
## map coordinates and return
mapped = (m*coords).sum(axis=1) ## apply scale/rotate
mapped += translate
if transpose:
## move first axis to end.
mapped = mapped.transpose(tuple(range(1,mapped.ndim)) + (0,))
return mapped
def solve3DTransform(points1, points2):
"""
Find a 3D transformation matrix that maps points1 onto points2.
Points must be specified as either lists of 4 Vectors or
(4, 3) arrays.
"""
import numpy.linalg
pts = []
for inp in (points1, points2):
if isinstance(inp, np.ndarray):
A = np.empty((4,4), dtype=float)
A[:,:3] = inp[:,:3]
A[:,3] = 1.0
else:
A = np.array([[inp[i].x(), inp[i].y(), inp[i].z(), 1] for i in range(4)])
pts.append(A)
## solve 3 sets of linear equations to determine transformation matrix elements
matrix = np.zeros((4,4))
for i in range(3):
## solve Ax = B; x is one row of the desired transformation matrix
matrix[i] = numpy.linalg.solve(pts[0], pts[1][:,i])
return matrix
def solveBilinearTransform(points1, points2):
"""
Find a bilinear transformation matrix (2x4) that maps points1 onto points2.
Points must be specified as a list of 4 Vector, Point, QPointF, etc.
To use this matrix to map a point [x,y]::
mapped = np.dot(matrix, [x*y, x, y, 1])
"""
import numpy.linalg
## A is 4 rows (points) x 4 columns (xy, x, y, 1)
## B is 4 rows (points) x 2 columns (x, y)
A = np.array([[points1[i].x()*points1[i].y(), points1[i].x(), points1[i].y(), 1] for i in range(4)])
B = np.array([[points2[i].x(), points2[i].y()] for i in range(4)])
## solve 2 sets of linear equations to determine transformation matrix elements
matrix = np.zeros((2,4))
for i in range(2):
matrix[i] = numpy.linalg.solve(A, B[:,i]) ## solve Ax = B; x is one row of the desired transformation matrix
return matrix
def rescaleData(data, scale, offset, dtype=None):
"""Return data rescaled and optionally cast to a new dtype::
data => (data-offset) * scale
Uses scipy.weave (if available) to improve performance.
"""
if dtype is None:
dtype = data.dtype
else:
dtype = np.dtype(dtype)
try:
if not getConfigOption('useWeave'):
raise Exception('Weave is disabled; falling back to slower version.')
try:
import scipy.weave
except ImportError:
raise Exception('scipy.weave is not importable; falling back to slower version.')
## require native dtype when using weave
if not data.dtype.isnative:
data = data.astype(data.dtype.newbyteorder('='))
if not dtype.isnative:
weaveDtype = dtype.newbyteorder('=')
else:
weaveDtype = dtype
newData = np.empty((data.size,), dtype=weaveDtype)
flat = np.ascontiguousarray(data).reshape(data.size)
size = data.size
code = """
double sc = (double)scale;
double off = (double)offset;
for( int i=0; i<size; i++ ) {
newData[i] = ((double)flat[i] - off) * sc;
}
"""
scipy.weave.inline(code, ['flat', 'newData', 'size', 'offset', 'scale'], compiler='gcc')
if dtype != weaveDtype:
newData = newData.astype(dtype)
data = newData.reshape(data.shape)
except:
if getConfigOption('useWeave'):
if getConfigOption('weaveDebug'):
debug.printExc("Error; disabling weave.")
setConfigOptions(useWeave=False)
#p = np.poly1d([scale, -offset*scale])
#data = p(data).astype(dtype)
        d2 = (data - offset) * scale  # compute with a float scale up front; in-place `*=` can fail on integer dtypes
        data = d2.astype(dtype)
return data
def applyLookupTable(data, lut):
"""
Uses values in *data* as indexes to select values from *lut*.
The returned data has shape data.shape + lut.shape[1:]
Note: color gradient lookup tables can be generated using GradientWidget.
"""
if data.dtype.kind not in ('i', 'u'):
data = data.astype(int)
return np.take(lut, data, axis=0, mode='clip')
def makeRGBA(*args, **kwds):
"""Equivalent to makeARGB(..., useRGBA=True)"""
kwds['useRGBA'] = True
return makeARGB(*args, **kwds)
def makeARGB(data, lut=None, levels=None, scale=None, useRGBA=False):
"""
Convert an array of values into an ARGB array suitable for building QImages, OpenGL textures, etc.
Returns the ARGB array (values 0-255) and a boolean indicating whether there is alpha channel data.
This is a two stage process:
1) Rescale the data based on the values in the *levels* argument (min, max).
2) Determine the final output by passing the rescaled values through a lookup table.
Both stages are optional.
============== ==================================================================================
**Arguments:**
    data           numpy array of int/float types.
levels List [min, max]; optionally rescale data before converting through the
lookup table. The data is rescaled such that min->0 and max->*scale*::
rescaled = (clip(data, min, max) - min) * (*scale* / (max - min))
It is also possible to use a 2D (N,2) array of values for levels. In this case,
it is assumed that each pair of min,max values in the levels array should be
applied to a different subset of the input data (for example, the input data may
already have RGB values and the levels are used to independently scale each
channel). The use of this feature requires that levels.shape[0] == data.shape[-1].
scale The maximum value to which data will be rescaled before being passed through the
lookup table (or returned if there is no lookup table). By default this will
                   be set to the length of the lookup table, or 255 if no lookup table is provided.
For OpenGL color specifications (as in GLColor4f) use scale=1.0
lut Optional lookup table (array with dtype=ubyte).
Values in data will be converted to color by indexing directly from lut.
The output data shape will be input.shape + lut.shape[1:].
Note: the output of makeARGB will have the same dtype as the lookup table, so
for conversion to QImage, the dtype must be ubyte.
Lookup tables can be built using GradientWidget.
useRGBA If True, the data is returned in RGBA order (useful for building OpenGL textures).
The default is False, which returns in ARGB order for use with QImage
(Note that 'ARGB' is a term used by the Qt documentation; the _actual_ order
is BGRA).
============== ==================================================================================
"""
profile = debug.Profiler()
if lut is not None and not isinstance(lut, np.ndarray):
lut = np.array(lut)
if levels is not None and not isinstance(levels, np.ndarray):
levels = np.array(levels)
if levels is not None:
if levels.ndim == 1:
if len(levels) != 2:
raise Exception('levels argument must have length 2')
elif levels.ndim == 2:
if lut is not None and lut.ndim > 1:
                raise Exception('Cannot make ARGB data when both levels and lut have ndim > 2')
if levels.shape != (data.shape[-1], 2):
raise Exception('levels must have shape (data.shape[-1], 2)')
else:
print(levels)
raise Exception("levels argument must be 1D or 2D.")
profile()
if scale is None:
if lut is not None:
scale = lut.shape[0]
else:
scale = 255.
## Apply levels if given
if levels is not None:
if isinstance(levels, np.ndarray) and levels.ndim == 2:
## we are going to rescale each channel independently
if levels.shape[0] != data.shape[-1]:
raise Exception("When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])")
newData = np.empty(data.shape, dtype=int)
for i in range(data.shape[-1]):
minVal, maxVal = levels[i]
if minVal == maxVal:
maxVal += 1e-16
rng = maxVal-minVal
rng = 1 if rng == 0 else rng
newData[...,i] = rescaleData(data[...,i], scale / rng, minVal, dtype=int)
data = newData
else:
minVal, maxVal = levels
if minVal == maxVal:
maxVal += 1e-16
if maxVal == minVal:
data = rescaleData(data, 1, minVal, dtype=int)
else:
rng = maxVal-minVal
rng = 1 if rng == 0 else rng
data = rescaleData(data, scale / rng, minVal, dtype=int)
profile()
## apply LUT if given
if lut is not None:
data = applyLookupTable(data, lut)
else:
        if data.dtype != np.ubyte:
data = np.clip(data, 0, 255).astype(np.ubyte)
profile()
## copy data into ARGB ordered array
imgData = np.empty(data.shape[:2]+(4,), dtype=np.ubyte)
profile()
if useRGBA:
order = [0,1,2,3] ## array comes out RGBA
else:
order = [2,1,0,3] ## for some reason, the colors line up as BGR in the final image.
if data.ndim == 2:
# This is tempting:
# imgData[..., :3] = data[..., np.newaxis]
# ..but it turns out this is faster:
for i in range(3):
imgData[..., i] = data
elif data.shape[2] == 1:
for i in range(3):
imgData[..., i] = data[..., 0]
else:
for i in range(0, data.shape[2]):
imgData[..., i] = data[..., order[i]]
profile()
if data.ndim == 2 or data.shape[2] == 3:
alpha = False
imgData[..., 3] = 255
else:
alpha = True
profile()
return imgData, alpha
def makeQImage(imgData, alpha=None, copy=True, transpose=True):
"""
Turn an ARGB array into QImage.
By default, the data is copied; changes to the array will not
be reflected in the image. The image will be given a 'data' attribute
pointing to the array which shares its data to prevent python
freeing that memory while the image is in use.
============== ===================================================================
**Arguments:**
imgData Array of data to convert. Must have shape (width, height, 3 or 4)
and dtype=ubyte. The order of values in the 3rd axis must be
(b, g, r, a).
alpha If True, the QImage returned will have format ARGB32. If False,
the format will be RGB32. By default, _alpha_ is True if
array.shape[2] == 4.
copy If True, the data is copied before converting to QImage.
If False, the new QImage points directly to the data in the array.
Note that the array must be contiguous for this to work
(see numpy.ascontiguousarray).
transpose If True (the default), the array x/y axes are transposed before
creating the image. Note that Qt expects the axes to be in
(height, width) order whereas pyqtgraph usually prefers the
opposite.
============== ===================================================================
"""
## create QImage from buffer
profile = debug.Profiler()
## If we didn't explicitly specify alpha, check the array shape.
if alpha is None:
alpha = (imgData.shape[2] == 4)
copied = False
if imgData.shape[2] == 3: ## need to make alpha channel (even if alpha==False; QImage requires 32 bpp)
if copy is True:
d2 = np.empty(imgData.shape[:2] + (4,), dtype=imgData.dtype)
d2[:,:,:3] = imgData
d2[:,:,3] = 255
imgData = d2
copied = True
else:
raise Exception('Array has only 3 channels; cannot make QImage without copying.')
if alpha:
imgFormat = QtGui.QImage.Format_ARGB32
else:
imgFormat = QtGui.QImage.Format_RGB32
if transpose:
imgData = imgData.transpose((1, 0, 2)) ## QImage expects the row/column order to be opposite
profile()
if not imgData.flags['C_CONTIGUOUS']:
if copy is False:
extra = ' (try setting transpose=False)' if transpose else ''
raise Exception('Array is not contiguous; cannot make QImage without copying.'+extra)
imgData = np.ascontiguousarray(imgData)
copied = True
if copy is True and copied is False:
imgData = imgData.copy()
if USE_PYSIDE:
ch = ctypes.c_char.from_buffer(imgData, 0)
img = QtGui.QImage(ch, imgData.shape[1], imgData.shape[0], imgFormat)
else:
#addr = ctypes.addressof(ctypes.c_char.from_buffer(imgData, 0))
## PyQt API for QImage changed between 4.9.3 and 4.9.6 (I don't know exactly which version it was)
## So we first attempt the 4.9.6 API, then fall back to 4.9.3
#addr = ctypes.c_char.from_buffer(imgData, 0)
#try:
#img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)
#except TypeError:
#addr = ctypes.addressof(addr)
#img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)
try:
img = QtGui.QImage(imgData.ctypes.data, imgData.shape[1], imgData.shape[0], imgFormat)
except:
if copy:
# does not leak memory, is not mutable
img = QtGui.QImage(buffer(imgData), imgData.shape[1], imgData.shape[0], imgFormat)
else:
# mutable, but leaks memory
img = QtGui.QImage(memoryview(imgData), imgData.shape[1], imgData.shape[0], imgFormat)
img.data = imgData
return img
#try:
#buf = imgData.data
#except AttributeError: ## happens when image data is non-contiguous
#buf = imgData.data
#profiler()
#qimage = QtGui.QImage(buf, imgData.shape[1], imgData.shape[0], imgFormat)
#profiler()
#qimage.data = imgData
#return qimage
def imageToArray(img, copy=False, transpose=True):
"""
Convert a QImage into numpy array. The image must have format RGB32, ARGB32, or ARGB32_Premultiplied.
By default, the image is not copied; changes made to the array will appear in the QImage as well (beware: if
the QImage is collected before the array, there may be trouble).
The array will have shape (width, height, (b,g,r,a)).
"""
fmt = img.format()
ptr = img.bits()
if USE_PYSIDE:
arr = np.frombuffer(ptr, dtype=np.ubyte)
else:
ptr.setsize(img.byteCount())
arr = np.asarray(ptr)
if img.byteCount() != arr.size * arr.itemsize:
# Required for Python 2.6, PyQt 4.10
# If this works on all platforms, then there is no need to use np.asarray..
arr = np.frombuffer(ptr, np.ubyte, img.byteCount())
    if fmt == img.Format_RGB32:
        arr = arr.reshape(img.height(), img.width(), 4)  # RGB32 still uses 4 bytes per pixel (0xffRRGGBB)
elif fmt == img.Format_ARGB32 or fmt == img.Format_ARGB32_Premultiplied:
arr = arr.reshape(img.height(), img.width(), 4)
if copy:
arr = arr.copy()
if transpose:
return arr.transpose((1,0,2))
else:
return arr
def colorToAlpha(data, color):
"""
Given an RGBA image in *data*, convert *color* to be transparent.
*data* must be an array (w, h, 3 or 4) of ubyte values and *color* must be
an array (3) of ubyte values.
This is particularly useful for use with images that have a black or white background.
Algorithm is taken from Gimp's color-to-alpha function in plug-ins/common/colortoalpha.c
Credit:
/*
* Color To Alpha plug-in v1.0 by Seth Burgess, sjburges@gimp.org 1999/05/14
* with algorithm by clahey
*/
"""
data = data.astype(float)
if data.shape[-1] == 3: ## add alpha channel if needed
d2 = np.empty(data.shape[:2]+(4,), dtype=data.dtype)
d2[...,:3] = data
d2[...,3] = 255
data = d2
color = color.astype(float)
alpha = np.zeros(data.shape[:2]+(3,), dtype=float)
output = data.copy()
for i in [0,1,2]:
d = data[...,i]
c = color[i]
mask = d > c
alpha[...,i][mask] = (d[mask] - c) / (255. - c)
imask = d < c
alpha[...,i][imask] = (c - d[imask]) / c
output[...,3] = alpha.max(axis=2) * 255.
mask = output[...,3] >= 1.0 ## avoid zero division while processing alpha channel
correction = 255. / output[...,3][mask] ## increase value to compensate for decreased alpha
for i in [0,1,2]:
output[...,i][mask] = ((output[...,i][mask]-color[i]) * correction) + color[i]
output[...,3][mask] *= data[...,3][mask] / 255. ## combine computed and previous alpha values
#raise Exception()
return np.clip(output, 0, 255).astype(np.ubyte)
def gaussianFilter(data, sigma):
"""
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
"""
if np.isscalar(sigma):
sigma = (sigma,) * data.ndim
baseline = data.mean()
filtered = data - baseline
for ax in range(data.ndim):
s = sigma[ax]
if s == 0:
continue
# generate 1D gaussian kernel
ksize = int(s * 6)
x = np.arange(-ksize, ksize)
kernel = np.exp(-x**2 / (2*s**2))
kshape = [1,] * data.ndim
kshape[ax] = len(kernel)
kernel = kernel.reshape(kshape)
# convolve as product of FFTs
shape = data.shape[ax] + ksize
scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) *
np.fft.rfft(kernel, shape, axis=ax),
axis=ax)
# clip off extra data
sl = [slice(None)] * data.ndim
sl[ax] = slice(filtered.shape[ax]-data.shape[ax],None,None)
        filtered = filtered[tuple(sl)]  # index with a tuple; list indexing is deprecated in numpy
return filtered + baseline
def downsample(data, n, axis=0, xvals='subsample'):
"""Downsample by averaging points together across axis.
If multiple axes are specified, runs once per axis.
If a metaArray is given, then the axis values can be either subsampled
or downsampled to match.
"""
ma = None
if (hasattr(data, 'implements') and data.implements('MetaArray')):
ma = data
data = data.view(np.ndarray)
if hasattr(axis, '__len__'):
if not hasattr(n, '__len__'):
n = [n]*len(axis)
for i in range(len(axis)):
data = downsample(data, n[i], axis[i])
return data
if n <= 1:
return data
nPts = int(data.shape[axis] / n)
s = list(data.shape)
s[axis] = nPts
s.insert(axis+1, n)
sl = [slice(None)] * data.ndim
sl[axis] = slice(0, nPts*n)
d1 = data[tuple(sl)]
#print d1.shape, s
d1.shape = tuple(s)
d2 = d1.mean(axis+1)
if ma is None:
return d2
else:
info = ma.infoCopy()
if 'values' in info[axis]:
if xvals == 'subsample':
info[axis]['values'] = info[axis]['values'][::n][:nPts]
elif xvals == 'downsample':
info[axis]['values'] = downsample(info[axis]['values'], n)
return MetaArray(d2, info=info)
def arrayToQPath(x, y, connect='all'):
"""Convert an array of x,y coordinats to QPainterPath as efficiently as possible.
The *connect* argument may be 'all', indicating that each point should be
connected to the next; 'pairs', indicating that each pair of points
should be connected, or an array of int32 values (0 or 1) indicating
connections.
"""
## Create all vertices in path. The method used below creates a binary format so that all
## vertices can be read in at once. This binary format may change in future versions of Qt,
## so the original (slower) method is left here for emergencies:
#path.moveTo(x[0], y[0])
#if connect == 'all':
#for i in range(1, y.shape[0]):
#path.lineTo(x[i], y[i])
#elif connect == 'pairs':
#for i in range(1, y.shape[0]):
#if i%2 == 0:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#elif isinstance(connect, np.ndarray):
#for i in range(1, y.shape[0]):
#if connect[i] == 1:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#else:
#raise Exception('connect argument must be "all", "pairs", or array')
## Speed this up using >> operator
## Format is:
## numVerts(i4) 0(i4)
## x(f8) y(f8) 0(i4) <-- 0 means this vertex does not connect
## x(f8) y(f8) 1(i4) <-- 1 means this vertex connects to the previous vertex
## ...
## 0(i4)
##
## All values are big endian--pack using struct.pack('>d') or struct.pack('>i')
path = QtGui.QPainterPath()
#profiler = debug.Profiler()
n = x.shape[0]
# create empty array, pad with extra space on either end
arr = np.empty(n+2, dtype=[('x', '>f8'), ('y', '>f8'), ('c', '>i4')])
# write first two integers
#profiler('allocate empty')
byteview = arr.view(dtype=np.ubyte)
byteview[:12] = 0
byteview.data[12:20] = struct.pack('>ii', n, 0)
#profiler('pack header')
# Fill array with vertex values
arr[1:-1]['x'] = x
arr[1:-1]['y'] = y
    # decide which points are connected by lines
    if isinstance(connect, np.ndarray):
        arr[1:-1]['c'] = connect
    elif connect == 'pairs':
        pairs = np.empty((n//2, 2), dtype=np.int32)
        if pairs.size != n:
            raise Exception("x,y array lengths must be multiple of 2 to use connect='pairs'")
        pairs[:,0] = 1
        pairs[:,1] = 0
        arr[1:-1]['c'] = pairs.flatten()
    elif connect == 'finite':
        arr[1:-1]['c'] = np.isfinite(x) & np.isfinite(y)
    elif connect == 'all':
        arr[1:-1]['c'] = 1
    else:
        raise Exception('connect argument must be "all", "pairs", "finite", or array')
#profiler('fill array')
# write last 0
lastInd = 20*(n+1)
byteview.data[lastInd:lastInd+4] = struct.pack('>i', 0)
#profiler('footer')
# create datastream object and stream into path
## Avoiding this method because QByteArray(str) leaks memory in PySide
#buf = QtCore.QByteArray(arr.data[12:lastInd+4]) # I think one unnecessary copy happens here
path.strn = byteview.data[12:lastInd+4] # make sure data doesn't run away
try:
buf = QtCore.QByteArray.fromRawData(path.strn)
except TypeError:
buf = QtCore.QByteArray(bytes(path.strn))
#profiler('create buffer')
ds = QtCore.QDataStream(buf)
ds >> path
#profiler('load')
return path
#def isosurface(data, level):
#"""
#Generate isosurface from volumetric data using marching tetrahedra algorithm.
#See Paul Bourke, "Polygonising a Scalar Field Using Tetrahedrons" (http://local.wasp.uwa.edu.au/~pbourke/geometry/polygonise/)
#*data* 3D numpy array of scalar values
#*level* The level at which to generate an isosurface
#"""
#facets = []
### mark everything below the isosurface level
#mask = data < level
#### make eight sub-fields
#fields = np.empty((2,2,2), dtype=object)
#slices = [slice(0,-1), slice(1,None)]
#for i in [0,1]:
#for j in [0,1]:
#for k in [0,1]:
#fields[i,j,k] = mask[slices[i], slices[j], slices[k]]
### split each cell into 6 tetrahedra
### these all have the same 'orienation'; points 1,2,3 circle
### clockwise around point 0
#tetrahedra = [
#[(0,1,0), (1,1,1), (0,1,1), (1,0,1)],
#[(0,1,0), (0,1,1), (0,0,1), (1,0,1)],
#[(0,1,0), (0,0,1), (0,0,0), (1,0,1)],
#[(0,1,0), (0,0,0), (1,0,0), (1,0,1)],
#[(0,1,0), (1,0,0), (1,1,0), (1,0,1)],
#[(0,1,0), (1,1,0), (1,1,1), (1,0,1)]
#]
### each tetrahedron will be assigned an index
### which determines how to generate its facets.
### this structure is:
### facets[index][facet1, facet2, ...]
### where each facet is triangular and its points are each
### interpolated between two points on the tetrahedron
### facet = [(p1a, p1b), (p2a, p2b), (p3a, p3b)]
### facet points always circle clockwise if you are looking
### at them from below the isosurface.
#indexFacets = [
#[], ## all above
#[[(0,1), (0,2), (0,3)]], # 0 below
#[[(1,0), (1,3), (1,2)]], # 1 below
#[[(0,2), (1,3), (1,2)], [(0,2), (0,3), (1,3)]], # 0,1 below
#[[(2,0), (2,1), (2,3)]], # 2 below
#[[(0,3), (1,2), (2,3)], [(0,3), (0,1), (1,2)]], # 0,2 below
#[[(1,0), (2,3), (2,0)], [(1,0), (1,3), (2,3)]], # 1,2 below
#[[(3,0), (3,1), (3,2)]], # 3 above
#[[(3,0), (3,2), (3,1)]], # 3 below
#[[(1,0), (2,0), (2,3)], [(1,0), (2,3), (1,3)]], # 0,3 below
#[[(0,3), (2,3), (1,2)], [(0,3), (1,2), (0,1)]], # 1,3 below
#[[(2,0), (2,3), (2,1)]], # 0,1,3 below
#[[(0,2), (1,2), (1,3)], [(0,2), (1,3), (0,3)]], # 2,3 below
#[[(1,0), (1,2), (1,3)]], # 0,2,3 below
#[[(0,1), (0,3), (0,2)]], # 1,2,3 below
#[] ## all below
#]
#for tet in tetrahedra:
### get the 4 fields for this tetrahedron
#tetFields = [fields[c] for c in tet]
### generate an index for each grid cell
#index = tetFields[0] + tetFields[1]*2 + tetFields[2]*4 + tetFields[3]*8
### add facets
#for i in xrange(index.shape[0]): # data x-axis
#for j in xrange(index.shape[1]): # data y-axis
#for k in xrange(index.shape[2]): # data z-axis
#for f in indexFacets[index[i,j,k]]: # faces to generate for this tet
#pts = []
#for l in [0,1,2]: # points in this face
#p1 = tet[f[l][0]] # tet corner 1
#p2 = tet[f[l][1]] # tet corner 2
#pts.append([(p1[x]+p2[x])*0.5+[i,j,k][x]+0.5 for x in [0,1,2]]) ## interpolate between tet corners
#facets.append(pts)
#return facets
def isocurve(data, level, connected=False, extendToEdge=False, path=False):
"""
Generate isocurve from 2D data using marching squares algorithm.
============== =========================================================
**Arguments:**
data 2D numpy array of scalar values
level The level at which to generate an isosurface
connected If False, return a single long list of point pairs
If True, return multiple long lists of connected point
locations. (This is slower but better for drawing
continuous lines)
extendToEdge If True, extend the curves to reach the exact edges of
the data.
path if True, return a QPainterPath rather than a list of
vertex coordinates. This forces connected=True.
============== =========================================================
This function is SLOW; plenty of room for optimization here.
"""
if path is True:
connected = True
if extendToEdge:
d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)
d2[1:-1, 1:-1] = data
d2[0, 1:-1] = data[0]
d2[-1, 1:-1] = data[-1]
d2[1:-1, 0] = data[:, 0]
d2[1:-1, -1] = data[:, -1]
d2[0,0] = d2[0,1]
d2[0,-1] = d2[1,-1]
d2[-1,0] = d2[-1,1]
d2[-1,-1] = d2[-1,-2]
data = d2
sideTable = [
[],
[0,1],
[1,2],
[0,2],
[0,3],
[1,3],
[0,1,2,3],
[2,3],
[2,3],
[0,1,2,3],
[1,3],
[0,3],
[0,2],
[1,2],
[0,1],
[]
]
edgeKey=[
[(0,1), (0,0)],
[(0,0), (1,0)],
[(1,0), (1,1)],
[(1,1), (0,1)]
]
lines = []
## mark everything below the isosurface level
mask = data < level
### make four sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2,2), dtype=object)
slices = [slice(0,-1), slice(1,None)]
for i in [0,1]:
for j in [0,1]:
fields[i,j] = mask[slices[i], slices[j]]
            #vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourke's vertex numbering scheme
vertIndex = i+2*j
#print i,j,k," : ", fields[i,j,k], 2**vertIndex
index += fields[i,j] * 2**vertIndex
## add lines
for i in range(index.shape[0]): # data x-axis
for j in range(index.shape[1]): # data y-axis
sides = sideTable[index[i,j]]
for l in range(0, len(sides), 2): ## faces for this grid cell
edges = sides[l:l+2]
pts = []
for m in [0,1]: # points in this face
p1 = edgeKey[edges[m]][0] # p1, p2 are points at either side of an edge
p2 = edgeKey[edges[m]][1]
v1 = data[i+p1[0], j+p1[1]] # v1 and v2 are the values at p1 and p2
v2 = data[i+p2[0], j+p2[1]]
f = (level-v1) / (v2-v1)
fi = 1.0 - f
p = ( ## interpolate between corners
p1[0]*fi + p2[0]*f + i + 0.5,
p1[1]*fi + p2[1]*f + j + 0.5
)
if extendToEdge:
## check bounds
p = (
min(data.shape[0]-2, max(0, p[0]-1)),
min(data.shape[1]-2, max(0, p[1]-1)),
)
if connected:
gridKey = i + (1 if edges[m]==2 else 0), j + (1 if edges[m]==3 else 0), edges[m]%2
pts.append((p, gridKey)) ## give the actual position and a key identifying the grid location (for connecting segments)
else:
pts.append(p)
lines.append(pts)
if not connected:
return lines
## turn disjoint list of segments into continuous lines
#lines = [[2,5], [5,4], [3,4], [1,3], [6,7], [7,8], [8,6], [11,12], [12,15], [11,13], [13,14]]
#lines = [[(float(a), a), (float(b), b)] for a,b in lines]
points = {} ## maps each point to its connections
for a,b in lines:
if a[1] not in points:
points[a[1]] = []
points[a[1]].append([a,b])
if b[1] not in points:
points[b[1]] = []
points[b[1]].append([b,a])
## rearrange into chains
for k in list(points.keys()):
try:
chains = points[k]
except KeyError: ## already used this point elsewhere
continue
#print "===========", k
for chain in chains:
#print " chain:", chain
x = None
while True:
if x == chain[-1][1]:
break ## nothing left to do on this chain
x = chain[-1][1]
if x == k:
break ## chain has looped; we're done and can ignore the opposite chain
y = chain[-2][1]
connects = points[x]
for conn in connects[:]:
if conn[1][1] != y:
#print " ext:", conn
chain.extend(conn[1:])
#print " del:", x
del points[x]
if chain[0][1] == chain[-1][1]: # looped chain; no need to continue the other direction
chains.pop()
break
## extract point locations
lines = []
for chain in points.values():
if len(chain) == 2:
chain = chain[1][1:][::-1] + chain[0] # join together ends of chain
else:
chain = chain[0]
lines.append([p[0] for p in chain])
if not path:
return lines ## a list of pairs of points
path = QtGui.QPainterPath()
for line in lines:
path.moveTo(*line[0])
for p in line[1:]:
path.lineTo(*p)
return path
def traceImage(image, values, smooth=0.5):
"""
Convert an image to a set of QPainterPath curves.
One curve will be generated for each item in *values*; each curve outlines the area
of the image that is closer to its value than to any others.
If image is RGB or RGBA, then the shape of values should be (nvals, 3/4)
The parameter *smooth* is expressed in pixels.
"""
try:
import scipy.ndimage as ndi
except ImportError:
raise Exception("traceImage() requires the package scipy.ndimage, but it is not importable.")
if values.ndim == 2:
values = values.T
values = values[np.newaxis, np.newaxis, ...].astype(float)
image = image[..., np.newaxis].astype(float)
diff = np.abs(image-values)
if values.ndim == 4:
diff = diff.sum(axis=2)
labels = np.argmin(diff, axis=2)
paths = []
for i in range(diff.shape[-1]):
d = (labels==i).astype(float)
d = gaussianFilter(d, (smooth, smooth))
lines = isocurve(d, 0.5, connected=True, extendToEdge=True)
path = QtGui.QPainterPath()
for line in lines:
path.moveTo(*line[0])
for p in line[1:]:
path.lineTo(*p)
paths.append(path)
return paths
IsosurfaceDataCache = None
def isosurface(data, level):
"""
Generate isosurface from volumetric data using marching cubes algorithm.
See Paul Bourke, "Polygonising a Scalar Field"
(http://paulbourke.net/geometry/polygonise/)
*data* 3D numpy array of scalar values
*level* The level at which to generate an isosurface
Returns an array of vertex coordinates (Nv, 3) and an array of
per-face vertex indexes (Nf, 3)
"""
## For improvement, see:
##
## Efficient implementation of Marching Cubes' cases with topological guarantees.
## Thomas Lewiner, Helio Lopes, Antonio Wilson Vieira and Geovan Tavares.
## Journal of Graphics Tools 8(2): pp. 1-15 (december 2003)
## Precompute lookup tables on the first run
global IsosurfaceDataCache
if IsosurfaceDataCache is None:
## map from grid cell index to edge index.
## grid cell index tells us which corners are below the isosurface,
## edge index tells us which edges are cut by the isosurface.
        ## (Data taken from Bourke; see above.)
edgeTable = np.array([
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
], dtype=np.uint16)
## Table of triangles to use for filling each grid cell.
## Each set of three integers tells us which three edges to
## draw a triangle between.
        ## (Data taken from Bourke; see above.)
triTable = [
[],
[0, 8, 3],
[0, 1, 9],
[1, 8, 3, 9, 8, 1],
[1, 2, 10],
[0, 8, 3, 1, 2, 10],
[9, 2, 10, 0, 2, 9],
[2, 8, 3, 2, 10, 8, 10, 9, 8],
[3, 11, 2],
[0, 11, 2, 8, 11, 0],
[1, 9, 0, 2, 3, 11],
[1, 11, 2, 1, 9, 11, 9, 8, 11],
[3, 10, 1, 11, 10, 3],
[0, 10, 1, 0, 8, 10, 8, 11, 10],
[3, 9, 0, 3, 11, 9, 11, 10, 9],
[9, 8, 10, 10, 8, 11],
[4, 7, 8],
[4, 3, 0, 7, 3, 4],
[0, 1, 9, 8, 4, 7],
[4, 1, 9, 4, 7, 1, 7, 3, 1],
[1, 2, 10, 8, 4, 7],
[3, 4, 7, 3, 0, 4, 1, 2, 10],
[9, 2, 10, 9, 0, 2, 8, 4, 7],
[2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4],
[8, 4, 7, 3, 11, 2],
[11, 4, 7, 11, 2, 4, 2, 0, 4],
[9, 0, 1, 8, 4, 7, 2, 3, 11],
[4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1],
[3, 10, 1, 3, 11, 10, 7, 8, 4],
[1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4],
[4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3],
[4, 7, 11, 4, 11, 9, 9, 11, 10],
[9, 5, 4],
[9, 5, 4, 0, 8, 3],
[0, 5, 4, 1, 5, 0],
[8, 5, 4, 8, 3, 5, 3, 1, 5],
[1, 2, 10, 9, 5, 4],
[3, 0, 8, 1, 2, 10, 4, 9, 5],
[5, 2, 10, 5, 4, 2, 4, 0, 2],
[2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8],
[9, 5, 4, 2, 3, 11],
[0, 11, 2, 0, 8, 11, 4, 9, 5],
[0, 5, 4, 0, 1, 5, 2, 3, 11],
[2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5],
[10, 3, 11, 10, 1, 3, 9, 5, 4],
[4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10],
[5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3],
[5, 4, 8, 5, 8, 10, 10, 8, 11],
[9, 7, 8, 5, 7, 9],
[9, 3, 0, 9, 5, 3, 5, 7, 3],
[0, 7, 8, 0, 1, 7, 1, 5, 7],
[1, 5, 3, 3, 5, 7],
[9, 7, 8, 9, 5, 7, 10, 1, 2],
[10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3],
[8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2],
[2, 10, 5, 2, 5, 3, 3, 5, 7],
[7, 9, 5, 7, 8, 9, 3, 11, 2],
[9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11],
[2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7],
[11, 2, 1, 11, 1, 7, 7, 1, 5],
[9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11],
[5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0],
[11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0],
[11, 10, 5, 7, 11, 5],
[10, 6, 5],
[0, 8, 3, 5, 10, 6],
[9, 0, 1, 5, 10, 6],
[1, 8, 3, 1, 9, 8, 5, 10, 6],
[1, 6, 5, 2, 6, 1],
[1, 6, 5, 1, 2, 6, 3, 0, 8],
[9, 6, 5, 9, 0, 6, 0, 2, 6],
[5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8],
[2, 3, 11, 10, 6, 5],
[11, 0, 8, 11, 2, 0, 10, 6, 5],
[0, 1, 9, 2, 3, 11, 5, 10, 6],
[5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11],
[6, 3, 11, 6, 5, 3, 5, 1, 3],
[0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6],
[3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9],
[6, 5, 9, 6, 9, 11, 11, 9, 8],
[5, 10, 6, 4, 7, 8],
[4, 3, 0, 4, 7, 3, 6, 5, 10],
[1, 9, 0, 5, 10, 6, 8, 4, 7],
[10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4],
[6, 1, 2, 6, 5, 1, 4, 7, 8],
[1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7],
[8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6],
[7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9],
[3, 11, 2, 7, 8, 4, 10, 6, 5],
[5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11],
[0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6],
[9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6],
[8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6],
[5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11],
[0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7],
[6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9],
[10, 4, 9, 6, 4, 10],
[4, 10, 6, 4, 9, 10, 0, 8, 3],
[10, 0, 1, 10, 6, 0, 6, 4, 0],
[8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10],
[1, 4, 9, 1, 2, 4, 2, 6, 4],
[3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4],
[0, 2, 4, 4, 2, 6],
[8, 3, 2, 8, 2, 4, 4, 2, 6],
[10, 4, 9, 10, 6, 4, 11, 2, 3],
[0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6],
[3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10],
[6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1],
[9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3],
[8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1],
[3, 11, 6, 3, 6, 0, 0, 6, 4],
[6, 4, 8, 11, 6, 8],
[7, 10, 6, 7, 8, 10, 8, 9, 10],
[0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10],
[10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0],
[10, 6, 7, 10, 7, 1, 1, 7, 3],
[1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7],
[2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9],
[7, 8, 0, 7, 0, 6, 6, 0, 2],
[7, 3, 2, 6, 7, 2],
[2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7],
[2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7],
[1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11],
[11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1],
[8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6],
[0, 9, 1, 11, 6, 7],
[7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0],
[7, 11, 6],
[7, 6, 11],
[3, 0, 8, 11, 7, 6],
[0, 1, 9, 11, 7, 6],
[8, 1, 9, 8, 3, 1, 11, 7, 6],
[10, 1, 2, 6, 11, 7],
[1, 2, 10, 3, 0, 8, 6, 11, 7],
[2, 9, 0, 2, 10, 9, 6, 11, 7],
[6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8],
[7, 2, 3, 6, 2, 7],
[7, 0, 8, 7, 6, 0, 6, 2, 0],
[2, 7, 6, 2, 3, 7, 0, 1, 9],
[1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6],
[10, 7, 6, 10, 1, 7, 1, 3, 7],
[10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8],
[0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7],
[7, 6, 10, 7, 10, 8, 8, 10, 9],
[6, 8, 4, 11, 8, 6],
[3, 6, 11, 3, 0, 6, 0, 4, 6],
[8, 6, 11, 8, 4, 6, 9, 0, 1],
[9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6],
[6, 8, 4, 6, 11, 8, 2, 10, 1],
[1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6],
[4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9],
[10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3],
[8, 2, 3, 8, 4, 2, 4, 6, 2],
[0, 4, 2, 4, 6, 2],
[1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8],
[1, 9, 4, 1, 4, 2, 2, 4, 6],
[8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1],
[10, 1, 0, 10, 0, 6, 6, 0, 4],
[4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3],
[10, 9, 4, 6, 10, 4],
[4, 9, 5, 7, 6, 11],
[0, 8, 3, 4, 9, 5, 11, 7, 6],
[5, 0, 1, 5, 4, 0, 7, 6, 11],
[11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5],
[9, 5, 4, 10, 1, 2, 7, 6, 11],
[6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5],
[7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2],
[3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6],
[7, 2, 3, 7, 6, 2, 5, 4, 9],
[9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7],
[3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0],
[6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8],
[9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7],
[1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4],
[4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10],
[7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10],
[6, 9, 5, 6, 11, 9, 11, 8, 9],
[3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5],
[0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11],
[6, 11, 3, 6, 3, 5, 5, 3, 1],
[1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6],
[0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10],
[11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5],
[6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3],
[5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2],
[9, 5, 6, 9, 6, 0, 0, 6, 2],
[1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8],
[1, 5, 6, 2, 1, 6],
[1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6],
[10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0],
[0, 3, 8, 5, 6, 10],
[10, 5, 6],
[11, 5, 10, 7, 5, 11],
[11, 5, 10, 11, 7, 5, 8, 3, 0],
[5, 11, 7, 5, 10, 11, 1, 9, 0],
[10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1],
[11, 1, 2, 11, 7, 1, 7, 5, 1],
[0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11],
[9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7],
[7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2],
[2, 5, 10, 2, 3, 5, 3, 7, 5],
[8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5],
[9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2],
[9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2],
[1, 3, 5, 3, 7, 5],
[0, 8, 7, 0, 7, 1, 1, 7, 5],
[9, 0, 3, 9, 3, 5, 5, 3, 7],
[9, 8, 7, 5, 9, 7],
[5, 8, 4, 5, 10, 8, 10, 11, 8],
[5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0],
[0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5],
[10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4],
[2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8],
[0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11],
[0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5],
[9, 4, 5, 2, 11, 3],
[2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4],
[5, 10, 2, 5, 2, 4, 4, 2, 0],
[3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9],
[5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2],
[8, 4, 5, 8, 5, 3, 3, 5, 1],
[0, 4, 5, 1, 0, 5],
[8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5],
[9, 4, 5],
[4, 11, 7, 4, 9, 11, 9, 10, 11],
[0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11],
[1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11],
[3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4],
[4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2],
[9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3],
[11, 7, 4, 11, 4, 2, 2, 4, 0],
[11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4],
[2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9],
[9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7],
[3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10],
[1, 10, 2, 8, 7, 4],
[4, 9, 1, 4, 1, 7, 7, 1, 3],
[4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1],
[4, 0, 3, 7, 4, 3],
[4, 8, 7],
[9, 10, 8, 10, 11, 8],
[3, 0, 9, 3, 9, 11, 11, 9, 10],
[0, 1, 10, 0, 10, 8, 8, 10, 11],
[3, 1, 10, 11, 3, 10],
[1, 2, 11, 1, 11, 9, 9, 11, 8],
[3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9],
[0, 2, 11, 8, 0, 11],
[3, 2, 11],
[2, 3, 8, 2, 8, 10, 10, 8, 9],
[9, 10, 2, 0, 9, 2],
[2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8],
[1, 10, 2],
[1, 3, 8, 9, 1, 8],
[0, 9, 1],
[0, 3, 8],
[]
]
edgeShifts = np.array([ ## maps edge ID (0-11) to (x,y,z) cell offset and edge ID (0-2)
[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 1, 1],
[0, 1, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 2],
[1, 0, 0, 2],
[1, 1, 0, 2],
[0, 1, 0, 2],
#[9, 9, 9, 9] ## fake
], dtype=np.uint16) # don't use ubyte here! This value gets added to cell index later; will need the extra precision.
        nTableFaces = np.array([len(f)//3 for f in triTable], dtype=np.ubyte)
faceShiftTables = [None]
for i in range(1,6):
## compute lookup table of index: vertexes mapping
faceTableI = np.zeros((len(triTable), i*3), dtype=np.ubyte)
faceTableInds = np.argwhere(nTableFaces == i)
faceTableI[faceTableInds[:,0]] = np.array([triTable[j] for j in faceTableInds])
faceTableI = faceTableI.reshape((len(triTable), i, 3))
faceShiftTables.append(edgeShifts[faceTableI])
## Let's try something different:
#faceTable = np.empty((256, 5, 3, 4), dtype=np.ubyte) # (grid cell index, faces, vertexes, edge lookup)
#for i,f in enumerate(triTable):
#f = np.array(f + [12] * (15-len(f))).reshape(5,3)
#faceTable[i] = edgeShifts[f]
IsosurfaceDataCache = (faceShiftTables, edgeShifts, edgeTable, nTableFaces)
else:
faceShiftTables, edgeShifts, edgeTable, nTableFaces = IsosurfaceDataCache
## mark everything below the isosurface level
mask = data < level
### make eight sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2,2,2), dtype=object)
slices = [slice(0,-1), slice(1,None)]
for i in [0,1]:
for j in [0,1]:
for k in [0,1]:
fields[i,j,k] = mask[slices[i], slices[j], slices[k]]
                vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourke's vertex numbering scheme
index += fields[i,j,k] * 2**vertIndex
### Generate table of edges that have been cut
cutEdges = np.zeros([x+1 for x in index.shape]+[3], dtype=np.uint32)
edges = edgeTable[index]
for i, shift in enumerate(edgeShifts[:12]):
slices = [slice(shift[j],cutEdges.shape[j]+(shift[j]-1)) for j in range(3)]
cutEdges[slices[0], slices[1], slices[2], shift[3]] += edges & 2**i
## for each cut edge, interpolate to see where exactly the edge is cut and generate vertex positions
m = cutEdges > 0
vertexInds = np.argwhere(m) ## argwhere is slow!
vertexes = vertexInds[:,:3].astype(np.float32)
dataFlat = data.reshape(data.shape[0]*data.shape[1]*data.shape[2])
## re-use the cutEdges array as a lookup table for vertex IDs
cutEdges[vertexInds[:,0], vertexInds[:,1], vertexInds[:,2], vertexInds[:,3]] = np.arange(vertexInds.shape[0])
for i in [0,1,2]:
vim = vertexInds[:,3] == i
vi = vertexInds[vim, :3]
viFlat = (vi * (np.array(data.strides[:3]) // data.itemsize)[np.newaxis,:]).sum(axis=1)
v1 = dataFlat[viFlat]
v2 = dataFlat[viFlat + data.strides[i]//data.itemsize]
vertexes[vim,i] += (level-v1) / (v2-v1)
### compute the set of vertex indexes for each face.
## This works, but runs a bit slower.
#cells = np.argwhere((index != 0) & (index != 255)) ## all cells with at least one face
#cellInds = index[cells[:,0], cells[:,1], cells[:,2]]
#verts = faceTable[cellInds]
#mask = verts[...,0,0] != 9
#verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges
#verts = verts[mask]
#faces = cutEdges[verts[...,0], verts[...,1], verts[...,2], verts[...,3]] ## and these are the vertex indexes we want.
## To allow this to be vectorized efficiently, we count the number of faces in each
## grid cell and handle each group of cells with the same number together.
## determine how many faces to assign to each grid cell
nFaces = nTableFaces[index]
totFaces = nFaces.sum()
faces = np.empty((totFaces, 3), dtype=np.uint32)
ptr = 0
#import debug
#p = debug.Profiler()
## this helps speed up an indexing operation later on
cs = np.array(cutEdges.strides)//cutEdges.itemsize
cutEdges = cutEdges.flatten()
## this, strangely, does not seem to help.
#ins = np.array(index.strides)/index.itemsize
#index = index.flatten()
for i in range(1,6):
### expensive:
#profiler()
cells = np.argwhere(nFaces == i) ## all cells which require i faces (argwhere is expensive)
#profiler()
if cells.shape[0] == 0:
continue
cellInds = index[cells[:,0], cells[:,1], cells[:,2]] ## index values of cells to process for this round
#profiler()
### expensive:
verts = faceShiftTables[i][cellInds]
#profiler()
verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges
verts = verts.reshape((verts.shape[0]*i,)+verts.shape[2:])
#profiler()
### expensive:
verts = (verts * cs[np.newaxis, np.newaxis, :]).sum(axis=2)
vertInds = cutEdges[verts]
#profiler()
nv = vertInds.shape[0]
#profiler()
faces[ptr:ptr+nv] = vertInds #.reshape((nv, 3))
#profiler()
ptr += nv
return vertexes, faces
def invertQTransform(tr):
"""Return a QTransform that is the inverse of *tr*.
    Raises an exception if tr is not invertible.
Note that this function is preferred over QTransform.inverted() due to
bugs in that method. (specifically, Qt has floating-point precision issues
when determining whether a matrix is invertible)
"""
try:
import numpy.linalg
arr = np.array([[tr.m11(), tr.m12(), tr.m13()], [tr.m21(), tr.m22(), tr.m23()], [tr.m31(), tr.m32(), tr.m33()]])
inv = numpy.linalg.inv(arr)
return QtGui.QTransform(inv[0,0], inv[0,1], inv[0,2], inv[1,0], inv[1,1], inv[1,2], inv[2,0], inv[2,1])
except ImportError:
inv = tr.inverted()
if inv[1] is False:
raise Exception("Transform is not invertible.")
return inv[0]
def pseudoScatter(data, spacing=None, shuffle=True, bidir=False):
"""
Used for examining the distribution of values in a set. Produces scattering as in beeswarm or column scatter plots.
Given a list of x-values, construct a set of y-values such that an x,y scatter-plot
will not have overlapping points (it will look similar to a histogram).
"""
inds = np.arange(len(data))
if shuffle:
np.random.shuffle(inds)
data = data[inds]
if spacing is None:
spacing = 2.*np.std(data)/len(data)**0.5
s2 = spacing**2
yvals = np.empty(len(data))
if len(data) == 0:
return yvals
yvals[0] = 0
for i in range(1,len(data)):
x = data[i] # current x value to be placed
x0 = data[:i] # all x values already placed
y0 = yvals[:i] # all y values already placed
y = 0
dx = (x0-x)**2 # x-distance to each previous point
xmask = dx < s2 # exclude anything too far away
if xmask.sum() > 0:
if bidir:
dirs = [-1, 1]
else:
dirs = [1]
yopts = []
for direction in dirs:
y = 0
dx2 = dx[xmask]
dy = (s2 - dx2)**0.5
limits = np.empty((2,len(dy))) # ranges of y-values to exclude
limits[0] = y0[xmask] - dy
limits[1] = y0[xmask] + dy
while True:
# ignore anything below this y-value
if direction > 0:
mask = limits[1] >= y
else:
mask = limits[0] <= y
limits2 = limits[:,mask]
# are we inside an excluded region?
mask = (limits2[0] < y) & (limits2[1] > y)
if mask.sum() == 0:
break
if direction > 0:
y = limits2[:,mask].max()
else:
y = limits2[:,mask].min()
yopts.append(y)
if bidir:
y = yopts[0] if -yopts[0] < yopts[1] else yopts[1]
else:
y = yopts[0]
yvals[i] = y
return yvals[np.argsort(inds)] ## un-shuffle values before returning
def toposort(deps, nodes=None, seen=None, stack=None, depth=0):
"""Topological sort. Arguments are:
      deps     dictionary describing dependencies where a:[b,c] means
               "a depends on b and c"
      nodes    optional, specifies list of starting nodes (these should be
               the nodes which are not depended on by any other nodes).
               Other candidate starting nodes will be ignored.

    Example::

        # Sort the following graph:
        #
        #   B ──┬─────> C <── D
        #       │       │
        #   E <─┴─> A <─┘
        #
        deps = {'a': ['b', 'c'], 'c': ['b', 'd'], 'e': ['b']}
        toposort(deps)
        => ['b', 'd', 'c', 'a', 'e']
    """
# fill in empty dep lists
deps = deps.copy()
    for v in list(deps.values()):
        for dep in v:
            if dep not in deps:
                deps[dep] = []
if nodes is None:
## run through deps to find nodes that are not depended upon
rem = set()
for dep in deps.values():
rem |= set(dep)
nodes = set(deps.keys()) - rem
if seen is None:
seen = set()
stack = []
    sorted_nodes = []  # avoid shadowing the builtin `sorted`
    for n in nodes:
        if n in stack:
            raise Exception("Cyclic dependency detected", stack + [n])
        if n in seen:
            continue
        seen.add(n)
        sorted_nodes.extend(toposort(deps, deps[n], seen, stack + [n], depth=depth + 1))
        sorted_nodes.append(n)
    return sorted_nodes
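# Illustrative usage (editor's sketch): when starting nodes are supplied
# explicitly, a cycle is detected and reported with the offending path.
#
#     toposort({'a': ['b'], 'b': ['a']}, nodes=['a'])
#     # -> raises Exception("Cyclic dependency detected", ['a', 'b', 'a'])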
def disconnect(signal, slot):
"""Disconnect a Qt signal from a slot.
    This function augments Qt's Signal.disconnect():

    * Returns a bool indicating whether the disconnection was successful,
      rather than raising an exception.
    * Attempts to disconnect prior versions of the slot when using pg.reload.
    """
while True:
try:
signal.disconnect(slot)
return True
        except (TypeError, RuntimeError):
slot = getPreviousVersion(slot)
if slot is None:
return False
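# Illustrative usage (editor's sketch; requires a running QApplication, and
# `someSlot` is a hypothetical callable, not defined in this module):
# disconnect() returns False instead of raising when the slot was never
# connected.
#
#     btn = QtGui.QPushButton()
#     disconnect(btn.clicked, someSlot)    # -> False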
class SignalBlock(object):
"""Class used to temporarily block a Qt signal connection::
with SignalBlock(signal, slot):
# do something that emits a signal; it will
# not be delivered to slot
"""
    def __init__(self, signal, slot):
        self.signal = signal
        self.slot = slot
        self.reconnect = False

    def __enter__(self):
        # remember whether the signal was actually connected, so that
        # __exit__ does not create a connection that never existed
        self.reconnect = disconnect(self.signal, self.slot)
        return self

    def __exit__(self, *args):
        if self.reconnect:
            self.signal.connect(self.slot)
|
tropp/acq4
|
acq4/pyqtgraph/functions.py
|
Python
|
mit
| 87,061
|
[
"Gaussian"
] |
007c189a9fd3584ceb3e4e5ce2065f29955bf6a00233a6a2fd2b57e8ecad2881
|
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import struct
from mutagen_culrc._util import MutagenError
class error(IOError, MutagenError):
"""Error raised by :mod:`mutagen_culrc.asf`"""
class ASFError(error):
pass
class ASFHeaderError(error):
pass
def guid2bytes(s):
"""Converts a GUID to the serialized bytes representation"""
assert isinstance(s, str)
assert len(s) == 36
p = struct.pack
return b"".join([
p("<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16)),
p(">H", int(s[19:23], 16)),
p(">Q", int(s[24:], 16))[2:],
])
def bytes2guid(s):
"""Converts a serialized GUID to a text GUID"""
assert isinstance(s, bytes)
u = struct.unpack
v = []
v.extend(u("<IHH", s[:8]))
v.extend(u(">HQ", s[8:10] + b"\x00\x00" + s[10:]))
return "%08X-%04X-%04X-%04X-%012X" % tuple(v)
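# Illustrative usage (editor's sketch): the two helpers are inverses of each
# other, so a GUID survives a round trip.  The example GUID is the well-known
# ASF Header Object identifier.
#
#     g = "75B22630-668E-11CF-A6D9-00AA0062CE6C"
#     bytes2guid(guid2bytes(g)) == g    # -> True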
# Names from http://windows.microsoft.com/en-za/windows7/c00d10d1-[0-9A-F]{1,4}
CODECS = {
0x0000: u"Unknown Wave Format",
0x0001: u"Microsoft PCM Format",
0x0002: u"Microsoft ADPCM Format",
0x0003: u"IEEE Float",
0x0004: u"Compaq Computer VSELP",
0x0005: u"IBM CVSD",
0x0006: u"Microsoft CCITT A-Law",
0x0007: u"Microsoft CCITT u-Law",
0x0008: u"Microsoft DTS",
0x0009: u"Microsoft DRM",
0x000A: u"Windows Media Audio 9 Voice",
0x000B: u"Windows Media Audio 10 Voice",
0x000C: u"OGG Vorbis",
0x000D: u"FLAC",
0x000E: u"MOT AMR",
0x000F: u"Nice Systems IMBE",
0x0010: u"OKI ADPCM",
0x0011: u"Intel IMA ADPCM",
0x0012: u"Videologic MediaSpace ADPCM",
0x0013: u"Sierra Semiconductor ADPCM",
0x0014: u"Antex Electronics G.723 ADPCM",
0x0015: u"DSP Solutions DIGISTD",
0x0016: u"DSP Solutions DIGIFIX",
0x0017: u"Dialogic OKI ADPCM",
0x0018: u"MediaVision ADPCM",
0x0019: u"Hewlett-Packard CU codec",
0x001A: u"Hewlett-Packard Dynamic Voice",
0x0020: u"Yamaha ADPCM",
0x0021: u"Speech Compression SONARC",
0x0022: u"DSP Group True Speech",
0x0023: u"Echo Speech EchoSC1",
0x0024: u"Ahead Inc. Audiofile AF36",
0x0025: u"Audio Processing Technology APTX",
0x0026: u"Ahead Inc. AudioFile AF10",
0x0027: u"Aculab Prosody 1612",
0x0028: u"Merging Technologies S.A. LRC",
0x0030: u"Dolby Labs AC2",
0x0031: u"Microsoft GSM 6.10",
0x0032: u"Microsoft MSNAudio",
0x0033: u"Antex Electronics ADPCME",
0x0034: u"Control Resources VQLPC",
0x0035: u"DSP Solutions Digireal",
0x0036: u"DSP Solutions DigiADPCM",
0x0037: u"Control Resources CR10",
0x0038: u"Natural MicroSystems VBXADPCM",
0x0039: u"Crystal Semiconductor IMA ADPCM",
0x003A: u"Echo Speech EchoSC3",
0x003B: u"Rockwell ADPCM",
0x003C: u"Rockwell DigiTalk",
0x003D: u"Xebec Multimedia Solutions",
0x0040: u"Antex Electronics G.721 ADPCM",
0x0041: u"Antex Electronics G.728 CELP",
0x0042: u"Intel G.723",
0x0043: u"Intel G.723.1",
0x0044: u"Intel G.729 Audio",
0x0045: u"Sharp G.726 Audio",
0x0050: u"Microsoft MPEG-1",
0x0052: u"InSoft RT24",
0x0053: u"InSoft PAC",
0x0055: u"MP3 - MPEG Layer III",
0x0059: u"Lucent G.723",
0x0060: u"Cirrus Logic",
0x0061: u"ESS Technology ESPCM",
0x0062: u"Voxware File-Mode",
0x0063: u"Canopus Atrac",
0x0064: u"APICOM G.726 ADPCM",
0x0065: u"APICOM G.722 ADPCM",
0x0066: u"Microsoft DSAT",
0x0067: u"Microsoft DSAT Display",
0x0069: u"Voxware Byte Aligned",
0x0070: u"Voxware AC8",
0x0071: u"Voxware AC10",
0x0072: u"Voxware AC16",
0x0073: u"Voxware AC20",
0x0074: u"Voxware RT24 MetaVoice",
0x0075: u"Voxware RT29 MetaSound",
0x0076: u"Voxware RT29HW",
0x0077: u"Voxware VR12",
0x0078: u"Voxware VR18",
0x0079: u"Voxware TQ40",
0x007A: u"Voxware SC3",
0x007B: u"Voxware SC3",
0x0080: u"Softsound",
0x0081: u"Voxware TQ60",
0x0082: u"Microsoft MSRT24",
0x0083: u"AT&T Labs G.729A",
0x0084: u"Motion Pixels MVI MV12",
0x0085: u"DataFusion Systems G.726",
0x0086: u"DataFusion Systems GSM610",
0x0088: u"Iterated Systems ISIAudio",
0x0089: u"Onlive",
0x008A: u"Multitude FT SX20",
0x008B: u"Infocom ITS ACM G.721",
0x008C: u"Convedia G.729",
0x008D: u"Congruency Audio",
0x0091: u"Siemens Business Communications SBC24",
0x0092: u"Sonic Foundry Dolby AC3 SPDIF",
0x0093: u"MediaSonic G.723",
0x0094: u"Aculab Prosody 8KBPS",
0x0097: u"ZyXEL ADPCM",
0x0098: u"Philips LPCBB",
0x0099: u"Studer Professional Audio AG Packed",
0x00A0: u"Malden Electronics PHONYTALK",
0x00A1: u"Racal Recorder GSM",
0x00A2: u"Racal Recorder G720.a",
0x00A3: u"Racal Recorder G723.1",
0x00A4: u"Racal Recorder Tetra ACELP",
0x00B0: u"NEC AAC",
0x00FF: u"CoreAAC Audio",
0x0100: u"Rhetorex ADPCM",
0x0101: u"BeCubed Software IRAT",
0x0111: u"Vivo G.723",
0x0112: u"Vivo Siren",
0x0120: u"Philips CELP",
0x0121: u"Philips Grundig",
0x0123: u"Digital G.723",
0x0125: u"Sanyo ADPCM",
0x0130: u"Sipro Lab Telecom ACELP.net",
0x0131: u"Sipro Lab Telecom ACELP.4800",
0x0132: u"Sipro Lab Telecom ACELP.8V3",
0x0133: u"Sipro Lab Telecom ACELP.G.729",
0x0134: u"Sipro Lab Telecom ACELP.G.729A",
0x0135: u"Sipro Lab Telecom ACELP.KELVIN",
0x0136: u"VoiceAge AMR",
0x0140: u"Dictaphone G.726 ADPCM",
0x0141: u"Dictaphone CELP68",
0x0142: u"Dictaphone CELP54",
0x0150: u"Qualcomm PUREVOICE",
0x0151: u"Qualcomm HALFRATE",
0x0155: u"Ring Zero Systems TUBGSM",
0x0160: u"Windows Media Audio Standard",
0x0161: u"Windows Media Audio 9 Standard",
0x0162: u"Windows Media Audio 9 Professional",
0x0163: u"Windows Media Audio 9 Lossless",
0x0164: u"Windows Media Audio Pro over SPDIF",
0x0170: u"Unisys NAP ADPCM",
0x0171: u"Unisys NAP ULAW",
0x0172: u"Unisys NAP ALAW",
0x0173: u"Unisys NAP 16K",
0x0174: u"Sycom ACM SYC008",
0x0175: u"Sycom ACM SYC701 G725",
0x0176: u"Sycom ACM SYC701 CELP54",
0x0177: u"Sycom ACM SYC701 CELP68",
0x0178: u"Knowledge Adventure ADPCM",
0x0180: u"Fraunhofer IIS MPEG-2 AAC",
0x0190: u"Digital Theater Systems DTS",
0x0200: u"Creative Labs ADPCM",
0x0202: u"Creative Labs FastSpeech8",
0x0203: u"Creative Labs FastSpeech10",
0x0210: u"UHER informatic GmbH ADPCM",
0x0215: u"Ulead DV Audio",
0x0216: u"Ulead DV Audio",
0x0220: u"Quarterdeck",
0x0230: u"I-link Worldwide ILINK VC",
0x0240: u"Aureal Semiconductor RAW SPORT",
0x0249: u"Generic Passthru",
0x0250: u"Interactive Products HSX",
0x0251: u"Interactive Products RPELP",
0x0260: u"Consistent Software CS2",
0x0270: u"Sony SCX",
0x0271: u"Sony SCY",
0x0272: u"Sony ATRAC3",
0x0273: u"Sony SPC",
0x0280: u"Telum Audio",
0x0281: u"Telum IA Audio",
0x0285: u"Norcom Voice Systems ADPCM",
0x0300: u"Fujitsu TOWNS SND",
0x0350: u"Micronas SC4 Speech",
0x0351: u"Micronas CELP833",
0x0400: u"Brooktree BTV Digital",
0x0401: u"Intel Music Coder",
0x0402: u"Intel Audio",
0x0450: u"QDesign Music",
0x0500: u"On2 AVC0 Audio",
0x0501: u"On2 AVC1 Audio",
0x0680: u"AT&T Labs VME VMPCM",
0x0681: u"AT&T Labs TPC",
0x08AE: u"ClearJump Lightwave Lossless",
0x1000: u"Olivetti GSM",
0x1001: u"Olivetti ADPCM",
0x1002: u"Olivetti CELP",
0x1003: u"Olivetti SBC",
0x1004: u"Olivetti OPR",
0x1100: u"Lernout & Hauspie",
0x1101: u"Lernout & Hauspie CELP",
0x1102: u"Lernout & Hauspie SBC8",
0x1103: u"Lernout & Hauspie SBC12",
0x1104: u"Lernout & Hauspie SBC16",
0x1400: u"Norris Communication",
0x1401: u"ISIAudio",
0x1500: u"AT&T Labs Soundspace Music Compression",
0x1600: u"Microsoft MPEG ADTS AAC",
0x1601: u"Microsoft MPEG RAW AAC",
0x1608: u"Nokia MPEG ADTS AAC",
0x1609: u"Nokia MPEG RAW AAC",
0x181C: u"VoxWare MetaVoice RT24",
0x1971: u"Sonic Foundry Lossless",
0x1979: u"Innings Telecom ADPCM",
0x1FC4: u"NTCSoft ALF2CD ACM",
0x2000: u"Dolby AC3",
0x2001: u"DTS",
0x4143: u"Divio AAC",
0x4201: u"Nokia Adaptive Multi-Rate",
0x4243: u"Divio G.726",
0x4261: u"ITU-T H.261",
0x4263: u"ITU-T H.263",
0x4264: u"ITU-T H.264",
0x674F: u"Ogg Vorbis Mode 1",
0x6750: u"Ogg Vorbis Mode 2",
0x6751: u"Ogg Vorbis Mode 3",
0x676F: u"Ogg Vorbis Mode 1+",
0x6770: u"Ogg Vorbis Mode 2+",
0x6771: u"Ogg Vorbis Mode 3+",
0x7000: u"3COM NBX Audio",
0x706D: u"FAAD AAC Audio",
0x77A1: u"True Audio Lossless Audio",
0x7A21: u"GSM-AMR CBR 3GPP Audio",
0x7A22: u"GSM-AMR VBR 3GPP Audio",
0xA100: u"Comverse Infosys G723.1",
0xA101: u"Comverse Infosys AVQSBC",
0xA102: u"Comverse Infosys SBC",
0xA103: u"Symbol Technologies G729a",
0xA104: u"VoiceAge AMR WB",
0xA105: u"Ingenient Technologies G.726",
0xA106: u"ISO/MPEG-4 Advanced Audio Coding (AAC)",
0xA107: u"Encore Software Ltd's G.726",
0xA108: u"ZOLL Medical Corporation ASAO",
0xA109: u"Speex Voice",
0xA10A: u"Vianix MASC Speech Compression",
0xA10B: u"Windows Media 9 Spectrum Analyzer Output",
0xA10C: u"Media Foundation Spectrum Analyzer Output",
0xA10D: u"GSM 6.10 (Full-Rate) Speech",
0xA10E: u"GSM 6.20 (Half-Rate) Speech",
    0xA10F: u"GSM 6.60 (Enhanced Full-Rate) Speech",
0xA110: u"GSM 6.90 (Adaptive Multi-Rate) Speech",
0xA111: u"GSM Adaptive Multi-Rate WideBand Speech",
0xA112: u"Polycom G.722",
0xA113: u"Polycom G.728",
0xA114: u"Polycom G.729a",
0xA115: u"Polycom Siren",
0xA116: u"Global IP Sound ILBC",
0xA117: u"Radio Time Time Shifted Radio",
0xA118: u"Nice Systems ACA",
0xA119: u"Nice Systems ADPCM",
0xA11A: u"Vocord Group ITU-T G.721",
0xA11B: u"Vocord Group ITU-T G.726",
0xA11C: u"Vocord Group ITU-T G.722.1",
0xA11D: u"Vocord Group ITU-T G.728",
0xA11E: u"Vocord Group ITU-T G.729",
0xA11F: u"Vocord Group ITU-T G.729a",
0xA120: u"Vocord Group ITU-T G.723.1",
0xA121: u"Vocord Group LBC",
0xA122: u"Nice G.728",
0xA123: u"France Telecom G.729 ACM Audio",
0xA124: u"CODIAN Audio",
0xCC12: u"Intel YUV12 Codec",
0xCFCC: u"Digital Processing Systems Perception Motion JPEG",
0xD261: u"DEC H.261",
0xD263: u"DEC H.263",
0xFFFE: u"Extensible Wave Format",
0xFFFF: u"Unregistered",
}
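# Illustrative usage (editor's sketch): resolve a WAVE format tag to a display
# name, falling back to the registry's own "Unregistered" entry.
#
#     CODECS.get(0x0055, CODECS[0xFFFF])    # -> u"MP3 - MPEG Layer III"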
|
ronie/script.cu.lrclyrics
|
resources/lib/mutagen_culrc/asf/_util.py
|
Python
|
gpl-2.0
| 10,741
|
[
"CRYSTAL"
] |
e3ede346dce3b0472abd13a642dfd756a70ac3912a989d1acfd40e0663e96d72
|
#!/usr/bin/env python
# This example shows how to load a 3D image into VTK and then reformat
# that image into a different orientation for viewing. It uses
# vtkImageReslice for reformatting the image, and uses vtkImageActor
# and vtkInteractorStyleImage to display the image. This InteractorStyle
# forces the camera to stay perpendicular to the XY plane.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Start by loading some data.
reader = vtk.vtkImageReader2()
reader.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")
reader.SetDataExtent(0, 63, 0, 63, 1, 93)
reader.SetDataSpacing(3.2, 3.2, 1.5)
reader.SetDataOrigin(0.0, 0.0, 0.0)
reader.SetDataScalarTypeToUnsignedShort()
reader.UpdateWholeExtent()
# Calculate the center of the volume
reader.Update()
(xMin, xMax, yMin, yMax, zMin, zMax) = reader.GetExecutive().GetWholeExtent(reader.GetOutputInformation(0))
(xSpacing, ySpacing, zSpacing) = reader.GetOutput().GetSpacing()
(x0, y0, z0) = reader.GetOutput().GetOrigin()
center = [x0 + xSpacing * 0.5 * (xMin + xMax),
y0 + ySpacing * 0.5 * (yMin + yMax),
z0 + zSpacing * 0.5 * (zMin + zMax)]
# Matrices for axial, coronal, sagittal, oblique view orientations
axial = vtk.vtkMatrix4x4()
axial.DeepCopy((1, 0, 0, center[0],
0, 1, 0, center[1],
0, 0, 1, center[2],
0, 0, 0, 1))
coronal = vtk.vtkMatrix4x4()
coronal.DeepCopy((1, 0, 0, center[0],
0, 0, 1, center[1],
0,-1, 0, center[2],
0, 0, 0, 1))
sagittal = vtk.vtkMatrix4x4()
sagittal.DeepCopy((0, 0,-1, center[0],
1, 0, 0, center[1],
0,-1, 0, center[2],
0, 0, 0, 1))
oblique = vtk.vtkMatrix4x4()
oblique.DeepCopy((1, 0, 0, center[0],
0, 0.866025, -0.5, center[1],
0, 0.5, 0.866025, center[2],
0, 0, 0, 1))
# Extract a slice in the desired orientation
reslice = vtk.vtkImageReslice()
reslice.SetInputConnection(reader.GetOutputPort())
reslice.SetOutputDimensionality(2)
reslice.SetResliceAxes(sagittal)
reslice.SetInterpolationModeToLinear()
# Create a greyscale lookup table
table = vtk.vtkLookupTable()
table.SetRange(0, 2000) # image intensity range
table.SetValueRange(0.0, 1.0) # from black to white
table.SetSaturationRange(0.0, 0.0) # no color saturation
table.SetRampToLinear()
table.Build()
# Map the image through the lookup table
color = vtk.vtkImageMapToColors()
color.SetLookupTable(table)
color.SetInputConnection(reslice.GetOutputPort())
# Display the image
actor = vtk.vtkImageActor()
actor.GetMapper().SetInputConnection(color.GetOutputPort())
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
window = vtk.vtkRenderWindow()
window.AddRenderer(renderer)
# Set up the interaction
interactorStyle = vtk.vtkInteractorStyleImage()
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetInteractorStyle(interactorStyle)
window.SetInteractor(interactor)
window.Render()
# Create callbacks for slicing the image
actions = {}
actions["Slicing"] = 0
def ButtonCallback(obj, event):
if event == "LeftButtonPressEvent":
actions["Slicing"] = 1
else:
actions["Slicing"] = 0
def MouseMoveCallback(obj, event):
(lastX, lastY) = interactor.GetLastEventPosition()
(mouseX, mouseY) = interactor.GetEventPosition()
if actions["Slicing"] == 1:
deltaY = mouseY - lastY
reslice.Update()
sliceSpacing = reslice.GetOutput().GetSpacing()[2]
matrix = reslice.GetResliceAxes()
# move the center point that we are slicing through
center = matrix.MultiplyPoint((0, 0, sliceSpacing*deltaY, 1))
matrix.SetElement(0, 3, center[0])
matrix.SetElement(1, 3, center[1])
matrix.SetElement(2, 3, center[2])
window.Render()
else:
interactorStyle.OnMouseMove()
interactorStyle.AddObserver("MouseMoveEvent", MouseMoveCallback)
interactorStyle.AddObserver("LeftButtonPressEvent", ButtonCallback)
interactorStyle.AddObserver("LeftButtonReleaseEvent", ButtonCallback)
# Start interaction
interactor.Start()
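# Editor's note (sketch): to view one of the other orientations defined above,
# set the reslice axes before starting the interactor, e.g.:
#
#     reslice.SetResliceAxes(coronal)    # or axial / oblique
#     window.Render()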
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/ImageProcessing/Python/ImageSlicing.py
|
Python
|
gpl-3.0
| 4,173
|
[
"VTK"
] |
aff3c023596fa0b9e391886bccd80420d39a95681a62a04efb97fa7174836bca
|