| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12-1.05M | stringlengths 5-86 | stringlengths 4-191 | stringclasses 1 value | stringclasses 15 values | int32 12-1.05M | listlengths 1-23 | stringlengths 64-64 |
"""test/conftest.py: dynamic testing configuration for pytest
See the pytest documentation for more details:
https://docs.pytest.org/en/latest/contents.html
"""
import logging
import sys
from test.test_data import (
all_modules,
all_parsers,
module_names,
parser_names,
)
version_major = sys.version_info.major
# Paths that should be ignored for all Python versions.
paths_ignore_allver = [
'cclib/progress/qt4progress.py',
]
# Paths that should run only for Python 2.7.
paths_ignore_only_2_7 = [
'cclib/bridge/cclib2pyquante.py',
]
def match_path(path, partial_paths):
"""Does the given path contain any of the stubs in partial_paths?"""
return any(partial_path in str(path)
for partial_path in partial_paths)
def pytest_ignore_collect(path, config):
"""pytest automatically runs this on every discovered path. If this
returns True for a given path, pytest will ignore it.
"""
if match_path(path, paths_ignore_allver):
return True
if version_major != 2:
if match_path(path, paths_ignore_only_2_7):
return True
return False
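# For example, match_path("cclib/progress/qt4progress.py", paths_ignore_allver)
# is True, so that module is never collected, regardless of Python version.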
def pytest_addoption(parser):
parser.addoption("--terse", action="store_true")
parser.addoption("--silent", action="store_true")
def pytest_generate_tests(metafunc):
if metafunc.function.__name__ == "test_all":
metafunc.parametrize("parsers", [{p: all_parsers[p] for p in parser_names}])
metafunc.parametrize("modules", [{p: all_modules[p] for p in module_names}])
metafunc.parametrize("terse", [metafunc.config.getoption("--terse")])
metafunc.parametrize("silent", [metafunc.config.getoption("--silent")])
metafunc.parametrize("loglevel",
[logging.DEBUG if metafunc.config.getoption("--debug")
else logging.ERROR])
metafunc.parametrize("summary", [True])
metafunc.parametrize("visual_tests", [True])
| ATenderholt/cclib | test/conftest.py | Python | bsd-3-clause | 1,965 | ["cclib"] | d4ff438c42078c9925a80317b3b4d66cd6ee51c54b9faaf97ed2ed43bffc8b1b |
#!/usr/bin/env python
# encoding: utf-8
"""
fits2itk.py
Convert a fits file to NRRD for use in Slicer3D
Assumes that the order of axes in the FITS file
is RA, Dec, Velocity.
Example Use
-----------
import fits2itk
# convert a FITS file using the default parameters
infile = "ngc1333_co.fits"
outfile = "ngc1333_co.nrrd"
fits2itk.convert(infile,outfile)
# convert a FITS file using parameters defined
# in an external file
fits2itk.convert(infile,outfile,vel_scale=1,use_conv="ngc1333_conv")
You can use the included strip_fourth_header.py to remove
any polarization axis present in your data.
Can be run from the command line as
python fits2itk.py -i ngc1333_co.fits -o ngc1333_co.nrrd
with the following options
-i : Infile -- Input (FITS) file (req)
-o : Outfile -- Output (NRRD) file (req)
-d : Datascale -- Value by which to scale intensity (opt)
-v : Velscale -- Relative scale for velocity axis (often < 1) (opt)
-u : Use Conv -- Use the specified fixed/external conversion (opt)
-s : Strip Pol -- Strip out the fourth polarization header
Does not alter original FITS file
-h : Help -- Display this help
"""
import nrrd
from astropy.io import fits
import numpy as np
import importlib
import sys,os,getopt
import strip_fourth_fits_header
def convert(infile,outfile,data_scale=1.,vel_scale=False,use_conv=False):
"""
Parameters
----------
data_scale: Constant value to rescale the data, optional
A value by which to scale the intensity of the cube,
for instance to put it in useful units.
vel_scale: Relative scale for the velocity axis, optional
By default, the velocity axis has the same scale as the
spatial axes. If set to "auto" then the velocity axis
is rescaled/regridded to have the same length as the shortest
spatial axis. Can also be used to set the scaling manually.
If your velocity axis is 10 times longer than your spatial
axes, then the auto default will use vel_scale=0.1 to
match the axes. Setting vel_scale=1 preserves the
relative scales.
use_conv: EXPERIMENTAL! Use a fixed convention for conversion.
Use values stored in an external file for the conversion of
pixels to millimeters. This allows one to convert multiple
different cubes/images and overlay them in Slicer3D
without needing to regrid/interpolate them ahead of time.
Currently EXPERIMENTAL and assumes RA/Dec/Vel
Vel can be in km/s or m/s. Use vel_scale to
manually specify (e.g. use vel_scale = 1000. for
the cubes in km/s and vel_scale = 1. for the cubes
in m/s.). Always specify vel_scale when using this
option.
"""
d,h = fits.getdata(infile,header=True)
if data_scale:
d *= data_scale
if not vel_scale: # No rescaling requested; use unit scale
vel_scale = 1.
elif vel_scale == 'auto': # Determine scale automatically
min_spatial = np.min([h['NAXIS1'],h['NAXIS2']])
vel_length = h['NAXIS3']
vel_scale = float(min_spatial)/vel_length # float() guards against integer division
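# e.g. a 256 x 256 x 2048 cube gives vel_scale = 256/2048 = 0.125, shrinking
# the velocity axis to match the shortest spatial axis (cf. the docstring note
# that a 10x longer velocity axis yields vel_scale = 0.1)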
dra = 1.
dvel = 1.
ddec = 1.
racenter = h['NAXIS1']/2.
deccenter = -1*h['NAXIS2']/2.
velcenter = -1*h['NAXIS3']/2.
if vel_scale != 1 and not use_conv:
dvel = dvel/vel_scale
#Assume FITS order is RA,Dec,Velocity
#Numpy order is Velocity, Dec, RA
#Slicer wants RA, Velocity, Dec
d = np.swapaxes(d,0,1)
d = np.swapaxes(d,0,2)
#options = {'encoding':'raw'}
#Want the _center_ of the cube at 0
spaceorigin = -1*np.array(d.shape)/2.
if use_conv:
# This line imports the dictionary defined in your convention
# file. The example included is called "ngc1333_conv"
i = importlib.import_module(use_conv)
dra = h['CDELT1']*i.c_dict['ra-mm']
ddec = h['CDELT2']*i.c_dict['dec-mm']
dvel = h['CDELT3']*i.c_dict['vel-mm']*vel_scale #Requires m/s
ra0 = i.c_dict['ra0']
dec0 = i.c_dict['dec0']
vel0 = i.c_dict['vel0']/vel_scale
racenter = ((ra0-h['CRVAL1'])*np.cos(i.c_dict['dec0']*
np.pi/180.))/h['CDELT1']+h['CRPIX1']
deccenter = -1*((dec0-h['CRVAL2'])/h['CDELT2']+h['CRPIX2'])
velcenter = -1*((vel0-h['CRVAL3'])/(h['CDELT3'])+h['CRPIX3'])
options = {}
options['space'] = 'left-posterior-superior'
options['space directions'] = [(-1*dra,0,0),(0,dvel,0),(0,0,ddec)]
options['kinds'] = ['domain','domain','domain']
spaceorigin[0] = racenter*dra
spaceorigin[1] = velcenter*dvel
spaceorigin[2] = deccenter*ddec
options['space origin'] = spaceorigin
#This could be an option. 'raw' allows import in paraview.
#'gzip' files can be a lot smaller, depending on the cube.
options['encoding'] = 'raw'
print(options)
nrrd.write(outfile,d,options=options)
def read(inputfile):
data, options = nrrd.read(inputfile)
return data, options
def main():
"""
-i : Infile -- Input (FITS) file
-o : Outfile -- Output (NRRD) file
-d : Datascale -- Value by which to scale intensity
-v : Velscale -- Relative scale for velocity axis (often < 1)
-u : Use Conv -- Use the specified fixed/external conversion
-s : Strip Pol -- Strip out the fourth polarization header
Does not alter original FITS file
-h : Help -- Display this help
"""
infile, outfile = False, False
strip_pol = False
kwargs = {}
kwargs["vel_scale"] = "auto"
try:
opts,args = getopt.getopt(sys.argv[1:],"i:o:d:v:u:sh")
except getopt.GetoptError as err:
print(str(err))
print(__doc__)
sys.exit(2)
for o,a in opts:
if o == "-i":
infile = a
elif o == "-o":
outfile = a
elif o == "-d":
kwargs["data_scale"] = float(a)
elif o == "-v":
kwargs["vel_scale"] = float(a)
elif o == "-u":
kwargs["use_conv"] = a
elif o == "-s":
strip_pol = True
elif o == "-h":
print(__doc__)
sys.exit(1)
else:
print("unhandled option: %s" % o)
print(__doc__)
sys.exit(2)
if not infile or not outfile:
print("Input or Output file not specified")
print(__doc__)
sys.exit(2)
print(kwargs)
if strip_pol:
tempfile = "temp-strip-pol.fits"
strip_fourth_fits_header.strip(infile,tempfile,clobber=True)
convert(tempfile,outfile,**kwargs)
os.remove(tempfile)
else:
convert(infile,outfile,**kwargs)
if __name__ == '__main__':
main()
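# A minimal usage sketch (not part of the original module), mirroring the
# docstring above; assumes an input cube named "example.fits" exists:
#
#     import fits2itk
#     fits2itk.convert("example.fits", "example.nrrd", vel_scale="auto")
#
# equivalent to the command-line form:
#     python fits2itk.py -i example.fits -o example.nrrd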
| jfoster17/pyfits2itk | fits2itk.py | Python | mit | 6,788 | ["ParaView"] | 3c3ccfbc3f7d66c8dd30a424492b4fed0324d896139ca41a44d8318a39aa481a |
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please also keep the ones in
core/fromnumeric.py and core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
from numpy.lib import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* "buffered" enables buffering when required.
* "c_index" causes a C-order index to be tracked.
* "f_index" causes a Fortran-order index to be tracked.
* "multi_index" causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* "common_dtype" causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* "copy_if_overlap" causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
* "delay_bufalloc" delays allocation of the buffers until
a reset() call is made. Allows "allocate" operands to
be initialized before their values are copied into the buffers.
* "external_loop" causes the `values` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* "grow_inner" allows the `value` array sizes to be made
larger than the buffer size when both "buffered" and
"external_loop" is used.
* "ranged" allows the iterator to be restricted to a sub-range
of the iterindex values.
* "refs_ok" enables iteration of reference types, such as
object arrays.
* "reduce_ok" enables iteration of "readwrite" operands
which are broadcasted, also known as reduction operands.
* "zerosize_ok" allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
"readonly", "readwrite", or "writeonly" must be specified.
* "readonly" indicates the operand will only be read from.
* "readwrite" indicates the operand will be read from and written to.
* "writeonly" indicates the operand will only be written to.
* "no_broadcast" prevents the operand from being broadcasted.
* "contig" forces the operand data to be contiguous.
* "aligned" forces the operand data to be aligned.
* "nbo" forces the operand data to be in native byte order.
* "copy" allows a temporary read-only copy if required.
* "updateifcopy" allows a temporary read-write copy if required.
* "allocate" causes the array to be allocated if it is None
in the `op` parameter.
* "no_subtype" prevents an "allocate" operand from using a subtype.
* "arraymask" indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* 'writemasked' indicates that only elements where the chosen
'arraymask' operand is True will be written to.
* "overlap_assume_elementwise" can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
copying when "copy_if_overlap" is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of "allocate" operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operand.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as "newaxis".
itershape : tuple of ints, optional
The desired shape of the iterator. This allows "allocate" operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the "delay_bufalloc" flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the "c_index" or
the "f_index" flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the "multi_index" flag,
and the property `multi_index` can be used to retrieve it.
index
When the "c_index" or "f_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_index` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern.
multi_index
When the "multi_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_multi_index` is False.
ndim : int
The iterator's dimension.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of `operands` at current iteration. Normally, this is a
tuple of array scalars, but if the flag "external_loop" is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the iterator's coordinates or index, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol::
def iter_add_py(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
def iter_add(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
while not it.finished:
addop(it[0], it[1], out=it[2])
it.iternext()
return it.operands[2]
Here is an example outer product function::
def outer_it(x, y, out=None):
mulop = np.multiply
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
op_axes=[range(x.ndim)+[-1]*y.ndim,
[-1]*x.ndim+range(y.ndim),
None])
for (a, b, c) in it:
mulop(a, b, out=c)
return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc::
def luf(lambdaexpr, *args, **kwargs):
"luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
nargs = len(args)
op = (kwargs.get('out',None),) + args
it = np.nditer(op, ['buffered','external_loop'],
[['writeonly','allocate','no_broadcast']] +
[['readonly','nbo','aligned']]*nargs,
order=kwargs.get('order','K'),
casting=kwargs.get('casting','safe'),
buffersize=kwargs.get('buffersize',0))
while not it.finished:
it[0] = lambdaexpr(*it[1:])
it.iternext()
return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> it.next()
(array(0), array(1))
>>> it2 = it.copy()
>>> it2.next()
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
without returning the result. Used in the C-style pattern do-while
pattern. For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[ 5., 6., 7.],
[ 6., 7., 8.],
[ 7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> row.next(), col.next()
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence. This argument can only be used to 'upcast' the array. For
downcasting, use the .astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy   copy=True
===== ========= ===================================================
'K'   unchanged F & C order preserved, otherwise most similar order
'A'   unchanged F order if input is F and not C, otherwise C order
'C'   C order   C order
'F'   F order   F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for `A`, see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like, zeros, ones
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #random
""")
add_newdoc('numpy.core.multiarray', 'empty_like',
"""
empty_like(a, dtype=None, order='K', subok=True)
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of the
returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=np.int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from raw binary or text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
If not provided or, equivalently, the empty string, the data will
be interpreted as binary data; otherwise, as ASCII text with
decimal numbers. Also in this latter case, this argument is
interpreted as the string separating numbers in the data; extra
whitespace between elements is also ignored.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
>>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, np.float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='')
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str
Open file object or filename.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import os
>>> fname = os.tmpnam()
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt)
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = 'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array(['w', 'o', 'r', 'l', 'd'],
dtype='|S1')
""")
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
concatenate((a1, a2, ...), axis=0)
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data = [0 1 2 2 3 4],
mask = False,
fill_value = 999999)
>>> np.ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
""")
add_newdoc('numpy.core', 'inner',
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[ 7., 0.],
[ 0., 7.]])
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use ``linspace`` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified, `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions.
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'where',
"""
where(condition, [x, y])
Return elements, either from `x` or `y`, depending on `condition`.
If only `condition` is given, return ``condition.nonzero()``.
Parameters
----------
condition : array_like, bool
When True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
out : ndarray or tuple of ndarrays
If both `x` and `y` are specified, the output array contains
elements of `x` where `condition` is True, and elements from
`y` elsewhere.
If only `condition` is given, return the tuple
``condition.nonzero()``, the indices where `condition` is True.
See Also
--------
nonzero, choose
Notes
-----
If `x` and `y` are given and input arrays are 1-D, `where` is
equivalent to::
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
Examples
--------
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
>>> np.where([[0, 1], [1, 0]])
(array([0, 1]), array([1, 0]))
>>> x = np.arange(9.).reshape(3, 3)
>>> np.where( x > 5 )
(array([2, 2, 2]), array([0, 1, 2]))
>>> x[np.where( x > 3.0 )] # Note: result is 1D.
array([ 4., 5., 6., 7., 8.])
>>> np.where(x < 5, x, -1) # Note: broadcasting.
array([[ 0., 1., 2.],
[ 3., 4., -1.],
[-1., -1., -1.]])
Find the indices of elements of `x` that are in `goodvalues`.
>>> goodvalues = [3, 4, 7]
>>> ix = np.in1d(x.ravel(), goodvalues).reshape(x.shape)
>>> ix
array([[False, False, False],
[ True, True, False],
[False, True, False]], dtype=bool)
>>> np.where(ix)
(array([1, 1, 2]), array([0, 1, 1]))
""")
add_newdoc('numpy.core.multiarray', 'lexsort',
"""
lexsort(keys, axis=-1)
Perform an indirect sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> print(ind)
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
""")
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
can_cast(from, totype, casting = 'safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
totype : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
Starting in NumPy 1.9, `can_cast` returns False in 'safe' casting mode
for an integer/float dtype and a string dtype if the string dtype length
is not long enough to store the maximum integer/float value converted to
a string. Previously, `can_cast` in 'safe' mode returned True for an
integer/float dtype and a string dtype of any length.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, np.complex)
True
>>> np.can_cast(np.complex, np.float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric and associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, `promote_types` returns a valid string length
when given an integer or float dtype as one argument and a string dtype
as the other. Previously it always returned the input string dtype, even
if it wasn't long enough to store the maximum integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
""")
add_newdoc('numpy.core.multiarray', 'min_scalar_type',
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'result_type',
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
""")
add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core', 'dot',
"""
dot(a, b, out=None)
Dot product of two arrays.
For 2-D arrays it is equivalent to matrix multiplication, and for 1-D
arrays to inner product of vectors (without complex conjugation). For
N dimensions it is a sum product over the last axis of `a` and
the second-to-last of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
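When performance matters, a preallocated C-contiguous output array of
the right dtype can be passed via `out` (a small sketch):
>>> a = np.ones((2, 2))
>>> out = np.empty((2, 2))
>>> np.dot(a, a, out=out)
array([[ 2.,  2.],
       [ 2.,  2.]])
>>> out
array([[ 2.,  2.],
       [ 2.,  2.]])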
""")
add_newdoc('numpy.core', 'matmul',
"""
matmul(a, b, out=None)
Matrix product of two arrays.
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
Multiplication by a scalar is not allowed, use ``*`` instead. Note that
multiplying a stack of matrices with a vector will result in a stack of
vectors, but matmul will not recognize it as such.
``matmul`` differs from ``dot`` in two important ways.
- Multiplication by scalars is not allowed.
- Stacks of matrices are broadcast together as if the matrices
were elements.
.. warning::
This function is preliminary and included in NumPy 1.10.0 for testing
and documentation. Its semantics will not change, but the number and
order of the optional arguments will.
.. versionadded:: 1.10.0
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the matrix product of `a` and `b`.  If `a` and `b` are both
1-D arrays then a scalar is returned; otherwise an array is
returned. If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
If scalar value is passed.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The matmul function implements the semantics of the `@` operator introduced
in Python 3.5 following PEP 465.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
For 2-D mixed with 1-D, the result is the usual matrix-vector product.
>>> a = [[1, 0], [0, 1]]
>>> b = [1, 2]
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays:
>>> a = np.arange(2*2*4).reshape((2,2,4))
>>> b = np.arange(2*2*4).reshape((2,4,2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a,b)[0,1,1]
98
>>> sum(a[0,1,:] * b[0,:,1])
98
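A stack of matrices times a 1-D vector gives a stack of vectors, as
noted above (the appended 1 is removed from the result; illustrative):
>>> a = np.ones((2, 3, 4))
>>> np.matmul(a, np.ones(4)).shape
(2, 3)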
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: Scalar operands are not allowed, use '*' instead
""")
add_newdoc('numpy.core', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe')
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
This is the core C function.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum, dot, inner, outer, tensordot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscripts labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as ``np.swapaxes(a, 0, 2)`` and
``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> # since version 1.10.0
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
""")
add_newdoc('numpy.core', 'vdot',
"""
vdot(a, b)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (see `ndarray.flat` for
assignment examples).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[ -1.13698227e+002, 4.25087011e-303],
[ 2.88528414e-306, 3.27025015e-309]]) #random
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
* data: A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as ``self.__array_interface__['data'][0]``.
* shape (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to dtype('p') on this
platform. This base-type could be c_int, c_long, or c_longlong
depending on the platform. The c_intp type is defined accordingly in
numpy.ctypeslib. The ctypes array contains the shape of the underlying
array.
* strides (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
* data_as(obj): Return the data pointer cast to a particular c-types object.
For example, calling self._as_parameter_ is equivalent to
self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
self.data_as(ctypes.POINTER(ctypes.c_double)).
* shape_as(obj): Return the shape tuple as an array of some other c-types
type. For example: self.shape_as(ctypes.c_short).
* strides_as(obj): Return the strides tuple as an array of some other
c-types type. For example: self.strides_as(ctypes.c_longlong).
Be careful using the ctypes attribute - especially on temporary
arrays or arrays constructed on the fly. For example, calling
``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
that is invalid because the array created as (a+b) is deallocated
before the next Python statement. You can avoid this problem using
either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
hold a reference to the array until ct is deleted or re-assigned.
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``_as_parameter_`` attribute, which will
return an integer equal to the ``data`` attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
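A sketch of the safe pattern for temporaries described above (keep a
reference to the array alive while the pointer is in use):
>>> a = np.array([1.0, 2.0])
>>> b = np.array([3.0, 4.0])
>>> c = a + b    # keep `c` referenced while using the pointer
>>> ptr = c.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
>>> ptr[0], ptr[1]
(4.0, 6.0)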
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a ValueError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
UPDATEIFCOPY (U)
This array is a copy of some other array. When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by
the user, via direct assignment to the attribute or dictionary entry,
or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays, or that ``self.strides[0] == self.itemsize``
for Fortran-style contiguous arrays.
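Examples
--------
A short illustration (the flag values shown are those of a freshly
created, contiguous array):
>>> a = np.arange(4)
>>> a.flags['C_CONTIGUOUS']
True
>>> a.flags.writeable = False
>>> a.flags['WRITEABLE']
False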
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<type 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
Notes
-----
May be used to "reshape" the array, as long as this would not
require a change in the total number of elements.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__([order])
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A'}, optional
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
only if the array already is in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, dtype, isfortran, rawdata)
For unpickling.
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isfortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind='quicksort', order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
Starting in NumPy 1.9, the astype method raises an error in 'safe'
casting mode if the string dtype being cast to is not long enough to
hold the maximum value of the integer/float array being cast.
Previously the casting was allowed even if the result was truncated.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([ 1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
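With ``casting='safe'``, a lossy conversion raises instead (message
abbreviated; the exact integer dtype is platform-dependent):
>>> x.astype(int, casting='safe')
Traceback (most recent call last):
    ...
TypeError: Cannot cast array from dtype('float64') to ...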
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements.
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
>>> A.byteswap(True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
Arrays of strings are not swapped:
>>> A = np.array(['ceg', 'fac'])
>>> A.byteswap()
array(['ceg', 'fac'],
dtype='|S3')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:numpy.copy are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[ 2., 2.],
[ 2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[ 8., 8.],
[ 8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str
A string naming the dump file.
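Examples
--------
A round-trip sketch (writes a file named ``tmp.pkl`` in the current
directory; the file name is arbitrary):
>>> a = np.arange(3)
>>> a.dump('tmp.pkl')
>>> np.load('tmp.pkl')
array([0, 1, 2])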
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([ 1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[ 1.+1.j, 0.+0.j],
[ 0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[ 1., 0.],
[ 0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[ 1., 0.],
[ 0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.item(3)
2
>>> x.item(7)
5
>>> x.item((0, 1))
1
>>> x.item((2, 2))
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and the last argument is interpreted
as *item*. Then, ``a.itemset(*args)`` is equivalent to, but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[3, 1, 7],
[2, 0, 3],
[8, 5, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'shares_memory',
"""
shares_memory(a, b, max_work=None)
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
False
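A view shares memory with its base (illustrative):
>>> x = np.arange(4)
>>> np.shares_memory(x, x[1:])
True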
""")
add_newdoc('numpy.core.multiarray', 'may_share_memory',
"""
may_share_memory(a, b, max_work=None)
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
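Examples
--------
A minimal illustration (the underlying data are unchanged; only the
dtype's byte order flips):
>>> a = np.arange(3, dtype='<i4')
>>> a.newbyteorder().dtype
dtype('>i4')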
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'copyto',
"""
copyto(dst, src, casting='same_kind', where=None)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
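Examples
--------
A small sketch of masked copying (the mask selects which elements of
`dst` receive values from `src`):
>>> dst = np.zeros(4)
>>> np.copyto(dst, np.arange(4.), where=np.array([True, False, True, False]))
>>> dst
array([ 0.,  0.,  2.,  0.])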
""")
add_newdoc('numpy.core.multiarray', 'putmask',
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data, or if references or views to it exist,
and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that has been referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]])
>>> x
array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
[ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
[ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE
can only be set to True if the array owns its own memory, or the
ultimate owner of the memory exposes a writeable buffer interface,
or is a string. (The exception for string is made so that unpickling
can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 6 Boolean flags
in use, only three of which can be changed by the user:
UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) this array is a copy of some other array (referenced
by .base). When this array is deallocated, the base array will be
updated with the contents of this array.
All flags can be accessed using their first (upper case) letter as well
as the full name.
Examples
--------
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set UPDATEIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind='quicksort', order=None)
Sort an array, in-place.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
partition: Partial sort.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([('c', 1), ('a', 2)],
dtype=[('x', '|S1'), ('y', '<i4')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The ordering of all elements in the partitions is undefined.
If provided with a sequence of kth values, it will partition all
elements indexed by those values into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str
An open file object, or a string containing a filename.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as a (possibly nested) list.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible Python type.
Parameters
----------
none
Returns
-------
y : list
The possibly nested list of array elements.
Notes
-----
The array may be recreated, ``a = np.array(a.tolist())``.
Examples
--------
>>> a = np.array([1, 2])
>>> a.tolist()
[1, 2]
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
"""))
tobytesdoc = """
a.{name}(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in either 'C' or 'Fortran',
or 'Any' order (the default is 'C'-order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
{deprecated}
Parameters
----------
order : {{'C', 'F', None}}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]])
>>> x.tobytes()
b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
"""
add_newdoc('numpy.core.multiarray', 'ndarray',
('tostring', tobytesdoc.format(name='tostring',
deprecated=
'This function is a compatibility '
'alias for tobytes. Despite its '
'name it returns bytes not '
'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
('tobytes', tobytesdoc.format(name='tobytes',
deprecated='.. versionadded:: 1.9.0')))
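# A minimal sketch (not part of the upstream docstrings): a
# tobytes/frombuffer round trip. The dtype and shape must be carried
# separately, since the raw bytes record neither.
# ``_demo_tobytes_roundtrip`` is a hypothetical helper name.
def _demo_tobytes_roundtrip():
    import numpy as np
    x = np.arange(6, dtype=np.int32).reshape(2, 3)
    raw = x.tobytes()   # C-order bytes, regardless of x's memory layout
    y = np.frombuffer(raw, dtype=np.int32).reshape(x.shape)
    return np.array_equal(x, y)   # -> True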
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect. (To change between column and
row vectors, first cast the 1-D array into a matrix object.)
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrixlib.defmatrix.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([ 2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> print(x)
[(1, 20) (3, 4)]
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: new type not compatible with array.
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['0o12', '0o36', '0o144'],
      dtype='<U5')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[10000, 0, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[20000, 2, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[10000, 0, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'digitize',
"""
digitize(x, bins, right=False)
Return the indices of the bins to which each value in input array belongs.
Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
`bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
`bins` is monotonically decreasing. If values in `x` are beyond the
bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. If right
is True, then the right bin is closed so that the index ``i`` is such
that ``bins[i-1] < x <= bins[i]`` or ``bins[i-1] >= x > bins[i]`` if `bins`
is monotonically increasing or decreasing, respectively.
Parameters
----------
x : array_like
Input array to be binned. Prior to NumPy 1.10.0, this array had to
be 1-dimensional, but can now have any shape.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
right : bool, optional
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin end is open in this
case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
monotonically increasing bins.
Returns
-------
out : ndarray of ints
Output array of indices, of same shape as `x`.
Raises
------
ValueError
If `bins` is not monotonic.
TypeError
If the type of the input is complex.
See Also
--------
bincount, histogram, unique, searchsorted
Notes
-----
If values in `x` are such that they fall outside the bin range,
attempting to index `bins` with the indices that `digitize` returns
will result in an IndexError.
.. versionadded:: 1.10.0
`np.digitize` is implemented in terms of `np.searchsorted`. This means
that a binary search is used to bin the values, which scales much better
for larger number of bins than the previous linear search. It also removes
the requirement for the input array to be 1-dimensional.
Examples
--------
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = np.digitize(x, bins)
>>> inds
array([1, 4, 3, 2])
>>> for n in range(x.size):
... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
2.5 <= 3.0 < 4.0
1.0 <= 1.6 < 2.5
>>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
>>> bins = np.array([0, 5, 10, 15, 20])
>>> np.digitize(x,bins,right=True)
array([1, 2, 3, 4, 4])
>>> np.digitize(x,bins,right=False)
array([1, 3, 3, 4, 5])
""")
add_newdoc('numpy.core.multiarray', 'bincount',
"""
bincount(x, weights=None, minlength=0)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weights[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
A minimum number of bins for the output array.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is negative.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=np.float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
""")
add_newdoc('numpy.core.multiarray', 'ravel_multi_index',
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as
indexing in row-major (C-style) or column-major
(Fortran-style) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
""")
add_newdoc('numpy.core.multiarray', 'unravel_index',
"""
unravel_index(indices, dims, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``dims``. Before version 1.6.0,
this function accepted just one index value.
dims : tuple of ints
The shape of the array to use for unraveling ``indices``.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Fortran-style) order.
.. versionadded:: 1.6.0
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
""")
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring raise a RuntimeError
If this routine does not know how to add a docstring to the object
raise a TypeError
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core.multiarray', 'packbits',
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An array of integers or booleans whose elements should be packed to
bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
""")
add_newdoc('numpy.core.multiarray', 'unpackbits',
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
Unpacks along this axis.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use np.info(). For
example, np.info(np.sin). Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the "ufuncs.rst"
file in the NumPy reference guide.
Unary ufuncs:
=============
op(X, out=None)
Apply op to X elementwise
Parameters
----------
X : array_like
Input array.
out : array_like
An array to store the output. Must be the same shape as `X`.
Returns
-------
r : array_like
`r` will have the same shape as `X`; if out is provided, `r`
will be equal to out.
Binary ufuncs:
==============
op(X, Y, out=None)
Apply `op` to `X` and `Y` elementwise. May "broadcast" to make
the shapes of `X` and `Y` congruent.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
X : array_like
First input array.
Y : array_like
Second input array.
out : array_like
An array to store the output. Must be the same shape as the
output would have.
Returns
-------
r : array_like
The return value; if out is provided, `r` will be equal to out.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is `None`, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided, a
freshly-allocated array is returned.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None, keepdims=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
keepdims : bool
Has no effect. Deprecated, and will be removed in the future.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[ 1., 0.],
[ 0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[ 1., 0.],
[ 1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[ 1., 0.],
[ 1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[ 1., 1.],
[ 0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]``, where ``indices`` interleaves a
zero before each element of ``range(1, len(a))``:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[ 12., 15., 18., 21.],
[ 12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[ 2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty((len(A), len(B)))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
`a[indices] += b`, except that results are accumulated for elements that
are indexed more than once. For example, `a[[0,0]] += 1` will only
increment the first element once because of buffering, whereas
`add.at(a, [0,0], 1)` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
::
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
::
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint), ('f2', np.int32)])
dtype([('f1', '<u4'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', '|S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', '|S1'), ('age', '|u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', '|S25'), ('age', '|u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
PEP3118 interface description of the data-type.
The format is that required by the 'descr' key in the
PEP3118 `__array_interface__` attribute.
Warning: This attribute exists specifically for PEP3118 compliance, and
is not a datatype description compatible with `np.dtype`.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
"""
Number of dimensions of the sub-array if this data type describes a
sub-array, and ``0`` otherwise.
.. versionadded:: 1.13.0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional set of "holiday" dates that will
always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False], dtype='bool')
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'is_busday',
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True], dtype='bool')
""")
add_newdoc('numpy.core.multiarray', 'busday_offset',
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03','D')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29','D')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19','D')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13','D')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22','D')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23','D')
""")
add_newdoc('numpy.core.multiarray', 'busday_count',
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
... np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
""")
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
Normalizes an axis index, `axis`, such that it is a valid positive index into
the shape of array with `ndim` dimensions. Raises an AxisError with an
appropriate message if this is not possible.
Used internally by all axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int
The un-normalized index of the axis. Can be negative
ndim : int
The number of dimensions of the array that `axis` should be normalized
against
msg_prefix : str
A prefix to put before the message, typically the name of the argument
Returns
-------
normalized_axis : int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If the axis index is invalid, i.e. when `-ndim <= axis < ndim` is false.
Examples
--------
>>> normalize_axis_index(0, ndim=3)
0
>>> normalize_axis_index(1, ndim=3)
1
>>> normalize_axis_index(-1, ndim=3)
2
>>> normalize_axis_index(3, ndim=3)
Traceback (most recent call last):
...
AxisError: axis 3 is out of bounds for array of dimension 3
>>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
Traceback (most recent call last):
...
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
##############################################################################
#
# nd_grid instances
#
##############################################################################
add_newdoc('numpy.lib.index_tricks', 'mgrid',
"""
`nd_grid` instance which returns a dense multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
(or fleshed out) mesh-grid when indexed, so that each returned argument
has the same shape. The dimensions and number of the output arrays are
equal to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid `ndarrays` all of the same dimensions
See Also
--------
numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
ogrid : like mgrid but returns open (not fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> np.mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> np.mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
""")
add_newdoc('numpy.lib.index_tricks', 'ogrid',
"""
`nd_grid` instance which returns an open multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
(i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
of each returned array is greater than 1. The dimension and number of the
output arrays are equal to the number of indexing dimensions. If the step
length is not a complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
See Also
--------
np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> from numpy import ogrid
>>> ogrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only," or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('put',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('round',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('std',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('take',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('var',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
##############################################################################
#
# Documentation for other scalar classes
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'bool_',
"""NumPy's Boolean type. Character code: ``?``. Alias: bool8""")
add_newdoc('numpy.core.numerictypes', 'complex64',
"""
Complex number type composed of two 32-bit floats. Character code: 'F'.
""")
add_newdoc('numpy.core.numerictypes', 'complex128',
"""
Complex number type composed of two 64-bit floats. Character code: 'D'.
Python complex compatible.
""")
add_newdoc('numpy.core.numerictypes', 'complex256',
"""
Complex number type composed of two 128-bit floats. Character code: 'G'.
""")
add_newdoc('numpy.core.numerictypes', 'float32',
"""
32-bit floating-point number. Character code 'f'. C float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float64',
"""
64-bit floating-point number. Character code 'd'. Python float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float96',
"""
96-bit extended-precision floating-point number. Character code: 'g'.
Available only on platforms where the C long double is 96 bits.
""")
add_newdoc('numpy.core.numerictypes', 'float128',
"""
128-bit floating-point number. Character code: 'g'. C long double
compatible.
""")
add_newdoc('numpy.core.numerictypes', 'int8',
"""8-bit integer. Character code ``b``. C char compatible.""")
add_newdoc('numpy.core.numerictypes', 'int16',
"""16-bit integer. Character code ``h``. C short compatible.""")
add_newdoc('numpy.core.numerictypes', 'int32',
"""32-bit integer. Character code 'i'. C int compatible.""")
add_newdoc('numpy.core.numerictypes', 'int64',
"""64-bit integer. Character code 'l'. Python int compatible.""")
add_newdoc('numpy.core.numerictypes', 'object_',
"""Any Python object. Character code: 'O'.""")
|
behzadnouri/numpy
|
numpy/add_newdocs.py
|
Python
|
bsd-3-clause
| 227,238
|
[
"Brian"
] |
519f1f04c6aeab27a45b52187ae407a9ec1d88246977f239937135b797a2af8f
|
"""
Copyright (C) <2010> Autin L.
This file ePMV_git/patch/crystalCommands.py is part of ePMV.
ePMV is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ePMV is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ePMV. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
# $Header: /opt/cvs/python/packages/share1.5/Pmv/crystalCommands.py,v 1.4 2009/05/22 18:40:03 vareille Exp $
#
# $Id: crystalCommands.py,v 1.4 2009/05/22 18:40:03 vareille Exp $
#
"""
Displays Unit Cell and Packing when Crystal Info is available in the input CIF file.
NOTE: this code is currently broken.
"""
from symserv.spaceGroups import spaceGroups
import numpy
from mglutil.math.crystal import Crystal
from Pmv.mvCommand import MVCommand
from ViewerFramework.VFCommand import CommandGUI
from mglutil.gui.InputForm.Tk.gui import InputFormDescr
import Pmw, Tkinter
import tkMessageBox
from DejaVu.Box import Box
from mglutil.util.callback import CallBackFunction
def instanceMatricesFromGroup(molecule):
returnMatrices = [numpy.eye(4,4)]
crystal = Crystal(molecule.cellLength, molecule.cellAngles)
spgroup = molecule.spaceGroup.upper()
if spgroup[-1] == " ":
spgroup= spgroup[:-1]
matrices = spaceGroups[spgroup]
for matrix in matrices:
tmpMatrix = numpy.eye(4, 4)
tmpMatrix[:3, :3] = matrix[0]
tmpMatrix[:3, 3] = crystal.toCartesian(matrix[1])
returnMatrices.append(tmpMatrix)
molecule.crystal = crystal
return returnMatrices
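# Illustrative use of the helper above (assumes `mol` was loaded from a CIF
# file and so carries cellLength, cellAngles and spaceGroup, exactly as the
# GUI code below expects):
# matrices = instanceMatricesFromGroup(mol)
# mol.geomContainer.geoms['master'].Set(instanceMatrices=matrices)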
# what about using upy instead of Tk
class CrystalCommand(MVCommand):
def guiCallback(self):
molNames = []
for mol in self.vf.Mols:
if hasattr(mol, 'spaceGroup'):
molNames.append(mol.name)
if not molNames:
tkMessageBox.showinfo("Crystal Info is Needed", "No Molecule in the Viewer has Crystal Info.")
return
ifd = InputFormDescr(title='Crystal Info')
ifd.append({'name':'moleculeList',
'widgetType':Pmw.ScrolledListBox,
'tooltip':'Select a molecule with Crystal Info.',
'wcfg':{'label_text':'Select Molecule: ',
'labelpos':'nw',
'items':molNames,
'listbox_selectmode':'single',
'listbox_exportselection':0,
'usehullsize': 1,
'hull_width':100,'hull_height':150,
'listbox_height':5},
'gridcfg':{'sticky':'nsew', 'row':1, 'column':0}})
val = self.vf.getUserInput(ifd, modal=1, blocking=1)
if val:
molecule = self.vf.getMolFromName(val['moleculeList'][0])
matrices = instanceMatricesFromGroup(molecule)
geom = molecule.geomContainer.geoms['master']
geom.Set(instanceMatrices=matrices)
if not molecule.geomContainer.geoms.has_key('Unit Cell'):
fractCoords=((1,1,0),(0,1,0),(0,0,0),(1,0,0),(1,1,1),(0,1,1),
(0,0,1),(1,0,1))
coords = []
coords = molecule.crystal.toCartesian(fractCoords)
box=Box('Unit Cell', vertices=coords)
self.vf.GUI.VIEWER.AddObject(box, parent=geom)
molecule.geomContainer.geoms['Unit Cell'] = box
ifd = InputFormDescr(title='Crystal Options')
visible = molecule.geomContainer.geoms['Unit Cell'].visible
if visible:
showState = 'active'
else:
showState = 'normal'
ifd.append({'name': 'Show Cell',
'widgetType':Tkinter.Checkbutton,
'text': 'Hide Unit Cell',
'state':showState,
'gridcfg':{'sticky':Tkinter.W},
'command': CallBackFunction(self.showUnitCell, molecule)})
ifd.append({'name': 'Show Packing',
'widgetType':Tkinter.Checkbutton,
'text': 'Hide Packing',
'state':'active',
'gridcfg':{'sticky':Tkinter.W},
'command': CallBackFunction(self.showPacking, molecule)})
val = self.vf.getUserInput(ifd, modal=0, blocking=1)
if not val:
geom.Set(instanceMatrices=[numpy.eye(4,4)])
molecule.geomContainer.geoms['Unit Cell'].Set(visible=False)
def __call__(self, nodes, **kw):
nodes = self.vf.expandNodes(nodes)
if isinstance(nodes, str):
self.nodeLogString = "'" + nodes +"'"
apply(self.doitWrapper, (nodes,), kw)
def doit(self, nodes, showPacking = False, **kw):
if nodes is None or not nodes:
return
# Check the validity of the input nodes
molecules = nodes.top.uniq()
molecule = molecules[0]
if not hasattr(molecule, 'spaceGroup'):
return
matrices = instanceMatricesFromGroup(molecule)
geom = molecule.geomContainer.geoms['master']
geom.Set(instanceMatrices=matrices)  # packing
if not molecule.geomContainer.geoms.has_key('Unit Cell'):
fractCoords=((1,1,0),(0,1,0),(0,0,0),(1,0,0),(1,1,1),(0,1,1),
(0,0,1),(1,0,1))
coords = []
coords = molecule.crystal.toCartesian(fractCoords)
box=Box('Unit Cell', vertices=coords)
self.vf.GUI.VIEWER.AddObject(box, parent=geom)
molecule.geomContainer.geoms['Unit Cell'] = box
def showUnitCell(self, molecule):
visible = not molecule.geomContainer.geoms['Unit Cell'].visible
molecule.geomContainer.geoms['Unit Cell'].Set(visible=visible)
def showPacking(self, molecule):
geom = molecule.geomContainer.geoms['master']
if len(geom.instanceMatricesFortran) >= 2:
geom.Set(instanceMatrices=[numpy.eye(4,4)])
else:
matrices = instanceMatricesFromGroup(molecule)
geom.Set(instanceMatrices=matrices)
CrystalCommandGUI = CommandGUI()
CrystalCommandGUI.addMenuCommand('menuRoot', 'Display', 'Crystal')
commandList = [{'name':'crystalCommand','cmd':CrystalCommand(),'gui':CrystalCommandGUI}]
def initModule(viewer):
for _dict in commandList:
viewer.addCommand(_dict['cmd'],_dict['name'],_dict['gui'])
|
corredD/ePMV
|
patch/crystalCommands.py
|
Python
|
gpl-3.0
| 6,745
|
[
"CRYSTAL"
] |
74033727bf59f8b86d8afdbfd1aab64f3a2e6f7349bcb9a52111eecdf8a1a1e9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""tree_cable.py: A depth 10 binary tree like cable with following properties.
Depth Number Length (microns) Diameter (microns)
==========================================================================
0 1 32.0 16.0
1 2 25.4 10.08
2 4 20.16 6.35
3 8 16.0 4.0
4 16 12.7 2.52
5 32 10.08 1.587
6 64 8.0 1.0
7 128 6.35 0.63
8 256 5.04 0.397
9 512 4.0 0.25
The membrane properties are :
RA = 1.0 ohms meter = 100 ohms cm
RM = 4.0 ohms meter^2 = 40000 ohms cm^2
CM = 0.01 Farads/meter^2 = 1.0 uf/cm^2
EM = -0.065 Volts = -65 mV
Last modified: Sat Jan 18, 2014 05:01PM
"""
from __future__ import print_function
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import moose
import moose.utils as utils
import compartment as comp
import pylab
import numpy as np
import moose.backend.graphviz as graphviz
def nextValuePowerOf2Law( d1, power=2.0/3.0 ):
''' Given a value, compute the next value using 2^power law '''
return pow(pow(d1, 1.0/power)/2.0, power)
def testPowerLaw():
"""Test power law """
l = [ 32.0 ]
d = [ 16.0 ]
print(nextValuePowerOf2Law( d[-1] ))
print(nextValuePowerOf2Law( l[-1], 1.0/3.0 ))
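# Worked check against the table in the module docstring (values rounded as
# shown there): with the default power 2/3 the rule reduces to d/2**(2/3),
# so nextValuePowerOf2Law(16.0) ~= 10.08 (the level-1 diameter); with power
# 1/3 it reduces to l/2**(1/3), so nextValuePowerOf2Law(32.0, 1.0/3.0) ~= 25.4
# (the level-1 length).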
class BinaryCable( ):
def __init__(self, depth):
''' init '''
self.depth = depth
self.size = pow(2, self.depth) - 1
self.compLenAtLevelZero = 32e-6
self.compDiamAtLevelZero = 16.0e-6
self.compLengthList = [ self.compLenAtLevelZero ]
self.compDiamList = [ self.compDiamAtLevelZero ]
self.cablePath = '/cable'
moose.Neutral(self.cablePath)
self.tablePath = '/data'
moose.Neutral(self.tablePath)
self.stimTables = []
def buildParameterLists(self):
''' Build list of parameters in moose '''
while len(self.compDiamList) < self.depth:
self.compDiamList.append(
nextValuePowerOf2Law(self.compDiamList[-1] )
)
while len(self.compLengthList) < self.depth:
self.compLengthList.append(
nextValuePowerOf2Law(self.compLengthList[-1], 1.0/3.0)
)
def buildCable(self, args):
''' Build binary cable '''
self.args = args
self.buildParameterLists()
# Cable is a list of lists.
self.cable = list()
for n, (l, d) in enumerate(zip(self.compLengthList, self.compDiamList)):
utils.dump("STEP"
, "Binary tree level {}: length {}, diameter {}".format(
n, l, d
)
)
noOfCompartments = pow(2, n)
compartmentList = []
for i in range(noOfCompartments):
compPath = '{}/comp_{}_{}'.format(self.cablePath, n, i)
m = comp.MooseCompartment( compPath, l, d, args )
compartmentList.append( m.mc_ )
self.cable.append( compartmentList )
self.connectCable()
def connectCable(self):
''' Connect the binary tree cable '''
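# Children are stored level by level: the two children of parent ii at
# level i sit at indices 2*ii and 2*ii + 1 of level i+1.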
for i, parentList in enumerate(self.cable[:-1]):
childrenList = self.cable[i+1]
for ii, p in enumerate(parentList):
leftChild = childrenList[ 2*ii + 0 ]
rightChild = childrenList[ 2*ii + 1 ]
moose.connect( p, 'raxial', leftChild, 'axial')
moose.connect( p, 'raxial', rightChild, 'axial' )
def setupDUT( self ):
''' Setup cable for recording '''
# Create a pulse input
moose.Neutral( self.tablePath )
stim = moose.PulseGen( '{}/input'.format( self.tablePath) )
stim.level[0] = self.args['inj']
stim.width[0] = self.args['run_time']
# Inject the current from stim to first compartment.
moose.connect( stim, 'output', self.cable[0][0], 'injectMsg' )
# Fill the data from stim into table.
inputTable = moose.Table( '{}/inputTable'.format( self.tablePath ) )
self.stimTables.append( inputTable )
moose.connect( inputTable, 'requestOut', stim, 'getOutputValue' )
def recordAt( self, depth, index ):
''' Parameter index is python list-like index. Index -1 is the last
elements in the list
'''
utils.dump( "RECORD"
, "Adding probe at index {} and depth {}".format(index, depth)
)
c = self.cable[depth][index]
t = moose.Table( '{}/output_at_{}'.format( self.tablePath, index ))
moose.connect( t, 'requestOut', c, 'getVm' )
return t
def setupSolver(self, path = '/hsolve'):
"""Setting up HSolver """
hsolve = moose.HSolve( path )
hsolve.dt = self.simDt
moose.setClock(1, self.simDt)
moose.useClock(1, hsolve.path, 'process')
hsolve.target = self.cablePath
def simulate(self, simTime, simDt, plotDt=None):
'''Simulate the cable
'''
self.simDt = simDt
self.setupDUT( )
# Setup clocks
moose.setClock( 0, self.simDt )
# Use clocks
moose.useClock( 0, '/##', 'process' )
moose.useClock( 0, '/##', 'init' )
utils.dump("STEP"
, [ "Simulating cable for {} sec".format(simTime)
, " simDt: %s" % self.simDt
]
)
utils.verify( )
moose.reinit( )
self.setupSolver( )
moose.start( simTime )
def main( args ):
# d is depth of cable.
d = args['tree_depth']
assert d > 0, "Cable depth must be positive"
binCable = BinaryCable( depth = d )
binCable.buildCable( args )
table0 = binCable.recordAt( depth = 0, index = 0 )
table1 = binCable.recordAt( depth = d-1, index = -1)
print("[STIM] Simulating a cable with depth {}".format(d))
binCable.simulate( simTime = args['run_time'], simDt = args['dt'] )
#utils.plotTables( [ table0, table1 ]
# , file = args['output']
# , xscale = args['dt']
# )
graphviz.writeGraphviz(__file__+".dot") #, compartment_shape='point')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description = 'Rallpacks1: A cable with passive compartments'
)
parser.add_argument( '--tau'
, default = 0.04
, help = 'Time constant of membrane'
)
parser.add_argument( '--run_time'
, default = 0.25
, help = 'Simulation run time'
)
parser.add_argument( '--dt'
, default = 5e-5
, help = 'Step time during simulation'
)
parser.add_argument( '--Em'
, default = -65e-3
, help = 'Resting potential of membrane'
)
parser.add_argument( '--RA'
, default = 1.0
, help = 'Axial resistivity'
)
parser.add_argument( '--RM'
, default = 4.0
, help = 'Membrane resistivity.'
)
parser.add_argument( '--lambda'
, default = 1e-3
, help = 'Electrotonic length constant (lambda) of the cable'
)
parser.add_argument( '--x'
, default = 1e-3
, help = 'Position along the cable at which to record membrane potential'
)
parser.add_argument( '--length'
, default = 1e-3
, help = 'Length of the cable'
)
parser.add_argument( '--diameter'
, default = 1e-6
, help = 'Diameter of cable'
)
parser.add_argument( '--inj'
, default = 1e-10
, help = 'Current injected at one end of the cable'
)
parser.add_argument( '--tree_depth'
, default = 10
, help = 'Depth of binary tree.'
)
parser.add_argument( '--output'
, default = None
, help = 'Store simulation results to this file'
)
args = parser.parse_args()
main( vars(args) )
|
dharmasam9/moose-core
|
tests/python/Rallpacks/rallpacks_tree_cable.py
|
Python
|
gpl-3.0
| 8,350
|
[
"MOOSE"
] |
e943f55079601b26978a683a8bf55226d4db02f419e51ccd4dfa3f42c5f002b8
|
""" Mapping the CATMAID datamodel to a graph database
Each project has its own annotation graph.
- Can I point from one graph to the vertex of another graph?
- Classes could be derived from Ontologies
http://bioportal.bioontology.org/
"""
import time
from bulbs.model import Node
from bulbs.property import Property, String, Integer, Float
from bulbs.model import Relationship
class User(Node):
element_type = "user"
username = Property(String, nullable=False)
password = Property(String, nullable=False)
def after_created(self):
# include code to create relationships and to index the node
pass
class Concept(Node):
element_type = "concept"
name = Property(String, nullable=False)
creation_time = Property(Float, default="current_timestamp", nullable=False)
edition_time = Property(Float, default="current_timestamp", nullable=False)
def current_timestamp(self):
return time.time()
class Group(Concept):
element_type = "group"
class Neuron(Concept):
element_type = "neuron"
class Skeleton(Concept):
element_type = "skeleton"
class Tag(Concept):
element_type = "tag"
# was: label
class Root(Concept):
element_type = "root"
class Synapse(Concept):
element_type = "synapse"
class PresynapticTerminal(Concept):
element_type = "presynaptic_terminal"
class PostsynapticTerminal(Concept):
element_type = "postsynaptic_terminal"
# new classes
class Mitochondrion(Concept):
element_type = "mitochondrion"
# PartOf Neuron/Skeleton ?
class SynapticVesicle(Concept):
element_type = "synaptic_vesicle"
# PartOf Pre/PostsynapticTerminal
class ChemicalSynapse(Synapse):
element_type = "chemical_synapse"
class ElectricalSynapse(Synapse):
element_type = "electrical_synapse"
class SynapticCleft(Concept):
element_type = "synaptic_cleft"
# PartOf Synapse
# relationships
class Relation(Relationship):
label = "relation"
creation_time = Property(Float, default="current_timestamp", nullable=False)
def current_timestamp(self):
return time.time()
class TaggedAs(Relation):
label = "tagged_as"
class PostsynapticTo(Relation):
label = "postsynaptic_to"
class PresynapticTo(Relation):
label = "presynaptic_to"
class ModelOf(Relation):
label = "model_of"
class PartOf(Relation):
label = "part_of"
# not used, because the geometry-to-annotation link uses element_of
class CreatedBy(Relationship):
label = "created_by"
creation_time = Property(Float, default="current_timestamp", nullable=False)
@property
def concept(self):
return Concept.get(self.outV)
@property
def user(self):
return User.get(self.inV)
def current_timestamp(self):
return time.time()
if __name__ == '__main__':
from bulbs.graph import Graph
g = Graph()
u = User(username="test", password="test")
|
catsop/CATMAID
|
scripts/graphdb/old_test_bulbs.py
|
Python
|
gpl-3.0
| 2,887
|
[
"NEURON"
] |
64facdbdfe37e739c92c7480831f2261a85ff1663b53e3a57c14bc306afe7441
|
"""
This module contains 'classical' methods of calculating a pore size distribution for
pores in the micropore range (<2 nm). These are derived from the Horvath-Kawazoe models.
"""
import math
import numpy
from scipy import constants
from scipy import optimize
from pygaps.characterisation.models_hk import HK_KEYS
from pygaps.characterisation.models_hk import get_hk_model
from pygaps.core.adsorbate import Adsorbate
from pygaps.core.modelisotherm import ModelIsotherm
from pygaps.core.pointisotherm import PointIsotherm
from pygaps.utilities.exceptions import CalculationError
from pygaps.utilities.exceptions import ParameterError
from pygaps.utilities.exceptions import pgError
_MICRO_PSD_MODELS = ['HK', 'HK-CY', 'RY', 'RY-CY']
_PORE_GEOMETRIES = ['slit', 'cylinder', 'sphere']
def psd_microporous(
isotherm: "PointIsotherm | ModelIsotherm",
psd_model: str = 'HK',
pore_geometry: str = 'slit',
branch: str = 'ads',
material_model: "str | dict[str, float]" = 'Carbon(HK)',
adsorbate_model: "str | dict[str, float]" = None,
p_limits: "tuple[float, float]" = None,
verbose: bool = False
) -> "dict[str, list[float]]":
r"""
Calculate the microporous size distribution using a Horvath-Kawazoe type model.
Expected pore geometry must be specified as ``pore_geometry``.
Parameters
----------
isotherm : PointIsotherm, ModelIsotherm
Isotherm for which the pore size distribution will be calculated.
psd_model : str
Pore size distribution model to use. Available are 'HK' (original Horvath-Kawazoe),
'RY' (Rege-Yang correction) or the Cheng-Yang modification to the two models ('HK-CY', 'RY-CY').
pore_geometry : str
The geometry of the adsorbent pores.
branch : {'ads', 'des'}, optional
Branch of the isotherm to use. It defaults to adsorption.
material_model : str, dict
The material model to use for PSD. It defaults to 'Carbon(HK)', the original
Horvath-Kawazoe activated carbon parameters.
adsorbate_model : str, dict
The adsorbate properties to use for PSD. If empty, properties are
automatically searched from internal database for the Adsorbate.
p_limits : tuple[float, float]
Pressure range in which to calculate PSD, defaults to [0, 0.2].
verbose : bool
Print out extra information on the calculation and plot the results.
Returns
-------
dict
A dictionary with the pore widths and the pore distributions, of the form:
- ``pore_widths`` (array) : the widths of the pores
- ``pore_distribution`` (array) : contribution of each pore width to the
overall pore distribution
Raises
------
ParameterError
When something is wrong with the function parameters.
CalculationError
When the calculation itself fails.
Notes
-----
Calculates the pore size distribution using a "classical" model, which
describes adsorption in micropores as a sequential instant filling of
increasingly wider pores. The pressure of filling for each pore is
determined by relating the global adsorption potential,
:math:`RT \ln(p/p_0)`, with the energetic potential of individual adsorbate
molecules in a pore of a particular geometry :math:`\Phi`. Calculation of
the latter is based on the Lennard-Jones 6-12 intermolecular potential,
incorporating both guest-host and guest-guest dispersion contributions
through the Kirkwood-Muller formalism. The function is then solved
numerically. These methods are necessarily approximations, as besides using
a semi-empirical mathematical model, they are also heavily dependent on the
material and adsorbate properties (polarizability and susceptibility) used
to derive dispersion coefficients.
There are two main approaches which pyGAPS implements, chosen by passing
the ``psd_model`` parameter:
- The "HK", or the original Horvath-Kawazoe method [#hk1]_.
- The "RY", or the modified Rege-Yang method [#ry1]_.
Detailed explanations for both methods can be found in
:py:func:`~pygaps.characterisation.psd_micro.psd_horvath_kawazoe` and
:py:func:`~pygaps.characterisation.psd_micro.psd_horvath_kawazoe_ry`,
respectively. Additionally for both models, the Cheng-Yang correction
[#cy1]_ can be applied by appending *"-CY"*, such as ``psd_model="HK-CY"``
or ``"RY-CY"``. This correction attempts to change the expression for the
thermodynamic potential from a Henry-type to a Langmuir-type isotherm. While
this new expression does not remain consistent at high pressures, it may
better represent the isotherm curvature at low pressure [#ry1]_.
.. math::
\Phi = RT\ln(p/p_0) + RT (1 + \frac{\ln(1-\theta)}{\theta})
Currently, three geometries are supported for each model: slit-like pores,
cylindrical pores and spherical pores, as described in the related papers
[#hk1]_ [#sf1]_ [#cy1]_ [#ry1]_.
.. caution::
A common mantra of data processing is: **garbage in = garbage out**. Only use
methods when you are aware of their limitations and shortcomings.
References
----------
.. [#hk1] G. Horvath and K. Kawazoe, "Method for Calculation of Effective Pore
Size Distribution in Molecular Sieve Carbon", J. Chem. Eng. Japan, 16, 470
1983.
.. [#sf1] A. Saito and H. C. Foley, "Curvature and Parametric Sensitivity in
Models for Adsorption in Micropores", AIChE J., 37, 429, 1991.
.. [#cy1] L. S. Cheng and R. T. Yang, "Improved Horvath-Kawazoe Equations
Including Spherical Pore Models for Calculating Micropore Size
Distribution", Chem. Eng. Sci., 49, 2599, 1994.
.. [#ry1] S. U. Rege and R. T. Yang, "Corrected Horváth-Kawazoe equations for
pore-size distribution", AIChE Journal, 46, 4, (2000) 734-750.
See Also
--------
pygaps.characterisation.psd_micro.psd_horvath_kawazoe : low level HK (Horvath-Kawazoe) method
pygaps.characterisation.psd_micro.psd_horvath_kawazoe_ry : low level RY (Rege-Yang) method
"""
# Function parameter checks
if psd_model is None:
raise ParameterError(
"Specify a model to generate the pore size"
" distribution e.g. psd_model=\"HK\""
)
if psd_model not in _MICRO_PSD_MODELS:
raise ParameterError(
f"Model {psd_model} not an option for psd. "
f"Available models are {_MICRO_PSD_MODELS}"
)
if pore_geometry not in _PORE_GEOMETRIES:
raise ParameterError(
f"Geometry {pore_geometry} not an option for pore size distribution. "
f"Available geometries are {_PORE_GEOMETRIES}"
)
if branch not in ['ads', 'des']:
raise ParameterError(
f"Branch '{branch}' not an option for PSD.", "Select either 'ads' or 'des'"
)
# Get adsorbate properties
if adsorbate_model is None:
if not isinstance(isotherm.adsorbate, Adsorbate):
raise ParameterError(
"Isotherm adsorbate is not known, cannot calculate PSD."
"Either use a recognised adsorbate (i.e. nitrogen) or "
"pass a dictionary with your adsorbate parameters."
)
adsorbate_model = {
'molecular_diameter': isotherm.adsorbate.get_prop('molecular_diameter'),
'polarizability': isotherm.adsorbate.get_prop('polarizability'),
'magnetic_susceptibility': isotherm.adsorbate.get_prop('magnetic_susceptibility'),
'surface_density': isotherm.adsorbate.get_prop('surface_density'),
'liquid_density': isotherm.adsorbate.liquid_density(isotherm.temperature),
'adsorbate_molar_mass': isotherm.adsorbate.molar_mass(),
}
# Get material properties
material_properties = get_hk_model(material_model)
# Read data in
loading = isotherm.loading(
branch=branch,
loading_basis='molar',
loading_unit='mmol',
)
if loading is None:
raise ParameterError(
"The isotherm does not have the required branch "
"for this calculation"
)
try:
pressure = isotherm.pressure(
branch=branch,
pressure_mode='relative',
)
except pgError:
raise CalculationError(
"The isotherm cannot be converted to a relative basis. "
"Is your isotherm supercritical?"
)
# If on an desorption branch, data will be reversed
if branch == 'des':
loading = loading[::-1]
pressure = pressure[::-1]
# select the maximum and minimum of the points and the pressure associated
minimum = 0
maximum = len(pressure) - 1 # As we want absolute position
# Set default values
if p_limits is None:
p_limits = (None, 0.2)
if p_limits[0]:
minimum = numpy.searchsorted(pressure, p_limits[0])
if p_limits[1]:
maximum = numpy.searchsorted(pressure, p_limits[1]) - 1
if maximum - minimum < 2: # (for 3 point minimum)
raise CalculationError(
"The isotherm does not have enough points (at least 3) "
"in the selected region."
)
pressure = pressure[minimum:maximum + 1]
loading = loading[minimum:maximum + 1]
# Call specified pore size distribution function
if psd_model in ['HK', 'HK-CY']:
pore_widths, pore_dist, pore_vol_cum = psd_horvath_kawazoe(
pressure,
loading,
isotherm.temperature,
pore_geometry,
adsorbate_model,
material_properties,
use_cy=False if psd_model == 'HK' else True,
)
elif psd_model in ['RY', 'RY-CY']:
pore_widths, pore_dist, pore_vol_cum = psd_horvath_kawazoe_ry(
pressure,
loading,
isotherm.temperature,
pore_geometry,
adsorbate_model,
material_properties,
use_cy=False if psd_model == 'RY' else True,
)
if verbose:
from pygaps.graphing.calc_graphs import psd_plot
psd_plot(
pore_widths,
pore_dist,
pore_vol_cum=pore_vol_cum,
log=False,
right=5,
method=psd_model,
)
return {
'pore_widths': pore_widths,
'pore_distribution': pore_dist,
'pore_volume_cumulative': pore_vol_cum,
'limits': (minimum, maximum),
}
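# A minimal, hedged usage sketch of the high-level function above. The
# isotherm argument is assumed to be a pyGAPS PointIsotherm measured at
# subcritical conditions; the model, geometry and pressure limits below are
# illustrative choices, not recommendations.
def _example_psd_microporous(isotherm):
    result = psd_microporous(
        isotherm,
        psd_model='RY',
        pore_geometry='cylinder',
        p_limits=(None, 0.2),  # same upper cutoff as the function default
    )
    return result['pore_widths'], result['pore_distribution']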
def psd_horvath_kawazoe(
pressure: "list[float]",
loading: "list[float]",
temperature: float,
pore_geometry: str,
adsorbate_properties: "dict[str, float]",
material_properties: "dict[str, float]",
use_cy: bool = False,
):
r"""
Calculate the pore size distribution using the Horvath-Kawazoe method.
This function should not be used with isotherms (use instead
:func:`pygaps.characterisation.psd_micro.psd_microporous`).
Parameters
----------
pressure : list[float]
Relative pressure.
loading : list[float]
Adsorbed amount in mmol/g.
temperature : float
Temperature of the experiment, in K.
pore_geometry : str
The geometry of the pore, eg. 'sphere', 'cylinder' or 'slit'.
adsorbate_properties : dict
Properties for the adsorbate in the form of::
adsorbate_properties = {
'molecular_diameter': 0, # nm
'polarizability': 0, # nm3
'magnetic_susceptibility': 0, # nm3
'surface_density': 0, # molecules/m2
'liquid_density': 0, # g/cm3
'adsorbate_molar_mass': 0, # g/mol
}
material_properties : dict
        Properties for the material in the same form
as 'adsorbate_properties'. A list of common models
can be found in .characterisation.models_hk.
    use_cy : bool
Whether to use the Cheng-Yang nonlinear Langmuir term.
Returns
-------
pore widths : array
The widths of the pores.
pore_dist : array
The distributions for each width.
pore_vol_cum : array
Cumulative pore volume.
Notes
-----
*Description*
The H-K method [#hk2]_ attempts to describe adsorption within pores by
calculation of the average potential energy for a pore and equating it to
the change in free energy upon adsorption. The method starts by assuming the
following relationship between the two:
.. math::
\Phi = RT \ln(p/p_0) = U_0 + P_a
Here :math:`U_0` is the potential describing the surface to adsorbent
interactions and :math:`P_a` is the potential describing the
adsorbate-adsorbate interactions. This relationship is derived from the
equation of the free energy of adsorption at constant temperature where the
adsorption entropy term :math:`T \Delta S^{tr}(\theta)` is assumed to be
negligible. :math:`R`, :math:`T`, and :math:`p` are the gas constant,
temperature and pressure, respectively. The expression for the guest-host
and host-host interaction in the pore is then modelled on the basis of the
Lennard-Jones 12-6 potential. For two molecules 1 and 2:
.. math::
\epsilon_{12}(z) = 4 \epsilon^{*}_{12} \Big[(\frac{\sigma}{z})^{12} - (\frac{\sigma}{z})^{6}\Big]
Where :math:`z` is intermolecular distance, :math:`\epsilon^{*}` is the
depth of the potential well and :math:`\sigma` is the zero-interaction
energy distance. The two molecules can be identical, or different species.
The distance at zero-interaction energy, commonly defined as the "rest
internuclear distance", is a function of the diameter of the molecules
involved, and is calculated as :math:`\sigma = (2/5)^{1/6} d_0`. If the two
molecules are different, :math:`d_0` is the average of the diameter of the
two, :math:`d_0=(d_g + d_h)/2` such as between the guest and host molecules.
In the case of multiple surface atom types (as for zeolites), representative
averages are used.
The depth of the potential well is obtained using the Kirkwood-Muller
formalism, which relates molecular polarizability :math:`\alpha` and
magnetic susceptibility :math:`\varkappa` to the specific dispersion
constant. For guest-host (:math:`A_{gh}`) and guest-guest (:math:`A_{gg}`)
interactions they are calculated through:
.. math::
A_{gh} = \frac{6mc^2\alpha_g\alpha_h}{\alpha_g/\varkappa_g + \alpha_h/\varkappa_h} \\
A_{gg} = \frac{3}{2} m_e c ^2 \alpha_g\varkappa_g
In the above formulas, :math:`m_e` is the mass of an electron and :math:`c`
is the speed of light in a vacuum. This potential equation
(:math:`\epsilon`) is then applied to the specific geometry of the pore
(e.g. potential of an adsorbate molecule between two infinite surface
slits). Individual molecular contributions as obtained through these
expressions are multiplied by average surface densities for the guest
(:math:`n_g`) and the host (:math:`n_h`) and then scaled to moles by using
Avogadro's number :math:`N_A`. By integrating over the specific pore
dimension (width, radius) an average potential for a specific pore size is
obtained.
*Slit pore*
The original model was derived for a slit-like pore, with each pore modelled
as two parallel infinite planes between which adsorption took place.
    [#hk2]_ The effective width of the pore is related to the characteristic
    length by :math:`W = L - d_h`, and the following relationship is derived:
.. math::
RT\ln(p/p_0) = & N_A\frac{n_h A_{gh} + n_g A_{gh} }{\sigma^{4}(L-2d_0)} \\
& \times
\Big[
\Big(\frac{\sigma^{10}}{9 d_0^9}\Big)
- \Big(\frac{\sigma^{4}}{3 d_0^3}\Big)
- \Big(\frac{\sigma^{10}}{9(L-d_0)^{9}}\Big)
+ \Big(\frac{\sigma^{4}}{3(L - d_0)^{3}}\Big)
\Big]
*Cylindrical pore*
Using the same procedure, a cylindrical model was proposed by Saito and
Foley [#sf2]_ using pore radius :math:`L` as the representative length
(therefore pore width :math:`W = 2L - d_h`), and involves a summation of
probe-wall interactions for sequential axial rings of the cylinder up to
infinity.
.. math::
RT\ln(p/p_0) = & \frac{3}{4}\pi N_A \frac{n_h A_{gh} + n_g A_{gg} }{d_0^{4}} \\
& \times
\sum^{\infty}_{k = 0} \frac{1}{k+1} \Big( 1 - \frac{d_0}{L} \Big)^{2k}
\Big[
\frac{21}{32} \alpha_k \Big(\frac{d_0}{L}\Big)^{10}
- \beta_k \Big(\frac{d_0}{L}\Big)^{4}
\Big]
    Where the constants :math:`\alpha_k` and :math:`\beta_k` are recursively
calculated from :math:`\alpha_0 = \beta_0 = 1`:
.. math::
\alpha_k = \Big( \frac{-4.5-k}{k} \Big)^2 \alpha_{k-1} \ \text{and}
\ \beta_k = \Big( \frac{-1.5-k}{k} \Big)^2 \beta_{k-1}
*Spherical pore*
Similarly, Cheng and Yang [#cy2]_ introduced an extension for spherical
pores by considering the interactions with a spherical cavity. This model
similarly uses the sphere radius :math:`L` as the representative length
    (therefore effective pore width :math:`W = 2L - d_h`). It should be noted
that realistic spherical pores would not have any communication with the
adsorbent exterior.
.. math::
RT\ln(p/p_0) = & N_A 6 \Big( n_1 \frac{A_{gh}}{4 d_0^6} + n_2 \frac{A_{gg}}{4 d_g^6} \Big)
\frac{L^3}{(L-d_0)^{3}} \\
& \times
\Big[
\Big( \frac{d_0}{L} \Big)^{12} \Big( \frac{T_9}{90} - \frac{T_8}{80} \Big)
- \Big( \frac{d_0}{L} \Big)^{6} \Big( \frac{T_3}{12} - \frac{T_2}{8} \Big)
\Big]
Here, :math:`T_x` stands for a function of the type:
.. math::
T_x = \Big[1 + (-1)^{x} \frac{L-d_0}{L} \Big]^{-x} -
\Big[1 - (-1)^{x} \frac{L-d_0}{L} \Big]^{-x}
    The population densities for the host and guest layers, :math:`n_1` and
    :math:`n_2`, are calculated from the planar values as
    :math:`n_1 = 4\pi L^2 n_h` and :math:`n_2 = 4\pi (L - d_0)^2 n_g`.
*Limitations*
The main assumptions made by using the H-K method are:
- It does not have a description of capillary condensation. This means that
the pore size distribution can only be considered accurate up to a maximum
of 5 nm.
- The surface is made up of a single layer of atoms. Furthermore, since the
      HK method relies on knowing the properties of both the surface atoms and
      the adsorbate molecules, the material should ideally be homogeneous.
- Only dispersive forces are accounted for. If the adsorbate-adsorbent
interactions have other contributions, such as charged interactions, the
Lennard-Jones potential function will not be an accurate description of
      the pore environment.
- Each pore is uniform and of infinite length. Materials with varying pore
shapes or highly interconnected networks may not give realistic results.
References
----------
.. [#hk2] G. Horvath and K. Kawazoe, "Method for Calculation of Effective Pore
Size Distribution in Molecular Sieve Carbon", J. Chem. Eng. Japan, 16, 470 (1983).
.. [#sf2] A. Saito and H. C. Foley, "Curvature and Parametric Sensitivity in
Models for Adsorption in Micropores", AIChE J., 37, 429, (1991).
.. [#cy2] L. S. Cheng and R. T. Yang, "Improved Horvath-Kawazoe Equations
Including Spherical Pore Models for Calculating Micropore Size
Distribution", Chem. Eng. Sci., 49, 2599, (1994).
"""
# Parameter checks
missing = [x for x in HK_KEYS if x not in material_properties]
if missing:
raise ParameterError(f"Adsorbent properties dictionary is missing parameters: {missing}.")
missing = [
x for x in list(HK_KEYS.keys()) + ['liquid_density', 'adsorbate_molar_mass']
if x not in adsorbate_properties
]
if missing:
raise ParameterError(f"Adsorbate properties dictionary is missing parameters: {missing}.")
# Check lengths
if len(pressure) == 0:
raise ParameterError("Empty input values!")
if len(pressure) != len(loading):
raise ParameterError("The length of the pressure and loading arrays do not match.")
# ensure numpy arrays
pressure = numpy.asarray(pressure)
loading = numpy.asarray(loading)
pore_widths = []
# Constants unpacking and calculation
d_ads = adsorbate_properties['molecular_diameter']
d_mat = material_properties['molecular_diameter']
n_ads = adsorbate_properties['surface_density']
n_mat = material_properties['surface_density']
a_ads, a_mat = _dispersion_from_dict(
adsorbate_properties, material_properties
) # dispersion constants
d_eff = (d_ads + d_mat) / 2 # effective diameter
N_over_RT = _N_over_RT(temperature) # N_av / RT
###################################################################
if pore_geometry == 'slit':
sigma = 0.8583742 * d_eff # (2/5)**(1/6)*d_eff, internuclear distance at 0 energy
sigma_p4_o3 = sigma**4 / 3 # pre-calculated constant
sigma_p10_o9 = sigma**10 / 9 # pre-calculated constant
const_coeff = (
N_over_RT * (n_ads * a_ads + n_mat * a_mat) / (sigma * 1e-9)**4
) # sigma must be in SI here
const_term = (sigma_p10_o9 / (d_eff**9) - sigma_p4_o3 / (d_eff**3)) # nm
def potential(l_pore):
return (
const_coeff / (l_pore - 2 * d_eff) * ((sigma_p4_o3 / (l_pore - d_eff)**3) -
(sigma_p10_o9 /
(l_pore - d_eff)**9) + const_term)
)
if use_cy:
pore_widths = _solve_hk_cy(pressure, loading, potential, 2 * d_eff, 1)
else:
pore_widths = _solve_hk(pressure, potential, 2 * d_eff, 1)
# width = distance between infinite slabs - 2 * surface molecule radius (=d_mat)
pore_widths = numpy.asarray(pore_widths) - d_mat
###################################################################
elif pore_geometry == 'cylinder':
const_coeff = 0.75 * constants.pi * N_over_RT * \
(n_ads * a_ads + n_mat * a_mat) / (d_eff * 1e-9)**4 # d_eff must be in SI
# to avoid unnecessary recalculations, we cache a_k and b_k values
a_ks, b_ks = [1], [1]
for k in range(1, 2000):
a_ks.append(((-4.5 - k) / k)**2 * a_ks[k - 1])
b_ks.append(((-1.5 - k) / k)**2 * b_ks[k - 1])
def potential(l_pore):
d_over_r = d_eff / l_pore # dimensionless
d_over_r_p4 = d_over_r**4 # d/L ^ 4
d_over_r_p10_k = 0.65625 * d_over_r**10 # 21/32 * d/L ^ 4
k_sum = d_over_r_p10_k - d_over_r_p4 # first value at K=0
# 25 * pore radius ensures that layer convergence is achieved
for k in range(1, int(l_pore * 25)):
k_sum = k_sum + ((1 / (k + 1) * (1 - d_over_r)**(2 * k)) *
(a_ks[k] * d_over_r_p10_k - b_ks[k] * d_over_r_p4))
return const_coeff * k_sum
if use_cy:
pore_widths = _solve_hk_cy(pressure, loading, potential, d_eff, 2)
else:
pore_widths = _solve_hk(pressure, potential, d_eff, 2)
# width = 2 * cylinder radius - 2 * surface molecule radius (=d_mat)
pore_widths = 2 * numpy.asarray(pore_widths) - d_mat
###################################################################
elif pore_geometry == 'sphere':
p_12 = 0.25 * a_mat / (d_eff * 1e-9)**6 # ads-surface potential depth
p_22 = 0.25 * a_ads / (d_ads * 1e-9)**6 # ads-ads potential depth
def potential(l_pore):
l_minus_d = l_pore - d_eff
d_over_l = d_eff / l_pore
n_1 = 4 * constants.pi * (l_pore * 1e-9)**2 * n_mat
n_2 = 4 * constants.pi * (l_minus_d * 1e-9)**2 * n_ads
def t_term(x):
return (1 + (-1)**x * l_minus_d / l_pore)**(-x) -\
(1 - (-1)**x * l_minus_d / l_pore)**(-x)
return N_over_RT * (6 * (n_1 * p_12 + n_2 * p_22) * (l_pore / l_minus_d)**3) * (
-(d_over_l**6) * (t_term(3) / 12 + t_term(2) / 8) + (d_over_l**12) *
(t_term(9) / 90 + t_term(8) / 80)
)
if use_cy:
pore_widths = _solve_hk_cy(pressure, loading, potential, d_eff, 2)
else:
pore_widths = _solve_hk(pressure, potential, d_eff, 2)
# width = 2 * sphere radius - 2 * surface molecule radius (=d_mat)
pore_widths = 2 * numpy.asarray(pore_widths) - d_mat
# finally calculate pore distribution
liquid_density = adsorbate_properties['liquid_density']
adsorbate_molar_mass = adsorbate_properties['adsorbate_molar_mass']
# Cut unneeded values
selected = slice(0, len(pore_widths))
pore_widths = pore_widths[selected]
pressure = pressure[selected]
loading = loading[selected]
avg_pore_widths = numpy.add(pore_widths[:-1], pore_widths[1:]) / 2 # nm
volume_adsorbed = loading * adsorbate_molar_mass / liquid_density / 1000 # cm3/g
pore_dist = numpy.diff(volume_adsorbed) / numpy.diff(pore_widths)
return avg_pore_widths, pore_dist, volume_adsorbed[1:]
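# A hedged sketch of calling the low-level HK routine directly with synthetic
# data. The uptake curve is a toy Langmuir-like shape and every property value
# below is an assumed, illustrative number (roughly nitrogen on a carbon slit
# at 77 K), not reference data.
def _example_psd_hk_direct():
    pressure = numpy.logspace(-6, -1, 40)  # synthetic relative pressures
    loading = 5 * pressure / (pressure + 1e-4)  # toy uptake in mmol/g
    nitrogen = {
        'molecular_diameter': 0.300,  # nm (assumed)
        'polarizability': 1.76e-3,  # nm3 (assumed)
        'magnetic_susceptibility': 3.6e-6,  # nm3 (assumed)
        'surface_density': 6.7e18,  # molecules/m2 (assumed)
        'liquid_density': 0.807,  # g/cm3 at 77 K
        'adsorbate_molar_mass': 28.01,  # g/mol
    }
    carbon = {
        'molecular_diameter': 0.34,  # nm (assumed)
        'polarizability': 1.02e-3,  # nm3 (assumed)
        'magnetic_susceptibility': 1.35e-5,  # nm3 (assumed)
        'surface_density': 3.845e19,  # molecules/m2 (assumed)
    }
    return psd_horvath_kawazoe(pressure, loading, 77.355, 'slit', nitrogen, carbon)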
def psd_horvath_kawazoe_ry(
pressure: "list[float]",
loading: "list[float]",
temperature: float,
pore_geometry: str,
adsorbate_properties: "dict[str, float]",
material_properties: "dict[str, float]",
use_cy: bool = False,
):
r"""
Calculate the microporous size distribution using a Rege-Yang (R-Y) type model.
This function should not be used with isotherms (use instead
:func:`pygaps.characterisation.psd_micro.psd_microporous`).
Parameters
----------
pressure : list[float]
Relative pressure.
loading : list[float]
Adsorbed amount in mmol/g.
temperature : float
Temperature of the experiment, in K.
pore_geometry : str
The geometry of the pore, eg. 'sphere', 'cylinder' or 'slit'.
adsorbate_properties : dict
Properties for the adsorbate in the form of::
adsorbate_properties = {
'molecular_diameter': 0, # nm
'polarizability': 0, # nm3
'magnetic_susceptibility': 0, # nm3
'surface_density': 0, # molecules/m2
'liquid_density': 0, # g/cm3
'adsorbate_molar_mass': 0, # g/mol
}
material_properties : dict
        Properties for the material in the same form
as 'adsorbate_properties'. A list of common models
can be found in .characterisation.models_hk.
    use_cy : bool
Whether to use the Cheng-Yang nonlinear Langmuir term.
Returns
-------
pore widths : array
The widths of the pores.
pore_dist : array
The distributions for each width.
pore_vol_cum : array
Cumulative pore volume.
Notes
-----
    This approach attempts to address two main shortcomings of the H-K method
    (see details here
    :py:func:`~pygaps.characterisation.psd_micro.psd_horvath_kawazoe`),
    namely its odd summation of the adsorbate-surface and adsorbate-adsorbate
    contributions and its assumption of a continuous distribution of guest
    molecules inside a pore.
Rege and Yang [#ry2]_ propose a more granular model, where molecules occupy
fixed positions according to a minimum energy potential. Depending on the
size of the pore in relation to the guest, pores are categorised based on
the number of adsorbed layers :math:`M`, with molecules adsorbed inside
described on a layer-by-layer basis. In a similar assumption to the BET
theory, a molecule would experience a surface-guest potential only if
adjacent to the pore wall, with subsequent layers interacting through pure
guest-guest interactions. While they do not assign a weighted distribution
to the guest position (i.e. according to Boltzmann's law) and thus disregard
thermal motion, this model is theoretically a more accurate representation
of how spherical molecules would pack in the pore. The potential equations
were derived for slit, cylindrical and spherical pores.
*Slit pore*
For a slit geometry, the number of layers in a pore of width :math:`L` is
calculated as a function of guest molecule and host surface atom diameter as
:math:`M = (L - d_h)/d_g`. If the number of adsorbed layers is between 1 and
2, the guest molecule will see only the two pore walls, and its potential
will be:
.. math::
\epsilon_{hgh} = \frac{n_h A_{gh}}{2\sigma^{4}}
\Big[
\Big(\frac{\sigma}{d_0}\Big)^{10}
- \Big(\frac{\sigma}{d_0}\Big)^{4}
- \Big(\frac{\sigma}{L - d_0}\Big)^{10}
+ \Big(\frac{\sigma}{L - d_0}\Big)^{4}
\Big]
If the number of layers is larger than two, there will be two types of guest
molecular potentials, namely (i) the first layer which interacts on one side
with the host surface and a layer of guests on the other and (ii) a
middle-type layer which interacts with two other guest layers. Internuclear
distance at zero energy for two guest molecules is introduced as
:math:`\sigma_g = (2/5)^{1/6} d_g`. The functions describing the potentials
of the two types of potential :math:`\epsilon_{hgg}` and
:math:`\epsilon_{ggg}` are then:
.. math::
\epsilon_{hgg} = \frac{n_h A_{gh}}{2\sigma^{4}}
\Big[
\Big(\frac{\sigma}{d_0}\Big)^{10}
- \Big(\frac{\sigma}{d_0}\Big)^{4}
\Big] +
\frac{n_g A_{gg}}{2\sigma_g^{4}}
\Big[
\Big(\frac{\sigma_g}{d_g}\Big)^{10}
- \Big(\frac{\sigma_g}{d_g}\Big)^{4}
\Big]
.. math::
\epsilon_{ggg} = 2 \times \frac{n_g A_{gg}}{2\sigma_g^{4}}
\Big[
\Big(\frac{\sigma_g}{d_g}\Big)^{10}
- \Big(\frac{\sigma_g}{d_g}\Big)^{4}
\Big]
The average potential for a pore with more than two layers is a weighted
combination of the two types of layers
    :math:`\bar{\epsilon} = [2 \epsilon_{hgg} + (M-2)\epsilon_{ggg}] / M`, while
    for a single layer it is equal to
:math:`\bar{\epsilon} = \epsilon_{hgh}`. With a potential formula for both
types of pores, the change in free energy can be calculated similarly to the
original H-K method: :math:`RT\ln(p/p_0) = N_A \bar{\epsilon}`.
*Cylindrical pore*
In a cylindrical pore, the number of concentric layers of guest molecules
which can be arranged in a cross-section of radius :math:`L` is
mathematically represented as:
.. math::
M = \text{int}\Big[ \frac{(2L - d_h)/d_g - 1}{2} \Big] + 1
    Here, :math:`\text{int}` denotes truncation to the integer below. Molecules can
then either be part of the first layer, interacting with the surface, or in
subsequent layers, interacting with adsorbate layers, with their number for
each layer estimated using its diameter. In this particular geometry, an
assumption is made that *only outer-facing layers contribute to the
interaction energy*. The potentials corresponding to the two situations are
then determined as:
.. math::
\epsilon_{hg} = \frac{3}{4}\pi \frac{n_h A_{gh}}{d_0^{4}}
\times
\Big[
\frac{21}{32} a_1^{10} \sum^{\infty}_{k = 0} \alpha_k b_1^{2k}
- a_1^{4} \sum^{\infty}_{k = 0} \beta_k b_1^{2k}
\Big] \\
.. math::
\epsilon_{gg} = \frac{3}{4}\pi \frac{n_g A_{gg}}{d_g^{4}}
\times
\Big[
\frac{21}{32} a_i^{10} \sum^{\infty}_{k = 0} \alpha_k b_i^{2k}
- a_i^{4} \sum^{\infty}_{k = 0} \beta_k b_i^{2k}
\Big]
Where:
.. math::
a_1 = d_0 / L \ \text{and} \ b_1 = (L - d_0) / L
.. math::
a_i = \frac{d_g}{L - d_0 - (i - 2) d_g} \ \text{and} \ b_i = \frac{L - d_0 - (i - 1) d_g}{L - d_0 - (i - 2) d_g}
With the symbols having the same connotation as those in the original H-K
cylindrical model. The number of molecules accommodated in each concentric
layer is calculated as:
.. math::
n_i = \frac{\pi}{\sin^{-1} \Big[\frac{d_g}{2(L - d_0 - (i - 1) d_g)}\Big]}
The average potential for a pore is then a weighted average defined as
:math:`\bar{\epsilon} = \sum^{M}_{i = 1} n_i \epsilon_i / \sum^{M}_{i = 1} n_i`
and then equated to change in free energy by multiplication with Avogadro's
number.
*Spherical pore*
In a spherical pore of radius :math:`L`, the number of layers that can be
accommodated :math:`M` is assumed identical to that in a cylindrical pore of
similar radius. The equations describing the potential for the initial and
subsequent layers are then given as:
.. math::
\epsilon_1 = 2 \frac{n_0 A_{gh}}{4 d_0^6}
\Big[
\frac{a_1^{12}}{10 b_1} \Big( \frac{1}{(1-b_1)^{10}} - \frac{1}{(1+b_1)^{10}} \Big)
- \frac{a_1^{6}}{4 b_1} \Big( \frac{1}{(1-b_1)^{4}} - \frac{1}{(1+b_1)^{4}} \Big)
\Big]
.. math::
\epsilon_i = 2 \frac{n_{i-1} A_{gg}}{4 d_g^6}
\Big[
\frac{a_i^{12}}{10 b_i} \Big( \frac{1}{(1-b_i)^{10}} - \frac{1}{(1+b_i)^{10}} \Big)
- \frac{a_i^{6}}{4 b_i} \Big( \frac{1}{(1-b_i)^{4}} - \frac{1}{(1+b_i)^{4}} \Big)
\Big]
The number of molecules each layer interacts with (:math:`n`) is calculated
based on known surface density and a spherical geometry correction. For the
first layer :math:`n_0 = 4\pi L^2 n_h` and for subsequent layers
:math:`n_i = 4\pi (L - d_0 - (i-1) d_g)^2 n_g`. The constants :math:`a` and
:math:`b` are calculated as for a cylindrical geometry, as in the case with
the average potential :math:`\bar{\epsilon}`.
References
----------
.. [#ry2] S. U. Rege and R. T. Yang, "Corrected Horváth-Kawazoe equations for
pore-size distribution", AIChE Journal, 46, 4, 734-750, (2000).
"""
# Parameter checks
missing = [x for x in HK_KEYS if x not in material_properties]
if missing:
raise ParameterError(f"Adsorbent properties dictionary is missing parameters: {missing}.")
missing = [
x for x in list(HK_KEYS.keys()) + ['liquid_density', 'adsorbate_molar_mass']
if x not in adsorbate_properties
]
if missing:
raise ParameterError(f"Adsorbate properties dictionary is missing parameters: {missing}.")
# ensure numpy arrays
pressure = numpy.asarray(pressure)
loading = numpy.asarray(loading)
pore_widths = []
# Constants unpacking and calculation
d_ads = adsorbate_properties['molecular_diameter']
d_mat = material_properties['molecular_diameter']
n_ads = adsorbate_properties['surface_density']
n_mat = material_properties['surface_density']
a_ads, a_mat = _dispersion_from_dict(
adsorbate_properties, material_properties
) # dispersion constants
d_eff = (d_ads + d_mat) / 2 # effective diameter
N_over_RT = _N_over_RT(temperature) # N_av / RT
###################################################################
if pore_geometry == 'slit':
sigma = 0.8583742 * d_eff # (2/5)**(1/6) * d_eff,
sigma_ads = 0.8583742 * d_ads # (2/5)**(1/6) * d_ads,
s_over_d0 = sigma / d_eff # pre-calculated constant
sa_over_da = sigma_ads / d_ads # pre-calculated constant
# Potential with one sorbate layer.
potential_adsorbate = (
n_ads * a_ads / 2 / (sigma_ads * 1e-9)**4 * (-sa_over_da**4 + sa_over_da**10)
)
# Potential with one surface layer and one sorbate layer.
potential_onesurface = (
n_mat * a_mat / 2 / (sigma * 1e-9)**4 * (-s_over_d0**4 + s_over_d0**10)
) + potential_adsorbate
def potential_twosurface(l_pore):
"""Potential with two surface layers."""
return (
n_mat * a_mat / 2 / (sigma * 1e-9)**4 * (
s_over_d0**10 - s_over_d0**4 + (sigma / (l_pore - d_eff))**10 -
(sigma / (l_pore - d_eff))**4
)
)
def potential_average(n_layer):
return ((
2 * potential_onesurface +
(n_layer - 2) * 2 * potential_adsorbate # NOTE 2 * is correct
) / n_layer)
def potential(l_pore):
n_layer = (l_pore - d_mat) / d_ads
if n_layer < 2:
return N_over_RT * potential_twosurface(l_pore)
else:
return N_over_RT * potential_average(n_layer)
if use_cy:
pore_widths = _solve_hk_cy(pressure, loading, potential, 2 * d_eff, 1)
else:
pore_widths = _solve_hk(pressure, potential, 2 * d_eff, 1)
# width = distance between infinite slabs - 2 * surface molecule radius (=d_mat)
pore_widths = numpy.asarray(pore_widths) - d_mat
###################################################################
elif pore_geometry == 'cylinder':
max_k = 25 # Maximum K summed
cached_k = 2000 # Maximum K's cached
# to avoid unnecessary recalculations, we cache a_k and b_k values
a_ks, b_ks = [1], [1]
for k in range(1, cached_k):
a_ks.append(((-4.5 - k) / k)**2 * a_ks[k - 1])
b_ks.append(((-1.5 - k) / k)**2 * b_ks[k - 1])
def a_k_sum(r2, max_k_pore):
k_sum_t = 1
for k in range(1, max_k_pore):
k_sum_t = k_sum_t + (a_ks[k] * r2**(2 * k))
return k_sum_t
def b_k_sum(r2, max_k_pore):
k_sum_t = 1
for k in range(1, max_k_pore):
k_sum_t = k_sum_t + (b_ks[k] * r2**(2 * k))
return k_sum_t
def potential_general(l_pore, d_x, n_x, a_x, r1):
# determine maximum summation as a function of pore length
max_k_pore = int(l_pore * max_k)
            max_k_pore = max_k_pore if max_k_pore < cached_k else cached_k
# the b constant is 1-a
r2 = 1 - r1
# 0.65625 is (21 / 32), constant
return (
0.75 * constants.pi * n_x * a_x / ((d_x * 1e-9)**4) *
(0.65625 * r1**10 * a_k_sum(r2, max_k_pore) - r1**4 * b_k_sum(r2, max_k_pore))
)
def potential(l_pore):
n_layers = int(((2 * l_pore - d_mat) / d_ads - 1) / 2) + 1
layer_populations = []
layer_potentials = []
for layer in range(1, n_layers + 1):
width = 2 * (l_pore - d_eff - (layer - 1) * d_ads)
if d_ads <= width:
layer_population = constants.pi / math.asin(d_ads / width)
else:
layer_population = 1
if layer == 1: # potential with surface (first layer)
r1 = d_eff / l_pore
layer_potential = potential_general(l_pore, d_eff, n_mat, a_mat, r1)
else: # inter-adsorbate potential (subsequent layers)
r1 = d_ads / (l_pore - d_eff - (layer - 2) * d_ads)
layer_potential = potential_general(l_pore, d_ads, n_ads, a_ads, r1)
layer_populations.append(layer_population)
layer_potentials.append(layer_potential)
layer_populations = numpy.asarray(layer_populations)
layer_potentials = numpy.asarray(layer_potentials)
return (
N_over_RT * numpy.sum(layer_populations * layer_potentials) /
numpy.sum(layer_populations)
)
if use_cy:
pore_widths = _solve_hk_cy(pressure, loading, potential, d_eff, 1)
else:
pore_widths = _solve_hk(pressure, potential, d_eff, 1)
# width = 2 * cylinder radius - 2 * surface molecule radius (=d_mat)
pore_widths = 2 * numpy.asarray(pore_widths) - d_mat
###################################################################
elif pore_geometry == 'sphere':
p_12 = a_mat / (4 * (d_eff * 1e-9)**6) # ads-surface potential depth
p_22 = a_ads / (4 * (d_ads * 1e-9)**6) # ads-ads potential depth
def potential_general(n_m, p_xx, r1):
"""General RY layer potential in a spherical regime."""
r2 = 1 - r1 # the b constant is 1-a
return (
2 * n_m * p_xx * ((-r1**6 / (4 * r2) * ((1 - r2)**(-4) - (1 + r2)**(-4))) +
(r1**12 / (10 * r2) * ((1 - r2)**(-10) - (1 + r2)**(-10))))
)
def potential(l_pore):
n_layers = int(((2 * l_pore - d_mat) / d_ads - 1) / 2) + 1
layer_populations = []
layer_potentials = []
# potential with surface (first layer)
layer_population = 4 * constants.pi * (l_pore * 1e-9)**2 * n_mat
r1 = d_eff / l_pore
layer_potential = potential_general(layer_population, p_12, r1)
layer_potentials.append(layer_potential) # add E1
# inter-adsorbate potential (subsequent layers)
layer_populations = [
(4 * constants.pi * ((l_pore - d_eff - (layer - 1) * d_ads) * 1e-9)**2 * n_ads)
for layer in range(1, n_layers + 1)
] # [N1...Nm]
for layer, layer_population in zip(range(2, n_layers + 1), layer_populations):
r1 = d_ads / (l_pore - d_eff - (layer - 2) * d_ads)
layer_potential = potential_general(layer_population, p_22, r1)
layer_potentials.append(layer_potential) # add [E2...Em]
layer_populations = numpy.asarray(layer_populations)
layer_potentials = numpy.asarray(layer_potentials)
return (
N_over_RT * numpy.sum(layer_populations * layer_potentials) /
numpy.sum(layer_populations)
)
if use_cy:
pore_widths = _solve_hk_cy(pressure, loading, potential, d_eff, 1)
else:
pore_widths = _solve_hk(pressure, potential, d_eff, 1)
# width = 2 * sphere radius - 2 * surface molecule radius (=d_mat)
pore_widths = 2 * numpy.asarray(pore_widths) - d_mat
# finally calculate pore distribution
liquid_density = adsorbate_properties['liquid_density']
adsorbate_molar_mass = adsorbate_properties['adsorbate_molar_mass']
# Cut unneeded values
selected = slice(0, len(pore_widths))
pore_widths = pore_widths[selected]
pressure = pressure[selected]
loading = loading[selected]
avg_pore_widths = numpy.add(pore_widths[:-1], pore_widths[1:]) / 2 # nm
volume_adsorbed = loading * adsorbate_molar_mass / liquid_density / 1000 # cm3/g
pore_dist = numpy.diff(volume_adsorbed) / numpy.diff(pore_widths)
return avg_pore_widths, pore_dist, volume_adsorbed[1:]
def _solve_hk(pressure, hk_fun, bound, geo):
"""
    I personally found that simple Brent minimisation
gives good results. There may be other, more efficient
algorithms, like conjugate gradient, but optimization is a moot point
as long as average total runtime is short.
The minimisation runs with bounds of [d_eff < x < 50].
Maximum determinable pore size is limited at ~2.5 nm anyway.
"""
p_w = []
p_w_max = 10 / geo
for p_point in pressure:
def fun(l_pore):
return (numpy.exp(hk_fun(l_pore)) - p_point)**2
res = optimize.minimize_scalar(fun, method='bounded', bounds=(bound, 50))
p_w.append(res.x)
# we will stop if reaching unrealistic pore sizes
if res.x > p_w_max:
break
return p_w
def _solve_hk_cy(pressure, loading, hk_fun, bound, geo):
"""
In this case, the SF correction factor is subtracted
from the original function.
"""
p_w = []
p_w_max = 10 / geo
coverage = loading / (max(loading) * 1.01)
for p_point, c_point in zip(pressure, coverage):
sf_corr = 1 + 1 / c_point * numpy.log(1 - c_point)
def fun(l_pore):
return (numpy.exp(hk_fun(l_pore) - sf_corr) - p_point)**2
res = optimize.minimize_scalar(fun, method='bounded', bounds=(bound, 50))
p_w.append(res.x)
# we will stop if reaching unrealistic pore sizes
if res.x > p_w_max:
break
return p_w
def _dispersion_from_dict(ads_dict, mat_dict):
p_ads = ads_dict['polarizability'] * 1e-27 # to m3
p_mat = mat_dict['polarizability'] * 1e-27 # to m3
m_ads = ads_dict['magnetic_susceptibility'] * 1e-27 # to m3
m_mat = mat_dict['magnetic_susceptibility'] * 1e-27 # to m3
return (
_kirkwood_muller_dispersion_ads(p_ads, m_ads),
_kirkwood_muller_dispersion_mat(p_mat, m_mat, p_ads, m_ads),
)
def _kirkwood_muller_dispersion_ads(p_ads, m_ads):
"""Calculate the dispersion constant for the adsorbate.
p and m stand for polarizability and magnetic susceptibility
"""
return (1.5 * constants.electron_mass * constants.speed_of_light**2 * p_ads * m_ads)
def _kirkwood_muller_dispersion_mat(p_mat, m_mat, p_ads, m_ads):
"""Calculate the dispersion constant for the material.
p and m stand for polarizability and magnetic susceptibility
"""
return (
6 * constants.electron_mass * constants.speed_of_light**2 * p_ads * p_mat /
(p_ads / m_ads + p_mat / m_mat)
)
def _N_over_RT(temp):
"""Calculate (N_a / RT)."""
return (constants.Avogadro / constants.gas_constant / temp)
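# Illustrative arithmetic check: at 77.355 K,
# N_A / (R T) = 6.022e23 / (8.314 * 77.355) ≈ 9.36e20 J^-1.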
|
pauliacomi/pyGAPS
|
src/pygaps/characterisation/psd_micro.py
|
Python
|
mit
| 47,092
|
[
"Avogadro"
] |
061a2a32f21252b4ea432b939570ccd6ad84d4dd1ffaeb1731d06bbc7afcfbc3
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Variational Inference."""
import jax
import copy
from jax import numpy as jnp
from bnn_hmc.utils import tree_utils
def inv_softplus(x):
return jnp.log(jnp.exp(x) - 1)
def get_mfvi_model_fn(net_fn, params, net_state, seed=0, sigma_init=0.):
"""Convert model, parameters and net state to use MFVI.
Convert the model to fit a Gaussian distribution to each of the weights
following the Mean Field Variational Inference (MFVI) procedure.
Args:
net_fn: neural network function.
    params: parameters of the network; we initialize the mean in MFVI with
params.
net_state: state of the network.
seed: random seed; used for generating random samples when computing MFVI
predictions (default: 0).
    sigma_init: initial value of the standard deviation of the per-parameter
Gaussians.
"""
# net_fn(params, net_state, None, batch, is_training)
mean_params = jax.tree_map(lambda p: p.copy(), params)
sigma_isp = inv_softplus(sigma_init)
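  # Note: with the default sigma_init=0., inv_softplus evaluates to -inf,
  # which softplus maps back to an initial standard deviation of exactly 0.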
std_params = jax.tree_map(lambda p: jnp.ones_like(p) * sigma_isp, params)
mfvi_params = {"mean": mean_params, "inv_softplus_std": std_params}
mfvi_state = {
"net_state": copy.deepcopy(net_state),
"mfvi_key": jax.random.PRNGKey(seed)
}
  def sample_params_fn(params, state):
mean = params["mean"]
std = jax.tree_map(jax.nn.softplus, params["inv_softplus_std"])
noise, new_key = tree_utils.normal_like_tree(mean, state["mfvi_key"])
params_sampled = jax.tree_multimap(lambda m, s, n: m + n * s, mean, std,
noise)
new_mfvi_state = {
"net_state": copy.deepcopy(state["net_state"]),
"mfvi_key": new_key
}
return params_sampled, new_mfvi_state
def mfvi_apply_fn(params, state, _, batch, is_training):
    params_sampled, new_mfvi_state = sample_params_fn(params, state)
predictions, new_net_state = net_fn(params_sampled, state["net_state"],
None, batch, is_training)
new_mfvi_state = {
"net_state": copy.deepcopy(new_net_state),
"mfvi_key": new_mfvi_state["mfvi_key"]
}
return predictions, new_mfvi_state
def mfvi_apply_mean_fn(params, state, _, batch, is_training):
"""Predict with the variational mean."""
mean = params["mean"]
predictions, new_net_state = net_fn(mean, state["net_state"], None, batch,
is_training)
new_mfvi_state = {
"net_state": copy.deepcopy(new_net_state),
"mfvi_key": state["mfvi_key"]
}
return predictions, new_mfvi_state
  return (mfvi_apply_fn, mfvi_apply_mean_fn, sample_params_fn, mfvi_params,
mfvi_state)
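# A hedged sketch of the leaf-wise reparameterisation performed by
# sample_params_fn above, applied to a toy pytree; the shapes and the 0.1
# standard deviation are arbitrary illustration values.
def _example_mfvi_sample(seed=0):
  mean = {"w": jnp.zeros((2, 2)), "b": jnp.zeros((2,))}
  std = jax.tree_map(lambda p: jnp.ones_like(p) * 0.1, mean)
  noise, _ = tree_utils.normal_like_tree(mean, jax.random.PRNGKey(seed))
  # sample = mean + std * noise, applied independently to every leaf
  return jax.tree_multimap(lambda m, s, n: m + s * n, mean, std, noise)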
def make_kl_with_gaussian_prior(weight_decay, temperature=1.):
"""Implements the prior KL term in the ELBO.
Args:
weight_decay: weight decay corresponding to the prior distribution.
    temperature: temperature of the posterior; corresponds to the weight of
      the KL term in the ELBO.
  Returns:
    A function that takes the MFVI parameters and returns the KL divergence
    between the posterior and the prior, scaled by the temperature and
    negated, as used in the ELBO.
"""
def kl_fn(params):
n_params = sum([p.size for p in jax.tree_leaves(params)])
sigma_prior = jnp.sqrt(1 / weight_decay)
mu_vi_tree = params["mean"]
sigma_vi_tree = jax.tree_map(jax.nn.softplus, params["inv_softplus_std"])
def get_parameter_kl(mu_vi, sigma_vi):
return (jnp.log(sigma_prior / sigma_vi) +
(sigma_vi**2 + mu_vi**2) / 2 / sigma_prior**2 - 1 / 2)
kl_tree = jax.tree_multimap(get_parameter_kl, mu_vi_tree, sigma_vi_tree)
kl = sum([p_kl.sum() for p_kl in jax.tree_leaves(kl_tree)])
return -kl * temperature
return kl_fn
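# A hedged sanity sketch: when the variational posterior equals the prior
# (zero mean, sigma_vi == sigma_prior), each per-parameter term is
# log(1) + (sigma^2 + 0) / (2 sigma^2) - 1/2 = 0, so kl_fn returns 0.
def _example_kl_zero(weight_decay=1.):
  sigma_prior = jnp.sqrt(1 / weight_decay)
  params = {
      "mean": {"w": jnp.zeros(3)},
      "inv_softplus_std": {"w": jnp.ones(3) * inv_softplus(sigma_prior)},
  }
  return make_kl_with_gaussian_prior(weight_decay)(params)  # evaluates to 0.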
|
google-research/google-research
|
bnn_hmc/core/vi.py
|
Python
|
apache-2.0
| 4,956
|
[
"Gaussian"
] |
245cd2e2494ffeeb5cf1a57e93481893f73f2a0cc21674cc383f5cfb662a1e05
|
""" Class that contains client access to the job monitoring handler. """
__RCSID__ = "$Id$"
from DIRAC.Core.Base.Client import Client
class JobMonitoringClient(Client):
def __init__(self, **kwargs):
super(JobMonitoringClient, self).__init__(**kwargs)
self.setServer('WorkloadManagement/JobMonitoring')
def traceJobParameters(self, site, localID, parameterList=None, attributeList=None, date=None, until=None):
return self._getRPC().traceJobParameters(site, localID, parameterList, attributeList, date, until)
def traceJobParameter(self, site, localID, parameter, date=None, until=None):
return self._getRPC().traceJobParameter(site, localID, parameter, date, until)
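def _exampleTraceStatus(site, localID):
  """ A hedged usage sketch, not part of the client API: fetch the trace of a
      job parameter for a job identified by site and local ID. The 'Status'
      parameter name is an assumed example; the RPC returns a DIRAC
      S_OK/S_ERROR style dictionary.
  """
  client = JobMonitoringClient()
  result = client.traceJobParameter(site, localID, 'Status')
  if not result['OK']:
    return result['Message']
  return result['Value']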
|
andresailer/DIRAC
|
WorkloadManagementSystem/Client/JobMonitoringClient.py
|
Python
|
gpl-3.0
| 696
|
[
"DIRAC"
] |
1567e332e130d1830862f4fc3af4a36a1266461df301de2965c4eaadf3f6d705
|
from toee import *
from utilities import *
from toee import anyone
from py00439script_daemon import *
import _include
from co8Util.TimedEvent import *
from combat_standard_routines import *
from py00439script_daemon import get_f, set_f, get_v, set_v, tpsts, record_time_stamp
def san_dialog( attachee, triggerer ):
if game.global_vars[923] == 0:
tempp = 0
for p in range(0, 12):
tempp += game.random_range(0, 8)
tempp -= 24
if tempp < 5:
tempp = 5
game.global_vars[923] = tempp
elif tpsts('s_ranths_bandits_1', 0) == 0:
record_time_stamp('s_ranths_bandits_1')
attachee.turn_towards(triggerer)
if (game.quests[78].state == qs_completed and game.quests[107].state == qs_unknown and game.quests[112].state == qs_mentioned):
triggerer.begin_dialog( attachee, 430 )
if (game.quests[74].state == qs_completed and game.quests[78].state == qs_unknown and game.quests[111].state == qs_mentioned):
triggerer.begin_dialog( attachee, 450 )
elif (game.global_vars[993] == 7):
triggerer.begin_dialog( attachee, 630 )
elif (game.global_vars[993] == 9):
triggerer.begin_dialog( attachee, 710 )
elif (attachee.map == 5156):
triggerer.begin_dialog( attachee, 910 )
else:
triggerer.begin_dialog( attachee, 1 )
return SKIP_DEFAULT
def san_first_heartbeat( attachee, triggerer ):
if (game.global_flags[992] == 1):
attachee.object_flag_set(OF_OFF)
elif (attachee.map == 5156 and game.global_vars[704] == 3 and is_daytime() == 1 and game.quests[76].state != qs_accepted):
attachee.object_flag_unset(OF_OFF)
elif (attachee.map == 5170 and game.global_vars[979] == 2):
if (is_daytime() == 1):
attachee.object_flag_unset(OF_OFF)
elif (is_daytime() == 0):
attachee.object_flag_set(OF_OFF)
elif (attachee.map == 5135 and game.global_vars[979] == 2):
if (is_daytime() == 1):
attachee.object_flag_set(OF_OFF)
elif (is_daytime() == 0):
attachee.object_flag_unset(OF_OFF)
return RUN_DEFAULT
def san_dying( attachee, triggerer ):
if should_modify_CR( attachee ):
modify_CR( attachee, get_av_level() )
for pc in game.party:
pc.condition_add('fallen_paladin')
if (attachee.map == 5170 or attachee.map == 5135):
game.global_flags[992] = 1
game.global_flags[935] = 1
game.party[0].reputation_add( 44 )
elif (attachee.map == 5156):
if (game.global_flags[940] == 1):
game.global_flags[935] = 1
game.party[0].reputation_add( 44 )
game.global_flags[992] = 1
return RUN_DEFAULT
def san_enter_combat( attachee, triggerer ):
if (attachee.name == 8703):
if (attachee.map == 5156):
attachee.float_line( 5000,triggerer )
if (attachee.map == 5170):
samson = game.obj_create(14660, location_from_axis (501L, 484L))
samson.turn_towards(triggerer)
samson.attack(game.party[0])
goliath = game.obj_create(14661, location_from_axis (498L, 484L))
goliath.turn_towards(triggerer)
goliath.attack(game.party[0])
bathsheba = game.obj_create(14659, location_from_axis (495L, 484L))
bathsheba.turn_towards(triggerer)
bathsheba.float_line(1000,triggerer)
bathsheba.attack(game.party[0])
if (attachee.map == 5135 and attachee.name == 8703):
samson = game.obj_create(14660, location_from_axis (494L, 488L))
samson.turn_towards(triggerer)
samson.attack(game.party[0])
goliath = game.obj_create(14661, location_from_axis (494L, 491L))
goliath.turn_towards(triggerer)
goliath.attack(game.party[0])
bathsheba = game.obj_create(14659, location_from_axis (481L, 496L))
bathsheba.turn_towards(triggerer)
bathsheba.float_line(1000,triggerer)
bathsheba.attack(game.party[0])
ProtectTheInnocent(attachee, triggerer)
return RUN_DEFAULT
def san_start_combat( attachee, triggerer ):
game.counters[0] = game.counters[0] + 1
if (game.counters[0] == 1):
attachee.float_line(1000,triggerer)
return SKIP_DEFAULT
elif (game.counters[0] == 2):
overseers_show_up( attachee, triggerer )
game.global_vars[704] = 4
elif (game.counters[0] == 3):
attachee.float_line(2000,triggerer)
return SKIP_DEFAULT
elif (game.counters[0] == 4):
guards_show_up( attachee, triggerer )
game.global_vars[704] = 5
elif (game.counters[0] == 5):
attachee.float_line(4000,triggerer)
return SKIP_DEFAULT
elif (game.counters[0] == 6):
guardian_show_up( attachee, triggerer )
game.global_vars[704] = 6
elif (game.counters[0] == 7):
attachee.float_line(3000,triggerer)
return SKIP_DEFAULT
elif (game.counters[0] == 8):
mages_show_up( attachee, triggerer )
game.global_vars[704] = 7
elif (game.counters[0] == 9):
game.global_vars[704] = 8
return RUN_DEFAULT
def san_will_kos( attachee, triggerer ):
if (game.party[0].reputation_has(34) == 1):
return RUN_DEFAULT
elif (game.global_flags[992] == 0):
return SKIP_DEFAULT
return RUN_DEFAULT
def distribute_verbobonc_uniform(npc,pc):
for obj in pc.group_list():
create_item_in_inventory( 6498, obj )
create_item_in_inventory( 6269, obj )
return RUN_DEFAULT
def overseers_show_up( attachee, triggerer ):
samson = game.obj_create(14660, location_from_axis (482L, 494L))
samson.turn_towards(triggerer)
samson.float_line(1000,triggerer)
goliath = game.obj_create(14661, location_from_axis (484L, 495L))
goliath.turn_towards(triggerer)
samson.attack(game.party[0])
goliath.attack(game.party[0])
return RUN_DEFAULT
def guards_show_up( attachee, triggerer ):
guard1 = game.obj_create(14644, location_from_axis (481L, 493L))
guard1.turn_towards(triggerer)
guard1.float_line(1000,triggerer)
guard2 = game.obj_create(14644, location_from_axis (483L, 495L))
guard2.turn_towards(triggerer)
guard3 = game.obj_create(14644, location_from_axis (479L, 493L))
guard3.turn_towards(triggerer)
guard4 = game.obj_create(14644, location_from_axis (481L, 495L))
guard4.turn_towards(triggerer)
guard1.attack(game.party[0])
guard2.attack(game.party[0])
guard3.attack(game.party[0])
guard4.attack(game.party[0])
return RUN_DEFAULT
def guardian_show_up( attachee, triggerer ):
bathsheba = game.obj_create(14659, location_from_axis (484L, 494L))
bathsheba.turn_towards(triggerer)
bathsheba.float_line(2000,triggerer)
bathsheba.attack(game.party[0])
return RUN_DEFAULT
def mages_show_up( attachee, triggerer ):
mage1 = game.obj_create(14658, attachee.location-4)
game.particles( "sp-Teleport", mage1 )
mage1.turn_towards(triggerer)
mage1.float_line(1000,triggerer)
mage2 = game.obj_create(14658, attachee.location-4)
game.particles( "sp-Teleport", mage2 )
mage2.turn_towards(triggerer)
game.sound( 4035, 1 )
for obj in game.obj_list_vicinity(mage1.location,OLC_PC):
mage1.attack(obj)
for obj in game.obj_list_vicinity(mage2.location,OLC_PC):
mage2.attack(obj)
return RUN_DEFAULT
def ditch_captains( attachee, triggerer ):
abiram = find_npc_near(attachee,8706)
abiram.runoff(attachee.location-3)
absalom = find_npc_near(attachee,8707)
absalom.runoff(attachee.location-3)
achan = find_npc_near(attachee,8708)
achan.runoff(attachee.location-3)
return
def switch_to_captain( attachee, triggerer, line):
abiram = find_npc_near(attachee,8706)
absalom = find_npc_near(attachee,8707)
achan = find_npc_near(attachee,8708)
if (abiram != OBJ_HANDLE_NULL):
triggerer.begin_dialog(abiram, line)
if (absalom != OBJ_HANDLE_NULL):
triggerer.begin_dialog(absalom, line)
if (achan != OBJ_HANDLE_NULL):
triggerer.begin_dialog(achan, line)
return SKIP_DEFAULT
def schedule_bandits_1( attachee, triggerer ):
tempp = game.global_vars[923]
if game.global_vars[923] == 0:
for p in range(0, 12):
tempp += game.random_range(0, 8)
tempp -= 24
if tempp < 5:
tempp = 5
	# approximate a Gaussian distribution by adding together 12 uniformly distributed random variables
	# the average result will be 24 days, with a standard deviation of roughly 8 days
	# it is then truncated at a 5 day minimum (feel free to change); roughly 1% of results would otherwise reach 5 or lower, and even negative values are possible though rare
game.global_vars[923] = tempp
game.timevent_add( set_bandits, (), tempp * 24 * 60 * 60 * 1000 )
record_time_stamp('s_ranths_bandits_1')
return RUN_DEFAULT
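# A hedged numeric aside on the delay above: treating each random_range(0, 8)
# roll as roughly uniform on [0, 8] gives a per-roll variance of 8**2/12, so
# the 12-roll sum has mean 48 and a standard deviation of about 8 (closer to
# 8.9 for integer-valued rolls); subtracting 24 centres the delay at 24 days,
# matching the comments in schedule_bandits_1.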
def set_bandits():
game.encounter_queue.append(3434)
set_f('s_ranths_bandits_scheduled')
return RUN_DEFAULT
def slavers_movie_setup( attachee, triggerer ):
set_slavers_slides()
return
def set_slavers_slides():
game.moviequeue_add(601)
game.moviequeue_add(602)
game.moviequeue_add(603)
game.moviequeue_add(604)
game.moviequeue_add(605)
game.moviequeue_add(606)
game.moviequeue_add(607)
game.moviequeue_add(608)
game.moviequeue_add(609)
game.moviequeue_play()
return RUN_DEFAULT
|
GrognardsFromHell/TemplePlus
|
tpdatasrc/co8fixes/scr/py00338Viscount.py
|
Python
|
mit
| 8,561
|
[
"Gaussian"
] |
4029d1989842ae3001fc6bcfb5d959b2f8274d926b9768f985d403299962dc4c
|
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import datetime
from .TextAnnotationSource import TextAnnotationSource
class TimeAnnotationSource(TextAnnotationSource):
"""
Source for creating time stamps.
"""
@staticmethod
def getOptions():
"""
Return default options for this object.
"""
opt = TextAnnotationSource.getOptions()
opt.add('time', 330667320, "The time to display, in seconds.", vtype=float)
opt.add('prefix', 'Time:', "The text to display prior to the time string.")
opt.add('suffix', None, "The text to display after the time string.", vtype=str)
opt.add('timedelta', False, "Format the time using the python datetime.timedelta")
opt.setDefault('position', [0.01, 0.01])
opt.pop('text')
return opt
def update(self, **kwargs):
"""
Converts timestamp to a text string for display. (override)
"""
super(TimeAnnotationSource, self).update(**kwargs)
# The time to display
time = self.getOption('time')
# Build the text string
text = []
if self.isOptionValid('prefix'):
text.append(self.getOption('prefix'))
if self.isOptionValid('timedelta') and self.getOption('timedelta'):
t = datetime.timedelta(seconds=time)
text.append(str(t))
else:
text.append(str(time))
if self.isOptionValid('suffix'):
text.append(self.getOption('suffix'))
self._vtkmapper.GetTextProperty().Modified()
self._vtkmapper.SetInput(' '.join(text))
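# An illustrative aside: with the 'timedelta' option enabled, the default time
# of 330667320 seconds is rendered through datetime.timedelta, e.g.
#   >>> str(datetime.timedelta(seconds=330667320))
#   '3827 days, 4:02:00'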
|
harterj/moose
|
python/chigger/annotations/TimeAnnotationSource.py
|
Python
|
lgpl-2.1
| 1,918
|
[
"MOOSE"
] |
5c0604b68f364ae29e6955e5b1d67198421b12f1edd53670c8eefe3a3bc4ba6a
|
""" This is the Proxy storage element client """
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.DISET.TransferClient import TransferClient
from DIRAC.Core.Utilities.File import getSize
import os
class ProxyStorage( StorageBase ):
def __init__( self, storageName, parameters ):
StorageBase.__init__( self, storageName, parameters )
self.pluginName = 'Proxy'
self.isok = True
self.url = PathFinder.getServiceURL( "DataManagement/StorageElementProxy" )
if not self.url:
self.isok = False
######################################
# File transfer functionalities
######################################
def getFile( self, path, localPath = False ):
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
client = RPCClient( self.url )
transferClient = TransferClient( self.url )
for src_url in urls.keys():
res = client.prepareFile( self.name, src_url )
if not res['OK']:
gLogger.error( "ProxyStorage.getFile: Failed to prepare file on remote server.", res['Message'] )
failed[src_url] = res['Message']
else:
fileName = os.path.basename( src_url )
if localPath:
dest_file = "%s/%s" % ( localPath, fileName )
else:
dest_file = "%s/%s" % ( os.getcwd(), fileName )
res = transferClient.receiveFile( dest_file, 'getFile/%s' % fileName )
if not res['OK']:
gLogger.error( "ProxyStorage.getFile: Failed to recieve file from proxy server.", res['Message'] )
failed[src_url] = res['Message']
elif not os.path.exists( dest_file ):
errStr = "ProxyStorage.getFile: The destination local file does not exist."
gLogger.error( errStr, dest_file )
failed[src_url] = errStr
else:
destSize = getSize( dest_file )
if destSize == -1:
errStr = "ProxyStorage.getFile: Failed to get the local file size."
gLogger.error( errStr, dest_file )
failed[src_url] = errStr
else:
successful[src_url] = destSize
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def putFile( self, path, sourceSize = 0 ):
client = RPCClient( self.url )
if sourceSize:
gLogger.debug( "ProxyStorage.putFile: The client has provided the source file size implying a replication is requested." )
return client.callProxyMethod( self.name, 'putFile', [path], {'sourceSize':sourceSize} )
gLogger.debug( "ProxyStorage.putFile: No source size was provided therefore a simple put will be performed." )
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
client = RPCClient( self.url )
transferClient = TransferClient( self.url )
for dest_url, src_file in urls.items():
fileName = os.path.basename( dest_url )
res = transferClient.sendFile( src_file, 'putFile/%s' % fileName )
if not res['OK']:
gLogger.error( "ProxyStorage.putFile: Failed to send file to proxy server.", res['Message'] )
failed[dest_url] = res['Message']
else:
res = client.uploadFile( self.name, dest_url )
if not res['OK']:
gLogger.error( "ProxyStorage.putFile: Failed to upload file to storage element from proxy server.", res['Message'] )
failed[dest_url] = res['Message']
else:
res = self.__executeOperation( dest_url, 'getFileSize' )
if not res['OK']:
gLogger.error( "ProxyStorage.putFile: Failed to determine destination file size.", res['Message'] )
failed[dest_url] = res['Message']
else:
successful[dest_url] = res['Value']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
######################################
# File manipulation functionalities
######################################
def exists( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'exists', [path], {} )
def isFile( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'isFile', [path], {} )
def getFileSize( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'getFileSize', [path], {} )
def getFileMetadata( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'getFileMetadata', [path], {} )
def removeFile( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'removeFile', [path], {} )
def prestageFile( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'prestageFile', [path], {} )
def prestageFileStatus( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'prestageFileStatus', [path], {} )
def pinFile( self, path, lifetime = 60 * 60 * 24 ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'pinFile', [path], {'lifetime':lifetime} )
def releaseFile( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'releaseFile', [path], {} )
######################################
# Directory manipulation functionalities
######################################
def isDirectory( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'isDirectory', [path], {} )
def getDirectoryMetadata( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'getDirectoryMetadata', [path], {} )
def getDirectorySize( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'getDirectorySize', [path], {} )
def listDirectory( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'listDirectory', [path], {} )
def createDirectory( self, path ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'createDirectory', [path], {} )
def removeDirectory( self, path, recursive = False ):
client = RPCClient( self.url )
return client.callProxyMethod( self.name, 'removeDirectory', [path], {'recursive':recursive} )
def getDirectory( self, path ):
return S_ERROR( "Not supported" )
def putDirectory( self, path ):
return S_ERROR( "Not supported" )
def __executeOperation( self, url, method ):
""" Executes the requested functionality with the supplied url
"""
fcn = None
if hasattr( self, method ) and callable( getattr( self, method ) ):
fcn = getattr( self, method )
if not fcn:
return S_ERROR( "Unable to invoke %s, it isn't a member function of ProxyStorage" % method )
res = fcn( [url] )
if not res['OK']:
return res
elif url not in res['Value']['Successful']:
return S_ERROR( res['Value']['Failed'][url] )
return S_OK( res['Value']['Successful'][url] )
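def _exampleGetFile( proxyStorage, url, localPath = False ):
  """ A hedged usage sketch, not part of the class API: download a single url
      with a ProxyStorage instance and unpack the Successful/Failed result
      dictionary returned by getFile.
  """
  res = proxyStorage.getFile( url, localPath = localPath )
  if not res['OK']:
    return res
  if url in res['Value']['Successful']:
    return S_OK( res['Value']['Successful'][url] )
  return S_ERROR( res['Value']['Failed'][url] )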
|
vmendez/DIRAC
|
Resources/Storage/ProxyStorage.py
|
Python
|
gpl-3.0
| 7,482
|
[
"DIRAC"
] |
c296ab258f92e9ffd5ed8b1456b3cfe2265961733175f2ca8d4b9a7000169d11
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("trading.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
    # these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
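    # Hedged note: from Django 1.10 the string view paths above no longer
    # resolve; an import-based equivalent would look like:
    #
    #   from django.views import defaults as default_views
    #   urlpatterns += [
    #       url(r'^400/$', default_views.bad_request,
    #           kwargs={'exception': Exception('Bad Request!')}),
    #   ]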
|
volkandkaya/trading
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,229
|
[
"VisIt"
] |
5c3fc65f2030a4f5b582aac1e5b41c8bcd5eab4830648e4a926211bc36a72505
|
#
# io - Data input and output
#
from info import __doc__
from numpy import deprecate_with_doc
# These are all deprecated (until the end deprecated tag)
from npfile import npfile
from data_store import save, load, create_module, create_shelf
from array_import import read_array, write_array
from pickler import objload, objsave
from numpyio import packbits, unpackbits, bswap, fread, fwrite, \
convert_objectarray
fread = deprecate_with_doc("""
scipy.io.fread can be replaced with NumPy I/O routines such as
np.load and np.fromfile, as well as NumPy's memory-mapping capabilities.
""")(fread)
fwrite = deprecate_with_doc("""
scipy.io.fwrite can be replaced with NumPy I/O routines such as np.save,
np.savez and x.tofile. Also, files can be directly memory-mapped into NumPy
arrays which is often a better way of reading large files.
""")(fwrite)
bswap = deprecate_with_doc("""
scipy.io.bswap can be replaced with the byteswap method on an array.
out = scipy.io.bswap(arr) --> out = arr.byteswap(True)
""")(bswap)
packbits = deprecate_with_doc("""
The functionality of scipy.io.packbits is now available as numpy.packbits
The calling convention is a bit different, as the 2-d case is no
longer specialized.
However, you can simulate scipy.packbits by raveling the last 2 dimensions
of the array and calling numpy.packbits with an axis=-1 keyword:
def scipy_packbits(inp):
a = np.asarray(inp)
if a.ndim < 2:
return np.packbits(a)
oldshape = a.shape
newshape = oldshape[:-2] + (oldshape[-2]*oldshape[-1],)
a = np.reshape(a, newshape)
return np.packbits(a, axis=-1).ravel()
""")(packbits)
unpackbits = deprecate_with_doc("""
The functionality of scipy.io.unpackbits is now available in numpy.unpackbits
The calling convention is different, however, as the 2-d case is no longer
specialized.
Thus, the scipy.unpackbits behavior must be simulated using numpy.unpackbits.
def scipy_unpackbits(inp, els_per_slice, out_type=None):
inp = np.asarray(inp)
num4els = ((els_per_slice-1) >> 3) + 1
inp = np.reshape(inp, (-1,num4els))
res = np.unpackbits(inp, axis=-1)[:,:els_per_slice]
return res.ravel()
""")(unpackbits)
convert_objectarray = deprecate_with_doc("""
The same functionality can be obtained using NumPy string arrays and the
.astype method (except for the optional missing value feature).
""")(convert_objectarray)
# end deprecated
# matfile read and write
from matlab.mio import loadmat, savemat
# netCDF file support
from netcdf import netcdf_file, netcdf_variable
from recaster import sctype_attributes, Recaster
import matlab.byteordercodes as byteordercodes
from data_store import save_as_module
from mmio import mminfo, mmread, mmwrite
__all__ = filter(lambda s:not s.startswith('_'),dir())
from numpy.testing import Tester
test = Tester().test
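if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): round-trip a
    # MATLAB file with the loadmat/savemat pair imported above. The file
    # name is illustrative only.
    import numpy as np
    savemat('demo.mat', {'a': np.arange(4)})
    data = loadmat('demo.mat')
    print data['a']  # loadmat returns arrays as at least 2-D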
|
stefanv/scipy3
|
scipy/io/__init__.py
|
Python
|
bsd-3-clause
| 2,826
|
[
"NetCDF"
] |
858a66b9b2a7a1fc27c269344077c91eb30220e9dc099e5e29712795a5233165
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import pymysql
import dbShared
#
def getItemName(sqlStr):
result = ""
conn = dbShared.ghConn()
cursor = conn.cursor()
if (cursor):
cursor.execute(sqlStr)
row = cursor.fetchone()
if (row != None):
result = row[1]
cursor.close()
conn.close()
return result
def getPlanetName(planetid):
nameStr = getItemName('SELECT planetID, planetName FROM tPlanet WHERE planetID='+dbShared.dbInsertSafe(str(planetid))+';')
return nameStr
def getSpawnName(spawnid):
nameStr = getItemName('SELECT spawnID, spawnName FROM tResources WHERE spawnID='+dbShared.dbInsertSafe(str(spawnid))+';')
return nameStr
def getResourceTypeName(typeid):
nameStr = getItemName('SELECT resourceType, resourceTypeName FROM tResourceType WHERE resourceType="'+dbShared.dbInsertSafe(typeid)+'";')
return nameStr
def getResourceGroupName(groupid):
nameStr = getItemName('SELECT resourceGroup, groupName FROM tResourceGroup WHERE resourceGroup="'+dbShared.dbInsertSafe(groupid)+'";')
return nameStr
def getGalaxyName(galaxyid):
nameStr = getItemName('SELECT galaxyID, galaxyName FROM tGalaxy WHERE galaxyID="'+dbShared.dbInsertSafe(galaxyid)+'";')
return nameStr
def getStatName(stat):
if (stat == 'CR'):
return 'Cold Resist'
elif (stat == 'CD'):
return 'Conductivity'
elif (stat == 'DR'):
return 'Decay Resist'
elif (stat == 'FL'):
return 'Flavor'
elif (stat == 'HR'):
return 'Heat Resist'
elif (stat == 'MA'):
return 'Malleability'
elif (stat == 'PE'):
return 'Potential Energy'
elif (stat == 'OQ'):
return 'Overall Quality'
elif (stat == 'SR'):
return 'Shock Resist'
elif (stat == 'UT'):
return 'Unit Toughness'
elif (stat == 'ER'):
return 'Entangle Resist'
else:
return stat
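if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): getStatName maps
    # stat codes to display names and falls back to the raw code.
    print(getStatName('OQ'))  # -> 'Overall Quality'
    print(getStatName('XX'))  # -> 'XX' (unknown codes pass through)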
|
pwillworth/galaxyharvester
|
html/ghNames.py
|
Python
|
gpl-3.0
| 2,565
|
[
"Galaxy"
] |
785ebdbccf7869ca1a52f1e8922ce6199f10ab8d04e2c683b9c521a5d1792f69
|
# -*- coding: utf-8 -*-
'''====
EZ-Fit
====
Provides an easy to use wrapper to fit common functions to a data set using the
Levenberg–Marquardt algorithm provided by mpfit. A full description of the
supported functions and how to use the wrapper is given in easyfit.fit
-------------------------------------------------------------------------------
Copyright (C) 2015 - Bjorn J. Scholz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details: http://www.gnu.org/licenses.
-------------------------------------------------------------------------------
'''
import numpy as np
from mpfit import mpfit
import warnings
def __version__():
print 'Easy-Fit 0.2'
return
def const(x,p):
'''Parameter: constant\n
Return
------
>>> p[0]
'''
return [p[0]]*len(x)
def line(x,p):
'''Parameter: Slope, Intercept\n
Return
------
>>> p[0]*x + p[1]
'''
return p[0]*x + p[1]
def line0(x,p):
'''Parameter: Slope\n
Return
------
>>> p[0]*x
'''
return p[0]*x
def sine(x,p):
'''Parameter: Scale, Wavelength, Phase, Offset\n
Return
------
>>> p[0]*np.sin(2*np.pi*x/p[1]+p[2])+p[3]
'''
return p[0]*np.sin(2*np.pi*x/p[1]+p[2])+p[3]
def fermi(x,p):
'''Parameter: Scale, Edge Position, Width, Offset\n
Return
------
>>> p[0]/(np.exp((x-p[1])/p[2])+1)+p[3]
'''
return p[0]/(np.exp((x-p[1])/p[2])+1)+p[3]
def gauss(x,p):
'''Parameter: Scale, Mean, Std, Offset\n
Return
------
>>> p[0]*np.exp(-0.5*(x-p[1])**2/p[2]**2)+p[3]
'''
return p[0]*np.exp(-0.5*(x-p[1])**2/p[2]**2)+p[3]
def exp(x,p):
    '''Parameter: Scale, Inverse Decay Time, Offset\n
Return
------
>>> p[0]*np.exp(-x*p[1])+p[2]
'''
return p[0]*np.exp(-x*p[1])+p[2]
def poly(x,p,n):
'''Parameter: Scale of each power from [0..n]\n
Return
------
>>> Sum[n=0,n=N] p[n]*x**n
'''
y = 0
for i in range(n+1):
y+=np.power(x,i)*p[i]
return y
def ipoly(x,p,n):
'''Parameter: Scale of each power from [0..n]\n
Return
------
>>> Sum[n=0,n=N] p[n]*x**-n
'''
y = 0
for i in range(n+1):
y+=np.power(x,-i)*p[i]
return y
def plaw(x,p):
'''Parameter: Scale, Exponent
Return
------
>>> p[0]*x**p[1]
'''
return p[0]*x**p[1]
def lognorm(x,p):
'''Parameter: Scale, 'Mean', 'Std', Offset\n
Return
------
>>> p[0]/x*np.exp(-0.5*(np.log(x)-p[1])**2/p[2]**2)+p[3]
'''
return p[0]/x*np.exp(-0.5*(np.log(x)-p[1])**2/p[2]**2)+p[3]
def fit(typ='line',x='None', y='None', yerr='None',p0='None'):
'''
Takes the data and performs a least square fit of the specified type.
Parameters
----------
typ : string
Predefined function that will be fitted to the data. You can find a
list of all supported functions below.
x : array_like or None
X data. If None is given a fit will be performed, yet it is based on
an internally created x data set that runs from [0,N] where N is the
number of y data points provided. Thus all parameters that are not
independent of your choice of x, e.g. slope, are not to be trusted!
If you are only interested in parameters that are independent of x such
        as the height of a gaussian you'll probably get away without providing
an adequate set of x data.
y : array_like
y data. You have to provide a y array. Otherwise there is nothing to
fit.
yerr : array_like or None
Error in y direction. If None is given the fit will assume a uniform
weight of 1.
p0 : array_like or None
Initial guess of fit parameters. If p0 is None all parameters are
        initialized to one or zero depending on the meaning of the individual
parameter.
Returns
-------
x2 : float
        Reduced chi-square.
pars : array_like
Fit parameters returned by mpfit. The meaning of the subarrays are:\n
pars[0]\tBest fit parameters\n
pars[1]\tFit errors\n
pars[2]\tProperly scaled errors\n
Note that it is assumed that the chi-squared returned is sufficiently
        good to justify the scaling of the fit errors. It is pars[2] = pars[1]*
sqrt(x2)
xfit,yfit : array_like
x and y data that can directly be used within matplotlib or another
comparable plotting library to display the fit.
Available functions/fits
------------------------
const
Constant
>>> p[0]
line
Straight line, parameters: Slope and intercept\n
>>> p[0]*x + p[1]
line0
Straight line with designated zero crossing, parameters: Slope\n
>>> p[0]*x
sine
Sine, parameters: Scaling, Wavelength, Phase, Offset\n
>>> p[0]*sin(2*Pi*x/p[1]+p[2])+p[3]
fermi
Fermifunction, parameters: Scaling, Edge Position, Width, Offset\n
>>> p[0]/(exp((x-p[1])/p[2])+1)+p[3]
gauss
Gaussian, parameters: Scaling, Mean, Std, Offset\n
>>> p[0]*exp(-0.5*(x-p[1])**2/p[2]**2)+p[3]
exp
Exponential, parameters: Scaling, Inverse Decaytime, Offset\n
>>> p[0]*exp(-x*p[1])+p[2]
plaw
Power law, parameters: Scaling, Power\n
>>> p[0]*x**p[1]
polyN
Polynomial of order N. Usage: poly3, poly5, poly10, etc. Parameters:
Scalings\n
>>> Sum[n=0,n=N] p[n]*x**n
ipolyN
Inverse polynomial of order N. Usage: ipoly3, poly5, poly10 etc.
Parameters: Scalings\n
>>> Sum[n=0,n=N] p[n]*x**-n
lognorm
Lognormal distribution, Parameters: Scale, 'Mean', 'Std', Offset
        The mean is E(X) = exp(μ + σ^2/2), the median is med(X) = exp(μ), and the
        variance is Var(X) = exp(2μ + σ^2)*(exp(σ^2) - 1); hence the coefficient
        of variation is sqrt(exp(σ^2) - 1).
>>> p[0]/x*np.exp(-0.5*(np.log(x)-p[1])**2/p[2]**2)+p[3]
Example
-------
The following code snippet explains the use of the easyfit wrapper
>>> import matplotlib.pylab as plt
>>> import numpy as np
>>> import easyfit as ef
>>>
>>> x = np.linspace(0,100,30)
>>> y = 0.05*x + 2*(np.random.rand(30)-0.5)
>>>
>>> p0 = [1]
>>> x2, pars, xfit, yfit = ef.fit('line0',x,y,None,p0)
>>>
>>> plt.scatter(x,y)
>>> plt.plot(xfit,yfit)
>>> plt.show()
'''
#=========================================================================#
# Filter Future Warning From Numpy
#=========================================================================#
# warnings.filterwarnings("ignore",category=FutureWarning)
#=========================================================================#
# Set default arrays
#=========================================================================#
n=0
if 'ipoly' in typ:
n = int(typ[5:])
typ = 'ipoly'
elif 'poly' in typ:
n = int(typ[4:])
typ = 'poly'
if x is 'None': x = np.arange(len(y))
if yerr is 'None': yerr = np.ones(len(y))
elif yerr is 'Poisson':
_ty = np.copy(y)
_ty[_ty <= 0] = 1
yerr = np.sqrt(_ty)
if p0 is 'None':
if typ == 'const': p0 = [0]
elif typ == 'line': p0 = [1,0]
elif typ == 'line0': p0 = [1]
elif typ == 'sine': p0 = [1,1,0,0]
elif typ == 'fermi': p0 = [1,1,1,0]
elif typ == 'gauss': p0 = [1,0,1,0]
elif typ == 'lognorm': p0 = [1,0,1,0]
elif typ == 'exp': p0 = [1,1,0]
        elif typ == 'plaw': p0 = [1,1]  # plaw has two parameters: scale and exponent
elif typ == 'poly' or typ == 'ipoly':
p0 = [1]*(n+1)
#=========================================================================#
# Ensure that all given arrays are numpy arrays
#=========================================================================#
x = np.array(x)
y = np.array(y)
yerr = np.array(yerr)
p0 = np.array(p0)
#=========================================================================#
# Setup proper fit function
#=========================================================================#
models = {'const': const,
'line': line,
'line0': line0,
'sine': sine,
'fermi': fermi,
'gauss': gauss,
'exp': exp,
'plaw': plaw,
'lognorm': lognorm,
'poly': lambda x,p: poly(x,p,n),
'ipoly': lambda x,p: ipoly(x,p,n)}
def fitfunc(p, fjac=None, x=None, y=None, err=None):
model = models[typ](x,p)
status = 0
return [status, (y-model)/err]
#=========================================================================#
# Initialize fit info dictionary and try to fit function to data
#=========================================================================#
parbase = {'value':0,'fixed':0,'limited':[0,0],'limits':[0.,0.]}
parinfo = [{k:v for k,v in parbase.items()} for _ti in range(len(p0))]
for i in range(len(p0)):
parinfo[i]['value'] = p0[i]
fa = {'x': x, 'y': y, 'err': yerr}
m = mpfit(fitfunc, p0, parinfo=parinfo, functkw=fa,quiet=1)
dof = len(x) - len(m.params)
pcerror = m.perror * np.sqrt(m.fnorm / dof)
par = [m.params,m.perror,pcerror]
if(m.status <=0):
print 'status = ', m.status
#=========================================================================#
# Calculate goodness of fit and an easy to plot fitted data set
#=========================================================================#
x2 = m.fnorm/dof
xfit = np.linspace(np.min(x),np.max(x),1000)
yfit = models[typ](xfit,par[0])
return x2,par,xfit,yfit
def arbFit(fct=line,x='None', y='None', yerr='None',p0='None',limits='None'):
'''
Takes the data and performs a least square fit of the specified type.
Parameters
----------
fct : function
User defined function that will be fitted to the data. Has to obey the
following convention for its arguments: F(x,p)
x : array_like or None
X data. If None is given a fit will be performed, yet it is based on
an internally created x data set that runs from [0,N] where N is the
number of y data points provided. Thus all parameters that are not
independent of your choice of x, e.g. slope, are not to be trusted!
If you are only interested in parameters that are independent of x such
        as the height of a gaussian you'll probably get away without providing
an adequate set of x data.
y : array_like
y data. You have to provide a y array. Otherwise there is nothing to
fit.
yerr : array_like or None
Error in y direction. If None is given the fit will assume a uniform
weight of 1.
p0 : array_like or None
Initial guess of fit parameters. If p0 is None all parameters are
        initialized to one or zero depending on the meaning of the individual
parameter.
Returns
-------
x2 : float
        Reduced chi-square.
pars : array_like
Fit parameters returned by mpfit. The meaning of the subarrays are:\n
pars[0]\tBest fit parameters\n
pars[1]\tFit errors\n
pars[2]\tProperly scaled errors\n
Note that it is assumed that the chi-squared returned is sufficiently
        good to justify the scaling of the fit errors. It is pars[2] = pars[1]*
sqrt(x2)
xfit,yfit : array_like
x and y data that can directly be used within matplotlib or another
comparable plotting library to display the fit.
Example
-------
The following code snippet explains the use of the easyfit wrapper
>>> import matplotlib.pylab as plt
>>> import numpy as np
>>> import easyfit as ef
>>>
>>> def userFct(x,p):
>>> return p[0]*x**2 + np.exp(-p[1]*x)
>>>
>>> x = np.linspace(0,100,30)
>>> y = userFct(x,[-0.5,0.25]) + 100*(2*np.random.rand(30)-1)
>>>
>>> p0 = [1,0]
>>> x2, pars, xfit, yfit = ef.arbFit(userFct,x,y,None,p0)
>>>
>>> plt.scatter(x,y)
>>> plt.plot(xfit,yfit)
>>> plt.show()
'''
#=========================================================================#
# Filter Future Warning From Numpy
#=========================================================================#
# warnings.filterwarnings("ignore",category=FutureWarning)
#=========================================================================#
# Set default arrays
#=========================================================================#
if x is 'None': x = np.arange(len(y))
if yerr is 'None': yerr = np.ones(len(y))
elif yerr is 'Poisson':
_ty = np.copy(y)
_ty[_ty <= 0] = 1
yerr = np.sqrt(_ty)
if p0 is 'None':
p0 = np.ones(100)
#=========================================================================#
# Ensure that all given arrays are numpy arrays
#=========================================================================#
x = np.array(x)
y = np.array(y)
yerr = np.array(yerr)
p0 = np.array(p0)
#=========================================================================#
# Setup proper fit function
#=========================================================================#
def fitfunc(p, fjac=None, x=None, y=None, err=None):
model = fct(x,p)
status = 0
return [status, (y-model)/err]
#=========================================================================#
# Initialize fit info dictionary and try to fit function to data
#=========================================================================#
parbase = {'value':0,'fixed':0,'limited':[0,0],'limits':[0.,0.]}
parinfo = [{k:v for k,v in parbase.items()} for _ti in range(len(p0))]
for i in range(len(p0)):
parinfo[i]['value'] = p0[i]
if limits != 'None':
for i in range(len(limits)):
parinfo[int(limits[i][0])]['limited'] = limits[i][1:3]
parinfo[int(limits[i][0])]['limits'] = limits[i][3:]
fa = {'x': x, 'y': y, 'err': yerr}
m = mpfit(fitfunc, p0, parinfo=parinfo, functkw=fa,quiet=1)
dof = len(x) - len(m.params)
pcerror = m.perror * np.sqrt(m.fnorm / dof)
par = [m.params,m.perror,pcerror]
if(m.status <=0):
print 'status = ', m.status
#=========================================================================#
# Calculate goodness of fit and an easy to plot fitted data set
#=========================================================================#
x2 = m.fnorm/dof
xfit = np.linspace(np.min(x),np.max(x),1000)
yfit = fct(xfit,par[0])
return x2,par,xfit,yfit
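if __name__ == '__main__':
    # Hedged sketch (not in the original module): the undocumented `limits`
    # argument of arbFit takes rows [param index, limited_low, limited_high,
    # low, high], matching how parinfo is filled above. Data are illustrative.
    x = np.linspace(1, 50, 40)
    y = 2.0*np.sqrt(x) + np.random.rand(40) - 0.5
    lims = [[1, 1, 1, 0.0, 1.0]]  # constrain the exponent p[1] to [0, 1]
    x2, pars, xfit, yfit = arbFit(plaw, x, y, 'None', [1.0, 0.7], limits=lims)
    print 'reduced chi2 =', x2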
|
Nablaquabla/ezfit
|
easyfit.py
|
Python
|
gpl-2.0
| 15,570
|
[
"Gaussian"
] |
febedfaf0fa714255b9f0182497ce7227ac6fe8393b7634899f07635652c640c
|
""" This is the main module that interprets DIRAC cfg format
"""
from __future__ import print_function
import types
import copy
import os
import re
import zipfile
__RCSID__ = "$Id$"
try:
from DIRAC.Core.Utilities import List, ThreadSafe
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
gCFGSynchro = ThreadSafe.Synchronizer( recursive = True )
except Exception:
  #We're out of DIRAC, define required utilities
import threading
def S_ERROR( messageString = '' ):
return { 'OK' : False, 'Message' : str( messageString ) }
def S_OK( value = '' ):
return { 'OK' : True, 'Value' : value }
class ListDummy:
def fromChar( self, inputString, sepChar = "," ):
if not ( isinstance( inputString, basestring) and
isinstance( sepChar, basestring) and
sepChar ): # to prevent getting an empty String as argument
return None
return [ fieldString.strip() for fieldString in inputString.split( sepChar ) if len( fieldString.strip() ) > 0 ]
List = ListDummy()
class Synchronizer:
""" Class enapsulating a lock
allowing it to be used as a synchronizing
decorator making the call thread-safe"""
def __init__( self, lockName = "", recursive = False ):
self.lockName = lockName
if recursive:
self.lock = threading.RLock()
else:
self.lock = threading.Lock()
def __call__( self, funcToCall ):
def lockedFunc( *args, **kwargs ):
try:
if self.lockName:
print("LOCKING", self.lockName)
self.lock.acquire()
return funcToCall( *args, **kwargs )
finally:
if self.lockName:
print("UNLOCKING", self.lockName)
self.lock.release()
return lockedFunc
gCFGSynchro = Synchronizer( recursive = True )
#END OF OUT OF DIRAC
#START OF CFG MODULE
class CFG( object ):
def __init__( self ):
"""
Constructor
"""
self.__orderedList = []
self.__commentDict = {}
self.__dataDict = {}
self.reset()
@gCFGSynchro
def reset( self ):
"""
Empty the CFG
"""
self.__orderedList = []
self.__commentDict = {}
self.__dataDict = {}
@gCFGSynchro
def createNewSection( self, sectionName, comment = "", contents = False ):
"""
Create a new section
:type sectionName: string
:param sectionName: Name of the section
:type comment: string
:param comment: Comment for the section
:type contents: CFG
:param contents: Optional cfg with the contents of the section.
"""
if sectionName == "":
raise ValueError( "Creating a section with empty name! You shouldn't do that!" )
if sectionName.find( "/" ) > -1:
recDict = self.getRecursive( sectionName, -1 )
if not recDict:
return S_ERROR( "Parent section does not exist %s" % sectionName )
parentSection = recDict[ 'value' ]
if isinstance( parentSection, basestring ):
raise KeyError( "Entry %s doesn't seem to be a section" % recDict[ 'key' ] )
return parentSection.createNewSection( recDict[ 'levelsBelow' ], comment, contents )
self.__addEntry( sectionName, comment )
if sectionName not in self.__dataDict:
if not contents:
self.__dataDict[ sectionName ] = CFG()
else:
self.__dataDict[ sectionName ] = contents
else:
raise KeyError( "%s key already exists" % sectionName )
return self.__dataDict[ sectionName ]
def __overrideAndCloneSection( self, sectionName, oCFGToClone ):
"""
Replace the contents of a section
:type sectionName: string
    :param sectionName: Name of the section
:type oCFGToClone: CFG
:param oCFGToClone: CFG with the contents of the section
"""
if sectionName not in self.listSections():
raise KeyError( "Section %s does not exist" % sectionName )
self.__dataDict[ sectionName ] = oCFGToClone.clone()
@gCFGSynchro
def setOption( self, optionName, value, comment = "" ):
"""
Create a new option.
:type optionName: string
:param optionName: Name of the option to create
:type value: string
:param value: Value of the option
:type comment: string
:param comment: Comment for the option
"""
if optionName == "":
raise ValueError( "Creating an option with empty name! You shouldn't do that!" )
if optionName.find( "/" ) > -1:
recDict = self.getRecursive( optionName, -1 )
if not recDict:
return S_ERROR( "Parent section does not exist %s" % optionName )
parentSection = recDict[ 'value' ]
if isinstance( parentSection, basestring ):
raise KeyError( "Entry %s doesn't seem to be a section" % recDict[ 'key' ] )
return parentSection.setOption( recDict[ 'levelsBelow' ], value, comment )
self.__addEntry( optionName, comment )
self.__dataDict[ optionName ] = str( value )
def __addEntry( self, entryName, comment ):
"""
Add an entry and set the comment
:type entryName: string
:param entryName: Name of the entry
:type comment: string
:param comment: Comment for the entry
"""
if not entryName in self.__orderedList:
self.__orderedList.append( entryName )
self.__commentDict[ entryName ] = comment
def existsKey( self, key ):
"""
Check if an option/section with that name exists
:type key: string
:param key: Name of the option/section to check
:return: Boolean with the result
"""
return key in self.__orderedList
def sortAlphabetically( self, ascending = True ):
"""
Order this cfg alphabetically
returns True if modified
"""
if not ascending:
return self.sortByKey( reverse = True )
return self.sortByKey()
def sortByKey( self, key = None , reverse = False ):
"""
    Order this cfg by the function referred to by key; the default None
    corresponds to an alphabetic sort
returns True if modified
"""
unordered = list( self.__orderedList )
self.__orderedList.sort( key = key , reverse = reverse )
return unordered != self.__orderedList
@gCFGSynchro
def deleteKey( self, key ):
"""
Delete an option/section
:type key: string
:param key: Name of the option/section to delete
:return: Boolean with the result
"""
result = self.getRecursive( key, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( key, "/" )[:-1] ) )
cfg = result[ 'value' ]
end = result[ 'levelsBelow' ]
if end in cfg.__orderedList:
del cfg.__commentDict[ end ]
del cfg.__dataDict[ end ]
cfg.__orderedList.remove( end )
return True
return False
@gCFGSynchro
def copyKey( self, oldName, newName ):
"""
Copy an option/section
:type oldName: string
:param oldName: Name of the option / section to copy
:type newName: string
:param newName: Destination name
:return: Boolean with the result
"""
if oldName == newName:
return True
result = self.getRecursive( oldName, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( oldName, "/" )[:-1] ) )
oldCfg = result[ 'value' ]
oldEnd = result[ 'levelsBelow' ]
if oldEnd in oldCfg.__dataDict:
result = self.getRecursive( newName, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( newName, "/" )[:-1] ) )
newCfg = result[ 'value' ]
newEnd = result[ 'levelsBelow' ]
newCfg.__dataDict[ newEnd ] = oldCfg.__dataDict[ oldEnd ]
newCfg.__commentDict[ newEnd ] = oldCfg.__commentDict[ oldEnd ]
refKeyPos = oldCfg.__orderedList.index( oldEnd )
newCfg.__orderedList.insert( refKeyPos + 1, newEnd )
return True
else:
return False
@gCFGSynchro
def listOptions( self, ordered = True ):
"""
List options
:type ordered: boolean
    :param ordered: Return the options ordered. By default is True
:return: List with the option names
"""
if ordered:
return [ sKey for sKey in self.__orderedList if isinstance( self.__dataDict[ sKey ], basestring ) ]
else:
return [ sKey for sKey in self.__dataDict.keys() if isinstance( self.__dataDict[ sKey ], basestring ) ]
@gCFGSynchro
def listSections( self, ordered = True ):
"""
List subsections
:type ordered: boolean
    :param ordered: Return the subsections ordered. By default is True
:return: List with the subsection names
"""
if ordered:
return [ sKey for sKey in self.__orderedList if not isinstance( self.__dataDict[ sKey ], basestring ) ]
else:
return [ sKey for sKey in self.__dataDict.keys() if not isinstance( self.__dataDict[ sKey ], basestring ) ]
@gCFGSynchro
def isSection( self, key ):
"""
Return if a section exists
:type key: string
:param key: Name to check
:return: Boolean with the results
"""
if key.find( "/" ) != -1:
keyDict = self.getRecursive( key, -1 )
if not keyDict:
return False
section = keyDict[ 'value' ]
if isinstance( section, basestring ):
return False
secKey = keyDict[ 'levelsBelow' ]
return section.isSection( secKey )
return key in self.__dataDict and not isinstance( self.__dataDict[ key ], basestring )
@gCFGSynchro
def isOption( self, key ):
"""
Return if an option exists
:type key: string
:param key: Name to check
:return: Boolean with the results
"""
if key.find( "/" ) != -1:
keyDict = self.getRecursive( key, -1 )
if not keyDict:
return False
section = keyDict[ 'value' ]
if isinstance( section, basestring ):
return False
secKey = keyDict[ 'levelsBelow' ]
return section.isOption( secKey )
return key in self.__dataDict and isinstance( self.__dataDict[ key ], basestring )
def listAll( self ):
"""
List all sections and options
:return: List with names of all options and subsections
"""
return self.__orderedList
def __recurse( self, pathList ):
"""
Explore recursively a path
:type pathList: list
:param pathList: List containing the path to explore
:return: Dictionary with the contents { key, value, comment }
"""
if pathList[0] in self.__dataDict:
if len( pathList ) == 1:
return { 'key' : pathList[0],
'value' : self.__dataDict[ pathList[0] ],
'comment' : self.__commentDict[ pathList[0] ] }
else:
return self.__dataDict[ pathList[0] ].__recurse( pathList[1:] )
else:
return False
@gCFGSynchro
def getRecursive( self, path, levelsAbove = 0 ):
"""
Get path contents
:type path: string
:param path: Path to explore recursively and get the contents
:type levelsAbove: integer
:param levelsAbove: Number of children levels in the path that won't be explored.
For instance, to explore all sections in a path except the last one use
levelsAbove = 1
:return: Dictionary containing:
key -> name of the entry
value -> content of the key
comment -> comment of the key
"""
pathList = [ dirName.strip() for dirName in path.split( "/" ) if not dirName.strip() == "" ]
levelsAbove = abs( levelsAbove )
if len( pathList ) - levelsAbove < 0:
return None
if len( pathList ) - levelsAbove == 0:
lBel = ""
if levelsAbove > 0:
lBel = "/".join( pathList[len( pathList ) - levelsAbove: ] )
return { 'key' : "", 'value' : self, 'comment' : "", 'levelsBelow' : lBel }
levelsBelow = ""
if levelsAbove > 0:
levelsBelow = "/".join( pathList[-levelsAbove:] )
pathList = pathList[:-levelsAbove]
retDict = self.__recurse( pathList )
if not retDict:
return None
retDict[ 'levelsBelow' ] = levelsBelow
return retDict
def getOption( self, opName, defaultValue = None ):
"""
Get option value with default applied
:type opName: string
:param opName: Path to the option to retrieve
:type defaultValue: optional (any python type)
:param defaultValue: Default value for the option if the option is not defined.
                         If the option is defined, the value will be returned cast to
the type of defaultValue if it is defined.
:return: Value of the option casted to defaultValue type, or defaultValue
"""
levels = List.fromChar( opName, "/" )
dataD = self.__dataDict
while len( levels ) > 0:
try:
dataV = dataD[ levels.pop( 0 ) ]
except KeyError:
return defaultValue
dataD = dataV
if not isinstance( dataV, basestring ):
optionValue = defaultValue
else:
optionValue = dataV
#Return value if existing, defaultValue if not
if optionValue == defaultValue:
if defaultValue == None or type( defaultValue ) == types.TypeType:
return defaultValue
return optionValue
#Value has been returned from the configuration
if defaultValue == None:
return optionValue
#Casting to defaultValue's type
defaultType = defaultValue
if not type( defaultValue ) == types.TypeType:
defaultType = type( defaultValue )
if defaultType == types.ListType:
try:
return List.fromChar( optionValue, ',' )
except Exception:
return defaultValue
elif defaultType == types.BooleanType:
try:
return optionValue.lower() in ( "y", "yes", "true", "1" )
except Exception:
return defaultValue
else:
try:
return defaultType( optionValue )
except Exception:
return defaultValue
def getAsCFG(self, path=""):
"""Return subsection as CFG object.
:param str path: Path to the section
    :return: CFG object; if the path is not found an empty CFG is returned
"""
if not path:
return self.clone()
splitPath = path.lstrip('/').split('/')
basePath = splitPath[0]
remainingPath = splitPath[1:]
if basePath not in self.__dataDict:
return CFG()
return self.__dataDict[basePath].getAsCFG("/".join(remainingPath))
def getAsDict( self, path = "" ):
"""
Get the contents below a given path as a dict
:type path: string
:param path: Path to retrieve as dict
:return: Dictionary containing the data
"""
resVal = {}
if path:
reqDict = self.getRecursive( path )
if not reqDict:
return resVal
keyCfg = reqDict[ 'value' ]
if isinstance( keyCfg, basestring ):
return resVal
return keyCfg.getAsDict()
for op in self.listOptions():
resVal[ op ] = self[ op ]
for sec in self.listSections():
resVal[ sec ] = self[ sec ].getAsDict()
return resVal
@gCFGSynchro
def appendToOption( self, optionName, value ):
"""
Append a value to an option prepending a comma
:type optionName: string
:param optionName: Name of the option to append the value
:type value: string
:param value: Value to append to the option
"""
result = self.getRecursive( optionName, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( optionName, "/" )[:-1] ) )
cfg = result[ 'value' ]
end = result[ 'levelsBelow' ]
if end not in cfg.__dataDict:
raise KeyError( "Option %s has not been declared" % end )
cfg.__dataDict[ end ] += str( value )
@gCFGSynchro
def addKey( self, key, value, comment, beforeKey = "" ):
"""
Add a new entry (option or section)
:type key: string
:param key: Name of the option/section to add
:type value: string/CFG
:param value: Contents of the new option/section
:type comment: string
:param comment: Comment for the option/section
:type beforeKey: string
:param beforeKey: Name of the option/section to add the entry above. By default
the new entry will be added at the end.
"""
result = self.getRecursive( key, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( key, "/" )[:-1] ) )
cfg = result[ 'value' ]
end = result[ 'levelsBelow' ]
if end in cfg.__dataDict:
raise KeyError( "%s already exists" % key )
cfg.__dataDict[ end ] = value
cfg.__commentDict[ end ] = comment
if beforeKey == "":
cfg.__orderedList.append( end )
else:
refKeyPos = cfg.__orderedList.index( beforeKey )
cfg.__orderedList.insert( refKeyPos, end )
@gCFGSynchro
def renameKey( self, oldName, newName ):
"""
Rename a option/section
:type oldName: string
:param oldName: Name of the option/section to change
:type newName: string
:param newName: New name of the option/section
:return: Boolean with the result of the rename
"""
if oldName == newName:
return True
result = self.getRecursive( oldName, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( oldName, "/" )[:-1] ) )
oldCfg = result[ 'value' ]
oldEnd = result[ 'levelsBelow' ]
if oldEnd in oldCfg.__dataDict:
result = self.getRecursive( newName, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( newName, "/" )[:-1] ) )
newCfg = result[ 'value' ]
newEnd = result[ 'levelsBelow' ]
newCfg.__dataDict[ newEnd ] = oldCfg.__dataDict[ oldEnd ]
newCfg.__commentDict[ newEnd ] = oldCfg.__commentDict[ oldEnd ]
refKeyPos = oldCfg.__orderedList.index( oldEnd )
oldCfg.__orderedList.remove( oldEnd )
newCfg.__orderedList.insert( refKeyPos, newEnd )
del oldCfg.__dataDict[ oldEnd ]
del oldCfg.__commentDict[ oldEnd ]
return True
else:
return False
def __getitem__( self, key ):
"""
Get the contents of a section/option
:type key: string
:param key: Name of the section/option to retrieve
:return: String/CFG with the contents
"""
if key.find( "/" ) > -1:
subDict = self.getRecursive( key )
if not subDict:
return False
return subDict[ 'value' ]
return self.__dataDict[ key ]
def __iter__( self ):
"""
Iterate though the contents in order
"""
for key in self.__orderedList:
yield key
def __contains__( self, key ):
"""
Check if a key is defined
"""
return self.getRecursive( key )
def __str__( self ):
"""
Get a print friendly representation of the CFG
:return: String with the contents of the CFG
"""
return self.serialize()
def __repr__( self ):
"""
Get a print friendly representation of the CFG
:return: String with the contents of the CFG
"""
return self.serialize()
def __nonzero__( self ):
"""
CFGs are not zeroes! ;)
"""
return True
def __eq__( self, cfg ):
"""
Check CFGs
"""
if not self.__orderedList == cfg.__orderedList:
return False
for key in self.__orderedList:
if not self.__commentDict[ key ].strip() == cfg.__commentDict[ key ].strip():
return False
if not self.__dataDict[ key ] == cfg.__dataDict[ key ]:
return False
return True
@gCFGSynchro
def getComment( self, entryName ):
"""
Get the comment for an option/section
:type entryName: string
:param entryName: Name of the option/section
:return: String with the comment
"""
try:
return self.__commentDict[ entryName ]
    except KeyError:
raise ValueError( "%s does not have any comment defined" % entryName )
@gCFGSynchro
def setComment( self, entryName, comment ):
"""
Set the comment for an option/section
:type entryName: string
:param entryName: Name of the option/section
:type comment: string
:param comment: Comment for the option/section
"""
if entryName in self.__orderedList:
self.__commentDict[ entryName ] = comment
return True
return False
@gCFGSynchro
def serialize( self, tabLevelString = "" ):
"""
Generate a human readable serialization of a CFG
:type tabLevelString: string
:param tabLevelString: Tab string to apply to entries before representing them
:return: String with the contents of the CFG
"""
indentation = " "
cfgString = ""
for entryName in self.__orderedList:
if entryName in self.__commentDict:
for commentLine in List.fromChar( self.__commentDict[ entryName ], "\n" ):
cfgString += "%s#%s\n" % ( tabLevelString, commentLine )
if entryName in self.listSections():
cfgString += "%s%s\n%s{\n" % ( tabLevelString, entryName, tabLevelString )
cfgString += self.__dataDict[ entryName ].serialize( "%s%s" % ( tabLevelString, indentation ) )
cfgString += "%s}\n" % tabLevelString
elif entryName in self.listOptions():
valueList = List.fromChar( self.__dataDict[ entryName ] )
if len( valueList ) == 0:
cfgString += "%s%s = \n" % ( tabLevelString, entryName )
else:
cfgString += "%s%s = %s\n" % ( tabLevelString, entryName, valueList[0] )
for value in valueList[1:]:
cfgString += "%s%s += %s\n" % ( tabLevelString, entryName, value )
else:
raise ValueError( "Oops. There is an entry in the order which is not a section nor an option" )
return cfgString
@gCFGSynchro
def clone( self ):
"""
Create a copy of the CFG
:return: CFG copy
"""
clonedCFG = CFG()
clonedCFG.__orderedList = copy.deepcopy( self.__orderedList )
clonedCFG.__commentDict = copy.deepcopy( self.__commentDict )
for option in self.listOptions():
clonedCFG.__dataDict[ option ] = self[ option ]
for section in self.listSections():
clonedCFG.__dataDict[ section ] = self[ section ].clone()
return clonedCFG
@gCFGSynchro
def mergeWith( self, cfgToMergeWith ):
"""
Generate a CFG by merging with the contents of another CFG.
:type cfgToMergeWith: CFG
    :param cfgToMergeWith: CFG with the contents to merge with. These contents
                           take precedence over this CFG's ones
:return: CFG with the result of the merge
"""
mergedCFG = CFG()
for option in self.listOptions():
mergedCFG.setOption( option,
self[ option ],
self.getComment( option ) )
for option in cfgToMergeWith.listOptions():
mergedCFG.setOption( option,
cfgToMergeWith[ option ],
cfgToMergeWith.getComment( option ) )
for section in self.listSections():
if section in cfgToMergeWith.listSections():
oSectionCFG = self[ section ].mergeWith( cfgToMergeWith[ section ] )
mergedCFG.createNewSection( section,
cfgToMergeWith.getComment( section ),
oSectionCFG )
else:
mergedCFG.createNewSection( section,
self.getComment( section ),
self[ section ].clone() )
for section in cfgToMergeWith.listSections():
if section not in self.listSections():
mergedCFG.createNewSection( section,
cfgToMergeWith.getComment( section ),
cfgToMergeWith[ section ] )
return mergedCFG
def getModifications(self, newerCfg, ignoreMask=None, parentPath="",
ignoreOrder=False, ignoreComments=False):
"""
Compare two cfgs
:type newerCfg: ~DIRAC.Core.Utilities.CFG.CFG
:param newerCfg: Cfg to compare with
:param list ignoreMask: List of paths to ignore
:param str parentPath: Start from this path
:param ignoreOrder: Do not return changes only in ordering
    :param ignoreComments: Do not return changes for changed comments
:return: A list of modifications
"""
modList = []
#Options
oldOptions = self.listOptions( True )
newOptions = newerCfg.listOptions( True )
for newOption in newOptions:
iPos = newerCfg.__orderedList.index( newOption )
newOptPath = "%s/%s" % ( parentPath, newOption )
if ignoreMask and newOptPath in ignoreMask:
continue
if newOption not in oldOptions:
modList.append( ( 'addOpt', newOption, iPos,
newerCfg[ newOption ],
newerCfg.getComment( newOption ) ) )
else:
modified = False
if iPos != self.__orderedList.index(newOption) and not ignoreOrder:
modified = True
elif newerCfg[ newOption ] != self[ newOption ]:
modified = True
elif newerCfg.getComment(newOption) != self.getComment(newOption) and not ignoreComments:
modified = True
if modified:
modList.append( ( 'modOpt', newOption, iPos,
newerCfg[ newOption ],
newerCfg.getComment( newOption ) ) )
for oldOption in oldOptions:
oldOptPath = "%s/%s" % ( parentPath, oldOption )
if ignoreMask and oldOptPath in ignoreMask:
continue
if oldOption not in newOptions:
modList.append( ( 'delOpt', oldOption, -1, '' ) )
#Sections
oldSections = self.listSections( True )
newSections = newerCfg.listSections( True )
for newSection in newSections:
iPos = newerCfg.__orderedList.index( newSection )
newSecPath = "%s/%s" % ( parentPath, newSection )
if ignoreMask and newSecPath in ignoreMask:
continue
if newSection not in oldSections:
modList.append( ( 'addSec', newSection, iPos,
str( newerCfg[ newSection ] ),
newerCfg.getComment( newSection ) ) )
else:
modified = False
if iPos != self.__orderedList.index( newSection ):
modified = True
elif newerCfg.getComment( newSection ) != self.getComment( newSection ):
modified = True
subMod = self[newSection].getModifications(newerCfg[newSection],
ignoreMask, newSecPath,
ignoreOrder, ignoreComments)
if subMod:
modified = True
if modified:
modList.append( ( 'modSec', newSection, iPos,
subMod,
newerCfg.getComment( newSection ) ) )
for oldSection in oldSections:
oldSecPath = "%s/%s" % ( parentPath, oldSection )
if ignoreMask and oldSecPath in ignoreMask:
continue
if oldSection not in newSections:
modList.append( ( 'delSec', oldSection, -1, '' ) )
return modList
def applyModifications( self, modList, parentSection = "" ):
"""
Apply modifications to a CFG
:type modList: List
:param modList: Modifications from a getModifications call
:return: True/False
"""
for modAction in modList:
action = modAction[0]
key = modAction[1]
iPos = modAction[2]
value = modAction[3]
if action == 'addSec':
if key in self.listSections():
return S_ERROR( "Section %s/%s already exists" % ( parentSection, key ) )
#key, value, comment, beforeKey = ""
value = CFG().loadFromBuffer( value )
comment = modAction[4].strip()
if iPos < len( self.__orderedList ):
beforeKey = self.__orderedList[ iPos ]
else:
beforeKey = ""
self.addKey( key, value, comment, beforeKey )
elif action == 'delSec':
if key not in self.listSections():
return S_ERROR( "Section %s/%s does not exist" % ( parentSection, key ) )
self.deleteKey( key )
elif action == 'modSec':
if key not in self.listSections():
return S_ERROR( "Section %s/%s does not exist" % ( parentSection, key ) )
comment = modAction[4].strip()
self.setComment( key, comment )
if value:
result = self[ key ].applyModifications( value, "%s/%s" % ( parentSection, key ) )
if not result[ 'OK' ]:
return result
if iPos >= len( self.__orderedList ) or key != self.__orderedList[ iPos ]:
prevPos = self.__orderedList.index( key )
del self.__orderedList[ prevPos ]
self.__orderedList.insert( iPos, key )
elif action == "addOpt":
if key in self.listOptions():
return S_ERROR( "Option %s/%s exists already" % ( parentSection, key ) )
#key, value, comment, beforeKey = ""
comment = modAction[4].strip()
if iPos < len( self.__orderedList ):
beforeKey = self.__orderedList[ iPos ]
else:
beforeKey = ""
self.addKey( key, value, comment, beforeKey )
elif action == "modOpt":
if key not in self.listOptions():
return S_ERROR( "Option %s/%s does not exist" % ( parentSection, key ) )
comment = modAction[4].strip()
self.setOption( key , value, comment )
if iPos >= len( self.__orderedList ) or key != self.__orderedList[ iPos ]:
prevPos = self.__orderedList.index( key )
del( self.__orderedList[ prevPos ] )
self.__orderedList.insert( iPos, key )
elif action == "delOpt":
if key not in self.listOptions():
return S_ERROR( "Option %s/%s does not exist" % ( parentSection, key ) )
self.deleteKey( key )
return S_OK()
#Functions to load a CFG
def loadFromFile( self, fileName ):
"""
Load the contents of the CFG from a file
:type fileName: string
:param fileName: File name to load the contents from
:return: This CFG
"""
if zipfile.is_zipfile( fileName ):
#Zipped file
zipHandler = zipfile.ZipFile( fileName )
nameList = zipHandler.namelist()
fileToRead = nameList[0]
fileData = zipHandler.read( fileToRead )
zipHandler.close()
else:
with open( fileName ) as fd:
fileData = fd.read()
return self.loadFromBuffer( fileData )
@gCFGSynchro
def loadFromBuffer( self, data ):
"""
Load the contents of the CFG from a string
:type data: string
:param data: Contents of the CFG
:return: This CFG
"""
commentRE = re.compile( r"^\s*#" )
self.reset()
levelList = []
currentLevel = self
currentlyParsedString = ""
currentComment = ""
for line in data.split( "\n" ):
line = line.strip()
if len( line ) < 1:
continue
if commentRE.match( line ):
currentComment += "%s\n" % line.replace( "#", "" )
continue
for index in range( len( line ) ):
if line[ index ] == "{":
currentlyParsedString = currentlyParsedString.strip()
currentLevel.createNewSection( currentlyParsedString, currentComment )
levelList.append( currentLevel )
currentLevel = currentLevel[ currentlyParsedString ]
currentlyParsedString = ""
currentComment = ""
elif line[ index ] == "}":
currentLevel = levelList.pop()
elif line[ index ] == "=":
lFields = line.split( "=" )
currentLevel.setOption( lFields[0].strip(), "=".join( lFields[1:] ).strip(), currentComment )
currentlyParsedString = ""
currentComment = ""
break
elif line[ index: index + 2 ] == "+=":
valueList = line.split( "+=" )
currentLevel.appendToOption( valueList[0].strip(), ", %s" % "+=".join( valueList[1:] ).strip() )
currentlyParsedString = ""
currentComment = ""
break
else:
currentlyParsedString += line[ index ]
return self
@gCFGSynchro
def loadFromDict( self, data ):
for k in data:
value = data[ k ]
if isinstance( value, dict ):
self.createNewSection( k , "", CFG().loadFromDict( value ) )
elif isinstance( value, (list, tuple) ):
self.setOption( k , ", ".join( value ), "" )
else:
self.setOption( k , str( value ), "" )
return self
def writeToFile( self, fileName ):
"""
Write the contents of the cfg to file
:type fileName: string
:param fileName: Name of the file to write the cfg to
:return: True/False
"""
try:
directory = os.path.dirname( fileName )
if directory and ( not os.path.exists( directory ) ):
os.makedirs( directory )
fd = open(fileName, "w")
fd.write( str( self ) )
fd.close()
return True
except Exception:
return False
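if __name__ == '__main__':
  # Hedged usage sketch (not part of the original module): parse a buffer,
  # read options with typed defaults, and merge two CFGs. The buffer text
  # below is illustrative only.
  base = CFG().loadFromBuffer( "Systems\n{\n  Retries = 3\n  Enabled = yes\n}\n" )
  print( base.getOption( "/Systems/Retries", 0 ) )      # -> 3 (cast to int)
  print( base.getOption( "/Systems/Enabled", False ) )  # -> True
  override = CFG().loadFromBuffer( "Systems\n{\n  Retries = 5\n}\n" )
  print( base.mergeWith( override )[ "Systems" ][ "Retries" ] )  # -> '5'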
|
petricm/DIRAC
|
Core/Utilities/CFG.py
|
Python
|
gpl-3.0
| 32,787
|
[
"DIRAC"
] |
588ad2294e83291d4fe37bc6e25c1a3544835d00cb70e78d7847316e2b750691
|
import numpy as NP
"""
A module which implements the continuous wavelet transform
Wavelet classes:
Morlet
MorletReal
MexicanHat
Paul2 : Paul order 2
Paul4 : Paul order 4
DOG1 : 1st Derivative Of Gaussian
DOG4 : 4th Derivative Of Gaussian
Haar : Unnormalised version of continuous Haar transform
HaarW : Normalised Haar
Usage e.g.
wavelet=Morlet(data, largestscale=2, notes=0, order=2, scaling="log")
data: Numeric array of data (float), with length ndata.
Optimum length is a power of 2 (for FFT)
Worst-case length is a prime
largestscale:
largest scale as inverse fraction of length
scale = len(data)/largestscale
smallest scale should be >= 2 for meaningful data
notes: number of scale intervals per octave
if notes == 0, scales are on a linear increment
order: order of wavelet for wavelets with variable order
[Paul, DOG, ..]
scaling: "linear" or "log" scaling of the wavelet scale.
Note that feature width in the scale direction
is constant on a log scale.
Attributes of instance:
wavelet.cwt: 2-d array of Wavelet coefficients, (nscales,ndata)
wavelet.nscale: Number of scale intervals
wavelet.scales: Array of scale values
Note that meaning of the scale will depend on the family
wavelet.fourierwl: Factor to multiply scale by to get scale
of equivalent FFT
Using this factor, different wavelet families will
have comparable scales
References:
A practical guide to wavelet analysis
 C Torrence and GP Compo
Bull Amer Meteor Soc Vol 79 No 1 61-78 (1998)
naming below vaguely follows this.
updates:
(24/2/07): Fix Morlet so can get MorletReal by cutting out H
(10/04/08): Numeric -> numpy
(25/07/08): log and lin scale increment in same direction!
 swap indices in 2-d coefficient matrix
explicit scaling of scale axis
"""
class Cwt:
"""
Base class for continuous wavelet transforms
Implements cwt via the Fourier transform
Used by subclass which provides the method wf(self,s_omega)
wf is the Fourier transform of the wavelet function.
Returns an instance.
"""
fourierwl=1.00
def _log2(self, x):
# utility function to return (integer) log2
return int( NP.log(float(x))/ NP.log(2.0)+0.0001 )
def __init__(self, data, largestscale=1, notes=0, order=2, scaling='linear'):
"""
Continuous wavelet transform of data
data: data in array to transform, length must be power of 2
notes: number of scale intervals per octave
largestscale: largest scale as inverse fraction of length
of data array
scale = len(data)/largestscale
smallest scale should be >= 2 for meaningful data
order: Order of wavelet basis function for some families
scaling: Linear or log
"""
ndata = len(data)
self.order=order
self.scale=largestscale
self._setscales(ndata,largestscale,notes,scaling)
self.cwt= NP.zeros((self.nscale,ndata), NP.complex64)
omega= NP.array(range(0,ndata/2)+range(-ndata/2,0))*(2.0*NP.pi/ndata)
datahat=NP.fft.fft(data)
self.fftdata=datahat
#self.psihat0=self.wf(omega*self.scales[3*self.nscale/4])
        # loop over scales and compute wavelet coefficients at each scale
# using the fft to do the convolution
for scaleindex in range(self.nscale):
currentscale=self.scales[scaleindex]
self.currentscale=currentscale # for internal use
s_omega = omega*currentscale
psihat=self.wf(s_omega)
psihat = psihat * NP.sqrt(2.0*NP.pi*currentscale)
convhat = psihat * datahat
W = NP.fft.ifft(convhat)
self.cwt[scaleindex,0:ndata] = W
return
def _setscales(self,ndata, largestscale,notes,scaling):
"""
        if notes non-zero, returns a log scale based on notes per octave
else a linear scale
(25/07/08): fix notes!=0 case so smallest scale at [0]
"""
if scaling=="log":
if notes<=0: notes=1
# adjust nscale so smallest scale is 2
noctave=self._log2( ndata/largestscale/2 )
self.nscale=notes*noctave
self.scales=NP.zeros(self.nscale,float)
for j in range(self.nscale):
self.scales[j] = ndata/(self.scale*(2.0**(float(self.nscale-1-j)/notes)))
elif scaling=="linear":
nmax=ndata/largestscale/2
self.scales=NP.arange(float(2),float(nmax))
self.nscale=len(self.scales)
elif scaling=="direct":
# largestscale now contains scales
self.scales=largestscale
self.nscale=len(self.scales)
        else: raise ValueError, "scaling must be linear, log or direct"
return
def getdata(self):
"""
returns wavelet coefficient array
"""
return self.cwt
def getcoefficients(self):
return self.cwt
def getpower(self):
"""
returns square of wavelet coefficient array
"""
return (self.cwt* NP.conjugate(self.cwt)).real
def getscales(self):
"""
returns array containing scales used in transform
"""
return self.scales
def getnscale(self):
"""
return number of scales
"""
return self.nscale
# wavelet classes
class Morlet(Cwt):
"""
Morlet wavelet
"""
_omega0=5.0
fourierwl=4* NP.pi/(_omega0+ NP.sqrt(2.0+_omega0**2))
def wf(self, s_omega):
H= NP.ones(len(s_omega))
n=len(s_omega)
for i in range(len(s_omega)):
if s_omega[i] < 0.0: H[i]=0.0
# !!!! note : was s_omega/8 before 17/6/03
xhat=0.75112554*( NP.exp(-(s_omega-self._omega0)**2/2.0))*H
return xhat
class MorletReal(Cwt):
"""
Real Morlet wavelet
"""
_omega0=5.0
fourierwl=4* NP.pi/(_omega0+ NP.sqrt(2.0+_omega0**2))
def wf(self, s_omega):
H= NP.ones(len(s_omega))
n=len(s_omega)
for i in range(len(s_omega)):
if s_omega[i] < 0.0: H[i]=0.0
# !!!! note : was s_omega/8 before 17/6/03
xhat=0.75112554*( NP.exp(-(s_omega-self._omega0)**2/2.0)+ NP.exp(-(s_omega+self._omega0)**2/2.0)- NP.exp(-(self._omega0)**2/2.0)+ NP.exp(-(self._omega0)**2/2.0))
return xhat
class Paul4(Cwt):
"""
Paul m=4 wavelet
"""
fourierwl=4* NP.pi/(2.*4+1.)
def wf(self, s_omega):
n=len(s_omega)
xhat= NP.zeros(n)
xhat[0:n/2]=0.11268723*s_omega[0:n/2]**4* NP.exp(-s_omega[0:n/2])
#return 0.11268723*s_omega**2*exp(-s_omega)*H
return xhat
class Paul2(Cwt):
"""
Paul m=2 wavelet
"""
fourierwl=4* NP.pi/(2.*2+1.)
def wf(self, s_omega):
n=len(s_omega)
xhat= NP.zeros(n)
xhat[0:n/2]=1.1547005*s_omega[0:n/2]**2* NP.exp(-s_omega[0:n/2])
#return 0.11268723*s_omega**2*exp(-s_omega)*H
return xhat
class Paul(Cwt):
"""
Paul order m wavelet
"""
def wf(self, s_omega):
Cwt.fourierwl=4* NP.pi/(2.*self.order+1.)
m=self.order
n=len(s_omega)
normfactor=float(m)
for i in range(1,2*m):
normfactor=normfactor*i
normfactor=2.0**m/ NP.sqrt(normfactor)
xhat= NP.zeros(n)
xhat[0:n/2]=normfactor*s_omega[0:n/2]**m* NP.exp(-s_omega[0:n/2])
#return 0.11268723*s_omega**2*exp(-s_omega)*H
return xhat
class MexicanHat(Cwt):
"""
2nd Derivative Gaussian (mexican hat) wavelet
"""
fourierwl=2.0* NP.pi/ NP.sqrt(2.5)
def wf(self, s_omega):
# should this number be 1/sqrt(3/4) (no pi)?
#s_omega = s_omega/self.fourierwl
#print max(s_omega)
a=s_omega**2
b=s_omega**2/2
return a* NP.exp(-b)/1.1529702
#return s_omega**2*exp(-s_omega**2/2.0)/1.1529702
class DOG4(Cwt):
"""
4th Derivative Gaussian wavelet
see also T&C errata for - sign
but reconstruction seems to work best with +!
"""
fourierwl=2.0* NP.pi/ NP.sqrt(4.5)
def wf(self, s_omega):
return s_omega**4* NP.exp(-s_omega**2/2.0)/3.4105319
class DOG1(Cwt):
"""
1st Derivative Gaussian wavelet
but reconstruction seems to work best with +!
"""
fourierwl=2.0* NP.pi/ NP.sqrt(1.5)
def wf(self, s_omega):
        dog1= NP.zeros(len(s_omega), NP.complex64)
        dog1.imag=s_omega* NP.exp(-s_omega**2/2.0)/ NP.sqrt(NP.pi)
return dog1
class DOG(Cwt):
"""
Derivative Gaussian wavelet of order m
but reconstruction seems to work best with +!
"""
def wf(self, s_omega):
try:
from scipy.special import gamma
except ImportError:
print "Requires scipy gamma function"
raise ImportError
Cwt.fourierwl=2* NP.pi/ NP.sqrt(self.order+0.5)
m=self.order
dog=1.0J**m*s_omega**m* NP.exp(-s_omega**2/2)/ NP.sqrt(gamma(self.order+0.5))
return dog
class Haar(Cwt):
"""
Continuous version of Haar wavelet
"""
# note: not orthogonal!
# note: s_omega/4 matches Lecroix scale defn.
# s_omega/2 matches orthogonal Haar
# 2/8/05 constants adjusted to match artem eim
fourierwl=1.0#1.83129 #2.0
def wf(self, s_omega):
        haar= NP.zeros(len(s_omega), NP.complex64)
om = s_omega[:]/self.currentscale
om[0]=1.0 #prevent divide error
#haar.imag=4.0*sin(s_omega/2)**2/om
haar.imag=4.0* NP.sin(s_omega/4)**2/om
return haar
class HaarW(Cwt):
"""
Continuous version of Haar wavelet (norm)
"""
# note: not orthogonal!
# note: s_omega/4 matches Lecroix scale defn.
# s_omega/2 matches orthogonal Haar
# normalised to unit power
fourierwl=1.83129*1.2 #2.0
def wf(self, s_omega):
        haar= NP.zeros(len(s_omega), NP.complex64)
om = s_omega[:]#/self.currentscale
om[0]=1.0 #prevent divide error
#haar.imag=4.0*sin(s_omega/2)**2/om
haar.imag=4.0* NP.sin(s_omega/2)**2/om
return haar
if __name__=="__main__":
import numpy as np
import pylab as mpl
wavelet=Morlet
maxscale=4
notes=16
scaling="log" #or "linear"
#scaling="linear"
plotpower2d=True #False
# set up some data
Ns=1024
#limits of analysis
Nlo=0
Nhi=Ns
# sinusoids of two periods, 128 and 32.
x=np.arange(0.0,1.0*Ns,1.0)
A=np.sin(2.0*np.pi*x/128.0)
B=np.sin(2.0*np.pi*x/32.0)
A[512:768]+=B[0:256]
# Wavelet transform the data
cw=wavelet(A,maxscale,notes,scaling=scaling)
scales=cw.getscales()
cwt=cw.getdata()
# power spectrum
pwr=cw.getpower()
scalespec=np.sum(pwr,axis=1)/scales # calculate scale spectrum
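    # Hedged addition (not in the original demo): report the dominant Fourier
    # period implied by the scale spectrum; fourierwl converts a wavelet scale
    # to the equivalent Fourier period.
    print "Dominant period ~ %.1f samples" % (cw.fourierwl*scales[np.argmax(scalespec)])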
# scales
y=cw.fourierwl*scales
x=np.arange(Nlo*1.0,Nhi*1.0,1.0)
fig=mpl.figure(1)
# 2-d coefficient plot
ax=mpl.axes([0.4,0.1,0.55,0.4])
mpl.xlabel('Time [s]')
plotcwt=np.clip(np.fabs(cwt.real), 0., 1000.)
if plotpower2d: plotcwt=pwr
im=mpl.imshow(plotcwt,cmap=mpl.cm.jet,extent=[x[0],x[-1],y[-1],y[0]],aspect='auto')
#colorbar()
if scaling=="log": ax.set_yscale('log')
mpl.ylim(y[0],y[-1])
ax.xaxis.set_ticks(np.arange(Nlo*1.0,(Nhi+1)*1.0,100.0))
ax.yaxis.set_ticklabels(["",""])
theposition=mpl.gca().get_position()
# data plot
ax2=mpl.axes([0.4,0.54,0.55,0.3])
mpl.ylabel('Data')
pos=ax.get_position()
mpl.plot(x,A,'b-')
mpl.xlim(Nlo*1.0,Nhi*1.0)
ax2.xaxis.set_ticklabels(["",""])
mpl.text(0.5,0.9,"Wavelet example with extra panes",
fontsize=14,bbox=dict(facecolor='green',alpha=0.2),
transform = fig.transFigure,horizontalalignment='center')
# projected power spectrum
ax3=mpl.axes([0.08,0.1,0.29,0.4])
mpl.xlabel('Power')
mpl.ylabel('Period [s]')
vara=1.0
if scaling=="log":
mpl.loglog(scalespec/vara+0.01,y,'b-')
else:
mpl.semilogx(scalespec/vara+0.01,y,'b-')
mpl.ylim(y[0],y[-1])
mpl.xlim(1000.0,0.01)
mpl.show()
|
MattNolanLab/ei-attractor
|
grid_cell_model/analysis/Wavelets.py
|
Python
|
gpl-3.0
| 12,300
|
[
"Gaussian"
] |
99e989057ac17077580bb9b351b928174eedbd8a50a5df05d41abf7ada660845
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, Matthew Harrigan, Carlos Xavier Hernandez
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import pickle
import tempfile
import mdtraj as md
import numpy as np
import pytest
from mdtraj.testing import eq
try:
from simtk.openmm import app
import simtk.unit as u
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
needs_openmm = pytest.mark.skipif(not HAVE_OPENMM, reason='needs OpenMM')
@needs_openmm
def test_topology_openmm(get_fn):
topology = md.load(get_fn('1bpi.pdb')).topology
topology_with_bond_order = md.load(get_fn('imatinib.mol2')).topology
# the openmm trajectory doesn't have the distinction
# between resSeq and index, so if they're out of whack
    # in the openmm version, that can't be preserved
for top in [topology, topology_with_bond_order]:
for residue in top.residues:
residue.resSeq = residue.index
mm = top.to_openmm()
assert isinstance(mm, app.Topology)
topology2 = md.Topology.from_openmm(mm)
eq(top, topology2)
@needs_openmm
def test_topology_openmm_boxes(get_fn):
traj = md.load(get_fn('1vii_sustiva_water.pdb'))
mmtop = traj.topology.to_openmm(traj=traj)
box = mmtop.getUnitCellDimensions() / u.nanometer
def test_topology_pandas(get_fn):
topology = md.load(get_fn('native.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
# Make sure default argument of None works, see issue #774
topology3 = md.Topology.from_dataframe(atoms)
def test_topology_pandas_TIP4PEW(get_fn):
topology = md.load(get_fn('GG-tip4pew.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
def test_topology_pandas_2residues_same_resSeq(get_fn):
topology = md.load(get_fn('two_residues_same_resnum.gro')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
def test_topology_numbers(get_fn):
topology = md.load(get_fn('1bpi.pdb')).topology
assert len(list(topology.atoms)) == topology.n_atoms
assert len(list(topology.residues)) == topology.n_residues
assert all([topology.atom(i).index == i for i in range(topology.n_atoms)])
def test_topology_unique_elements_bpti(get_fn):
traj = md.load(get_fn('bpti.pdb'))
top, bonds = traj.top.to_dataframe()
atoms = np.unique(["C", "O", "N", "H", "S"])
eq(atoms, np.unique(top.element.values))
def test_chain(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
chain = top.chain(0)
assert chain.n_residues == len(list(chain.residues))
atoms = list(chain.atoms)
assert chain.n_atoms == len(atoms)
for i in range(chain.n_atoms):
assert atoms[i] == chain.atom(i)
def test_residue(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
residue = top.residue(0)
assert len(list(residue.atoms)) == residue.n_atoms
atoms = list(residue.atoms)
for i in range(residue.n_atoms):
assert residue.atom(i) == atoms[i]
def test_segment_id(get_fn):
top = md.load(get_fn('ala_ala_ala.pdb')).topology
assert next(top.residues).segment_id == "AAL", "Segment id is not being assigned correctly for ala_ala_ala.psf"
df = top.to_dataframe()[0]
assert len(df["segmentID"] == "AAL") == len(
df), "Segment id is not being assigned correctly to topology data frame ala_ala_ala.psf"
def test_nonconsective_resSeq(get_fn):
t = md.load(get_fn('nonconsecutive_resSeq.pdb'))
assert eq(np.array([r.resSeq for r in t.top.residues]), np.array([1, 3, 5]))
df1 = t.top.to_dataframe()
df2 = md.Topology.from_dataframe(*df1).to_dataframe()
assert eq(df1[0], df2[0])
# round-trip through a PDB load/save loop
fd, fname = tempfile.mkstemp(suffix='.pdb')
os.close(fd)
t.save(fname)
t2 = md.load(fname)
assert eq(df1[0], t2.top.to_dataframe()[0])
os.unlink(fname)
def test_pickle(get_fn):
# test pickling of topology (bug #391)
topology_without_bond_order = md.load(get_fn('bpti.pdb')).topology
topology_with_bond_order = md.load(get_fn('imatinib.mol2')).topology
for top in [topology_with_bond_order, topology_without_bond_order]:
loaded_top = pickle.loads(pickle.dumps(top))
assert loaded_top == top
def test_atoms_by_name(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
atoms = list(top.atoms)
for atom1, atom2 in zip(top.atoms_by_name('CA'), top.chain(0).atoms_by_name('CA')):
assert atom1 == atom2
assert atom1 in atoms
assert atom1.name == 'CA'
assert len(list(top.atoms_by_name('CA'))) == sum(1 for _ in atoms if _.name == 'CA')
assert top.residue(15).atom('CA') == [a for a in top.residue(15).atoms if a.name == 'CA'][0]
with pytest.raises(KeyError):
top.residue(15).atom('sdfsdf')
def test_select_atom_indices(get_fn):
top = md.load(get_fn('native.pdb')).topology
assert eq(top.select_atom_indices('alpha'), np.array([8]))
assert eq(top.select_atom_indices('minimal'),
np.array([4, 5, 6, 8, 10, 14, 15, 16, 18]))
with pytest.raises(ValueError):
top.select_atom_indices('sdfsdf')
@needs_openmm
def test_top_dataframe_openmm_roundtrip(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
top, bonds = t.top.to_dataframe()
t.topology = md.Topology.from_dataframe(top, bonds)
omm_top = t.top.to_openmm()
def test_n_bonds(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
for atom in t.top.atoms:
if atom.element.symbol == 'H':
assert atom.n_bonds == 1
elif atom.element.symbol == 'C':
assert atom.n_bonds in [3, 4]
elif atom.element.symbol == 'O':
assert atom.n_bonds in [1, 2]
def test_load_unknown_topology(get_fn):
try:
md.load(get_fn('frame0.dcd'), top=get_fn('frame0.dcd'))
except IOError as e:
        # we want to make sure there's a nice error message that includes
# a list of the supported topology formats.
assert all(s in str(e) for s in ('.pdb', '.psf', '.prmtop'))
else:
assert False # fail
def test_unique_pairs():
n = 10
a = np.arange(n)
b = np.arange(n, n + n)
eq(md.Topology._unique_pairs(a, a).sort(), md.Topology._unique_pairs_equal(a).sort())
eq(md.Topology._unique_pairs(a, b).sort(), md.Topology._unique_pairs_mutually_exclusive(a, b).sort())
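# A sketch of what the pair helpers enumerate (my reading of the private API,
# not asserted by this test): for a = [0, 1] and b = [2, 3],
# _unique_pairs_equal(a) yields the within-set pair (0, 1), while
# _unique_pairs_mutually_exclusive(a, b) yields the cross pairs
# (0, 2), (0, 3), (1, 2), (1, 3).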
def test_select_pairs(get_fn):
traj = md.load(get_fn('tip3p_300K_1ATM.pdb'))
select_pairs = traj.top.select_pairs
assert len(select_pairs(selection1='name O', selection2='name O')) == 258 * (258 - 1) // 2
assert len(select_pairs(selection1='name H1', selection2='name O')) == 258 * 258
selections = iter([
# Equal
("(name O) or (name =~ 'H.*')", "(name O) or (name =~ 'H.*')"),
('all', 'all'),
# Exclusive
('name O', 'name H1'),
('name H1', 'name O'),
# Overlap
(range(traj.n_atoms), 'name O'),
('all', 'name O')])
for select1, select2 in selections:
select3, select4 = next(selections)
assert eq(select_pairs(selection1=select1, selection2=select2).sort(),
select_pairs(selection1=select3, selection2=select4).sort())
def test_to_fasta(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
assert t.topology.to_fasta(0) == "ENFSGGCVAGYMRTPDGRCKPTFYQLIT"
def test_subset(get_fn):
t1 = md.load(get_fn('2EQQ.pdb')).top
t2 = t1.subset([1, 2, 3])
assert t2.n_residues == 1
def test_subset_re_index_residues(get_fn):
t1 = md.load(get_fn('2EQQ.pdb')).top
t2 = t1.subset(t1.select('resid 0 2'))
np.testing.assert_array_equal([0, 1], [rr.index for rr in t2.residues])
def test_molecules(get_fn):
top = md.load(get_fn('4OH9.pdb')).topology
molecules = top.find_molecules()
assert sum(len(mol) for mol in molecules) == top.n_atoms
assert sum(1 for mol in molecules if len(mol) > 1) == 2 # All but two molecules are water
def test_copy_and_hash(get_fn):
t = md.load(get_fn('traj.h5'))
t1 = t.topology
t2 = t.topology.copy()
assert t1 == t2
assert hash(tuple(t1._chains)) == hash(tuple(t2._chains))
assert hash(tuple(t1._atoms)) == hash(tuple(t2._atoms))
assert hash(tuple(t1._bonds)) == hash(tuple(t2._bonds))
assert hash(tuple(t1._residues)) == hash(tuple(t2._residues))
assert hash(t1) == hash(t2)
def test_topology_sliced_residue_indices(get_fn):
# https://github.com/mdtraj/mdtraj/issues/1585
full = md.load(get_fn('1bpi.pdb'))
residues = full.top.select("resid 1 to 10")
sliced = full.atom_slice(residues)
idx = [res.index for res in sliced.top.residues][-1]
assert idx == sliced.top.n_residues-1
# Now see if this works
_ = sliced.topology.residue(idx)
def test_topology_join(get_fn):
top_1 = md.load(get_fn('2EQQ.pdb')).topology
top_2 = md.load(get_fn('4OH9.pdb')).topology
out_topology = top_1.join(top_2)
eq(out_topology.n_atoms, top_1.n_atoms + top_2.n_atoms)
eq(out_topology.n_residues, top_1.n_residues + top_2.n_residues)
eq(top_1.atom(0).residue.name, out_topology.atom(0).residue.name)
eq(top_2.atom(-1).residue.name, out_topology.atom(-1).residue.name)
eq(top_1.atom(0).element, out_topology.atom(0).element)
eq(top_2.atom(-1).element, out_topology.atom(-1).element)
def test_topology_join_keep_resSeq(get_fn):
top_1 = md.load(get_fn('2EQQ.pdb')).topology
top_2 = md.load(get_fn('4OH9.pdb')).topology
out_topology_keepId_True = top_1.join(top_2, keep_resSeq=True)
out_topology_keepId_False = top_1.join(top_2, keep_resSeq=False)
out_resSeq_keepId_True = [residue.resSeq for residue in out_topology_keepId_True.residues]
out_resSeq_keepId_False = [residue.resSeq for residue in out_topology_keepId_False.residues]
expected_resSeq_keepId_True = (
[residue.resSeq for residue in top_1.residues
] + [
residue.resSeq for residue in top_2.residues])
expected_resSeq_keepId_False = list(range(1, len(expected_resSeq_keepId_True) + 1))
eq(out_resSeq_keepId_True, expected_resSeq_keepId_True)
eq(out_resSeq_keepId_False, expected_resSeq_keepId_False)
| dwhswenson/mdtraj | tests/test_topology.py | Python | lgpl-2.1 | 11,353 | ["MDTraj", "OpenMM"] | a9a732cfe5cbd52a20afb272506c94c3c981b4dace83cd07d6e3c1237468e3dc |
#!/usr/bin/python
# This script predicts the grid of probabilities
from sklearn.preprocessing import RobustScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.kernel_approximation import RBFSampler
import numpy as np
import json
import math
from sklearn.externals import joblib
from geojson import Feature, Polygon, FeatureCollection, dumps
import os
import sys
config_path = "/home/angelica/Git/osiris/srp/utilities/"
sys.path.append(os.path.abspath(config_path))
from geo import get_position_in_grid
from geo import get_polygon
from config import get_grid
from config import get_training_set
from MyAPI import MyAPI
from utilities import print_result
import argparse
import time
from datetime import datetime
def platt_func(x):
return 1/(1+np.exp(-x))
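# platt_func squashes a raw decision-function score into (0, 1) via the
# logistic sigmoid, e.g. platt_func(0.0) == 0.5 and platt_func(4.0) ~= 0.982;
# it is vectorized with np.vectorize further below to turn one-vs-rest
# decision values into pseudo-probabilities.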
def parse_recordid(args,discretize):
record_id = args.record_id
    # TODO: fix psi (0,1,2) so that we get the predictions at 30, 45 and 60
tp = get_training_set()
psl = tp['prediction_steps']
api = MyAPI()
X = []
y = {}
for psi in range(0,len(psl)):
ps = str(psl[psi])
# X is always the same, y is not
X_temp,y_temp = api.get_dataset(psi,record_id=record_id,nr=1,discretize=discretize)
if len(X_temp) > 0:
X = X_temp
if len(y_temp) > 0:
y[ps] = y_temp.tolist()
return X,y
def parse_features(args,discretize):
gp = get_grid()
clat = float(args.latitude)
clng = float(args.longitude)
[x,y] = get_position_in_grid(clng, clat, float(gp['cx']),float(gp['cy']))
cspeed = float(args.speed)
ccourse = float(args.course)
ccourse_sin = math.sin(float(args.course))
ccourse_cos = math.cos(float(args.course))
bc = int(args.basic_class)
#cstatus_orig = [[int(y),int(x),ccourse_sin,ccourse_cos,cspeed, bc]]
cstatus_orig = [[clat,clng,ccourse_sin,ccourse_cos,cspeed, bc]]
    if discretize:
        # the module-level `api` is created only after args.func() runs,
        # so a local client is needed here to avoid a NameError
        api = MyAPI()
        dspeed = api.get_discretized_speed(cspeed)
        dcourse = api.get_discretized_course(ccourse)
        cstatus_orig = [[int(y),int(x),dspeed,dcourse, bc]]
return cstatus_orig,None
# receive the current position, the speed, the course and time as input
parser = argparse.ArgumentParser(description='Ship Route Preditction')
subparsers = parser.add_subparsers()
recordid_p = subparsers.add_parser('record_id')
recordid_p.add_argument('-r', '--record_id', help='define record_id',required=True)
recordid_p.set_defaults(func=parse_recordid)
features_p = subparsers.add_parser('features')
features_p.add_argument('-l', '--latitude', help='define current latitude',type=float,required=True)
features_p.add_argument('-n', '--longitude', help='define current longitude',type=float,required=True)
features_p.add_argument('-s', '--speed',help='define current speed',required=True)
features_p.add_argument('-c', '--course',help='define current course',required=True)
features_p.add_argument('-b', '--basic_class',help='define basic class (0 = small ship, 1 = medium ship, 2 = big ship)',required=True)
features_p.set_defaults(func=parse_features)
parser.add_argument('-a', '--algorithm',help='select algorithm (default: knn; choices: knn, one-vs-one, one-vs-rest, gaussian-nb, bernoulli-nb, decision-tree, svm, linear-svm, mlp, radius-neighbor, sgd, kernel-approx)',required=False)
parser.add_argument('-i', '--sdi',help='ship identifier',required=False)
parser.add_argument('-f', '--no_feature_collection',action='store_true',help='set output without feature collection',required=False)
parser.add_argument('-d', '--discretize',action='store_true',help='set feature discretization',required=False)
parser.add_argument('-v', '--verbose',action='store_true',help='set verbosity',required=False)
parser.add_argument('-o', '--output',help='specify output file name',required=False)
args = parser.parse_args()
startTime = datetime.now()
algorithm = "knn"
if args.algorithm is not None:
algorithm = args.algorithm
verbose = False
if args.verbose:
    verbose = True
sdi = None
if args.sdi is not None:
sdi = args.sdi
no_feature_collection = False
if args.no_feature_collection:
no_feature_collection = True
discretize = False
if args.discretize:
discretize = True
# current position
cstatus_orig,y = args.func(args,discretize)
api = MyAPI()
# prediction step
# TODO manage prediction step
#ps = args.prediction_steps
#print cstatus_orig
prop = {}
polygons = {}
tp = get_training_set()
gp = get_grid()
psl = tp['prediction_steps']
features = []
for ps in psl:
ps = str(ps)
#prop['probability_' + ps] = []
#prop['class_' + ps] = []
# restore classifier set from file
classifier = joblib.load('data/' + algorithm + '-' + ps + '.pkl')
# restore robust scaler from file
robust_scaler = joblib.load('data/rs-' + algorithm + '-' + ps + '.pkl')
# restore classes from file
classes = joblib.load('data/classes-' + algorithm + '-' + ps + '.pkl')
cstatus = robust_scaler.transform(cstatus_orig)
if algorithm == 'kernel-approx':
rbf_feature = RBFSampler(gamma=1, random_state=1)
cstatus = rbf_feature.fit_transform(cstatus)
prob = None
if algorithm == 'one-vs-rest' or algorithm == 'linear-svm':
f = np.vectorize(platt_func)
raw_predictions = classifier.decision_function(cstatus)
platt_predictions = f(raw_predictions)
prob = platt_predictions / platt_predictions.sum(axis=1)
#prob = prob.tolist()
else:
prob = classifier.predict_proba(cstatus).tolist()
for i in range(0,len(classes)):
if algorithm == 'one-vs-rest' or algorithm == 'linear-svm':
nz_prob = float("{0:.4f}".format(prob[0][i]))
else:
nz_prob = float("{0:.2f}".format(prob[0][i]))
if nz_prob > 0:
coord = classes[i].split("_")
#print coord
polygons[classes[i]] = get_polygon(int(coord[1]),int(coord[0]),float(gp['cx']),float(gp['cy']))
try:
prop[classes[i]]['probability_' + ps] = nz_prob
prop[classes[i]]['row'] = int(coord[0])
prop[classes[i]]['column'] = int(coord[1])
except KeyError:
prop[classes[i]] = {}
prop[classes[i]]['probability_' + ps] = nz_prob
prop[classes[i]]['row'] = int(coord[0])
prop[classes[i]]['column'] = int(coord[1])
if sdi is not None:
prop[classes[i]]['sdi'] = sdi
prop[classes[i]]['type'] = "probability"
i=0
for key in prop:
pol = Polygon(polygons[key])
if no_feature_collection is True:
result = dumps({'type': 'Feature', 'geometry' : pol, "properties" : prop[key]})
print_result(args.output,result)
if i < len(prop)-1:
print_result(args.output,",")
else:
features.append(Feature(geometry=pol,properties=prop[key]))
i = i + 1
if y is not None and no_feature_collection is False:
prop = {}
polygon = {}
for ps in psl:
ps = str(ps)
if ps in y:
coord = y[ps][0].split("_")
label = y[ps][0]
polygon[label] = get_polygon(int(coord[1]),int(coord[0]),float(gp['cx']),float(gp['cy']))
try:
prop[label]['row'] = int(coord[0])
prop[label]['column'] = int(coord[1])
prop[label]['type'] = "effective"
prop[label]['delta'].append(ps)
except KeyError:
prop[label] = {}
prop[label]['row'] = int(coord[0])
prop[label]['column'] = int(coord[1])
prop[label]['type'] = "effective"
prop[label]['delta'] = [ps]
for key in prop:
pol = Polygon(polygon[key])
myprop = prop[key]
features.append(Feature(geometry=pol,properties=myprop))
if no_feature_collection is False:
result = FeatureCollection(features)
result = dumps(result)
print_result(args.output,result)
if verbose:
    elapsed = datetime.now() - startTime
    print "Time to execute the script: " + str(elapsed)
| alod83/osiris | srp/predict.py | Python | mit | 8,001 | ["Gaussian"] | 496710b7c4f74865f99c4335224ef9792b0c9927c6d14de7bb3e53c4dc5a4691 |
import lb_loader
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
from collections import OrderedDict
EXPERIMENTS = OrderedDict()
def load_lj(cutoff=None, dispersion_correction=False, switch_width=None, shift=False, charge=None, ewaldErrorTolerance=None):
reduced_density = 0.90
testsystem = testsystems.LennardJonesFluid(nparticles=2048, reduced_density=reduced_density, dispersion_correction=dispersion_correction,
cutoff=cutoff, switch_width=switch_width, shift=shift, lattice=True, charge=charge, ewaldErrorTolerance=ewaldErrorTolerance)
system, positions = testsystem.system, testsystem.positions
parameters = dict(
timestep=2 * u.femtoseconds,
langevin_timestep=0.5 * u.femtoseconds,
)
return testsystem, system, positions, parameters
class Experiment(object):
def __init__(self, integrator, sysname, prms):
self.integrator = integrator
self.system = sysname
self.prms = prms
itype = type(integrator).__name__
prms["itype"] = itype
int_string = lb_loader.format_int_name(prms)
key = (sysname, int_string)
EXPERIMENTS[key] = self
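# Instantiating an Experiment registers it in the module-level EXPERIMENTS
# dict, keyed by (sysname, formatted integrator string). A minimal sketch
# (hypothetical values):
#   prms = dict(sysname="switchedljbox", timestep=2.0, collision=1.0)
#   Experiment(mm.LangevinIntegrator(300 * u.kelvin, 1.0 / u.picoseconds,
#                                    2.0 * u.femtoseconds), "switchedljbox", prms)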
def enumerate_experiments():
experiments = OrderedDict()
############################################################################
sysname = "switchedljbox"
system, positions, groups, temperature, timestep, langevin_timestep, testsystem, equil_steps, steps_per_hmc = lb_loader.load(sysname)
############################################################################
for timestep in [2.5 * u.femtoseconds, 5.0 * u.femtoseconds]:
collision_rate = 1.0 / u.picoseconds
integrator = mm.LangevinIntegrator(temperature, collision_rate, timestep)
prms = dict(sysname=sysname, timestep=timestep / u.femtoseconds, collision=lb_loader.fixunits(collision_rate))
expt = Experiment(integrator=integrator, sysname=sysname, prms=prms)
collision_rate = None
for timestep in [20.0 * u.femtoseconds]:
integrator = hmc_integrators.GHMCIntegrator(temperature=temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, collision_rate=collision_rate)
prms = dict(sysname=sysname, timestep=timestep / u.femtoseconds, collision=lb_loader.fixunits(collision_rate))
expt = Experiment(integrator=integrator, sysname=sysname, prms=prms)
timestep = 35.0 * u.femtoseconds
extra_chances = 2
collision_rate = 1.0 / u.picoseconds
integrator = hmc_integrators.XCGHMCIntegrator(temperature=temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, extra_chances=extra_chances, collision_rate=collision_rate)
itype = type(integrator).__name__
prms = dict(sysname=sysname, itype=itype, timestep=timestep / u.femtoseconds, collision=lb_loader.fixunits(collision_rate))
expt = Experiment(integrator=integrator, sysname=sysname, prms=prms)
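    # NOTE: the bare return below short-circuits enumerate_experiments(), so
    # everything after this point is currently dead code (kept, it seems, as a
    # record of earlier experiment configurations).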
return
collision_rate = None
for timestep in []: # [2.0 * u.femtoseconds]:
integrator = hmc_integrators.XCGHMCIntegrator(temperature=temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, extra_chances=extra_chances, collision_rate=collision_rate)
itype = type(integrator).__name__
prms = dict(sysname=sysname, itype=itype, timestep=timestep / u.femtoseconds, collision=lb_loader.fixunits(collision_rate))
int_string = lb_loader.format_int_name(prms)
key = (sysname, int_string)
experiments[key] = integrator
############################################################################
sysname = "switchedaccurateflexiblewater"
system, positions, groups, temperature, timestep, langevin_timestep, testsystem, equil_steps, steps_per_hmc = lb_loader.load(sysname)
############################################################################
for timestep in [0.10 * u.femtoseconds, 0.15 * u.femtoseconds, 0.5 * u.femtoseconds]:
collision_rate = 1.0 / u.picoseconds
integrator = mm.LangevinIntegrator(temperature, collision_rate, timestep)
itype = type(integrator).__name__
prms = dict(sysname=sysname, itype=itype, timestep=timestep / u.femtoseconds, collision=lb_loader.fixunits(collision_rate))
int_string = lb_loader.format_int_name(prms)
key = (sysname, int_string)
experiments[key] = integrator
xcghmc_parms = dict(timestep=0.668 * u.femtoseconds, steps_per_hmc=10, extra_chances=1, collision_rate=None)
integrator = hmc_integrators.XCGHMCIntegrator(temperature=temperature, **xcghmc_parms)
itype = type(integrator).__name__
prms = dict(sysname=sysname, itype=itype, timestep=integrator.timestep / u.femtoseconds, collision=lb_loader.fixunits(None))
int_string = lb_loader.format_int_name(prms)
key = (sysname, int_string)
experiments[key] = integrator
# hyperopt determined optimal settings obtain ~113 effective ns / day
xcghmc_parms = dict(timestep=1.1868 * u.femtoseconds, steps_per_hmc=23, collision_rate=None, groups=((0, 1), (1, 4)))
integrator = hmc_integrators.GHMCRESPAIntegrator(temperature=temperature, **xcghmc_parms)
itype = type(integrator).__name__
prms = dict(sysname=sysname, itype=itype, timestep=integrator.timestep / u.femtoseconds, collision=lb_loader.fixunits(None))
int_string = lb_loader.format_int_name(prms)
key = (sysname, int_string)
experiments[key] = integrator
# Obtained by taking hyperopt optimal GHMCRespa parameters and adding 2 extra chances
xcghmc_parms = dict(timestep=1.1868 * u.femtoseconds, steps_per_hmc=23, collision_rate=None, extra_chances=2, groups=((0, 1), (1, 4)))
integrator = hmc_integrators.XCGHMCRESPAIntegrator(temperature=temperature, **xcghmc_parms)
itype = type(integrator).__name__
prms = dict(sysname=sysname, itype=itype, timestep=integrator.timestep / u.femtoseconds, collision=lb_loader.fixunits(None))
int_string = lb_loader.format_int_name(prms)
key = (sysname, int_string)
experiments[key] = integrator
# hyperopt determined optimal settings obtain ~79.8 effective ns/day
xcghmc_parms = dict(timestep=0.6791 * u.femtoseconds, steps_per_hmc=20, collision_rate=None)
integrator = hmc_integrators.GHMCIntegrator(temperature=temperature, **xcghmc_parms)
itype = type(integrator).__name__
prms = dict(sysname=sysname, itype=itype, timestep=integrator.timestep / u.femtoseconds, collision=lb_loader.fixunits(None))
int_string = lb_loader.format_int_name(prms)
key = (sysname, int_string)
experiments[key] = integrator
xcghmc_parms = dict(timestep=0.6791 * u.femtoseconds, steps_per_hmc=20, collision_rate=None)
xcghmc_parms.update(dict())
integrator = hmc_integrators.XCGHMCIntegrator(temperature=temperature, **xcghmc_parms)
itype = type(integrator).__name__
prms = dict(sysname=sysname, itype=itype, timestep=timestep / u.femtoseconds, collision=lb_loader.fixunits(collision_rate))
int_string = lb_loader.format_int_name(prms)
key = (sysname, int_string)
experiments[key] = integrator
############################################################################
sysname = "switchedaccuratebigflexiblewater"
system, positions, groups, temperature, timestep, langevin_timestep, testsystem, equil_steps, steps_per_hmc = lb_loader.load(sysname)
############################################################################
experiments = OrderedDict()
# hyperopt determined optimal settings obtain ~113 effective ns / day
xcghmc_parms = dict(timestep=0.256927 * u.femtoseconds, steps_per_hmc=24, collision_rate=None, groups=((0, 4), (1, 1)))
integrator = hmc_integrators.GHMCRESPAIntegrator(temperature=temperature, **xcghmc_parms)
itype = type(integrator).__name__
prms = dict(sysname=sysname, itype=itype, timestep=integrator.timestep / u.femtoseconds, collision=lb_loader.fixunits(None))
int_string = lb_loader.format_int_name(prms)
key = (sysname, int_string)
experiments[key] = integrator
| kyleabeauchamp/HMCNotes | code/experiments.py | Python | gpl-2.0 | 8,143 | ["OpenMM"] | e753b4dbe89ed6f43597b4dbe3a052ace247f839a3c597534ce2e7ceb832c441 |
|
#!/usr/bin/python
"""
This peak-caller script is part of the CLAM pipeline.
It takes input from the re-aligner output and uses permutation to call peaks.
Tested under python 2.7.3
"""
__author__ = 'Zijun Zhang'
__version__ = '1.0.0'
__email__ = 'zj.z@ucla.edu'
from optparse import OptionParser
import os, subprocess, sys
from collections import defaultdict
from statsmodels.sandbox.stats.multicomp import multipletests
from time import strftime
import cPickle as pickle
import bisect, random
import pysam
import pybedtools
from multiprocessing import Pool
def main():
"""
The main wrapper for CLAM peak-caller.
"""
# options parsing
usage='usage: %prog <options>'
parser=OptionParser(usage)
parser.add_option('--resume', dest='resume', action='store_true', default=False, help='Resume mode - skipping pre-processing [Default: %default]')
parser.add_option('--verbose', dest='verbose', action='store_true', default=False, help='Verbose mode - print out all intermediate steps [Default: %default]')
parser.add_option('-o', dest='output_dir', default='./out_CLAM', help='Output file folder [Default %default]')
parser.add_option('-t', dest='tmp_dir', default='./tmp_CLAM', help='Temporary file folder [Default %default]')
parser.add_option('-p', dest='peak_file', default=None, help='Output peak calling filename; if None then do not call peaks [Default %default]')
parser.add_option('--is-stranded', dest='is_stranded', default=False, action='store_true', help='Indicates if the reads are mapped with strand information. [Default: %default]')
parser.add_option('--extend', dest='extend', type='int', default=50, help='Extend to given nucleotides symmetrically at peak calling [Default: %default]')
parser.add_option('--pval-cutoff', dest='pval_cutoff', type='float', default=0.001, help='Corrected p-value threshold at peak calling [Default: %default]')
parser.add_option('--merge-size', dest='merge_size', type='int', default=50, help='merging window size at peak calling [Default: %default]')
parser.add_option('--max-iter', dest='max_iter', type='int', default=1000, help='maximum iterations for permutation tests [Default: %default]')
parser.add_option('-g', dest='gtf', default='./GTF/hg19_ensembl.sorted_gene.bed', help='GTF file [Default: %default]')
parser.add_option('--ThreadN', dest='nb_proc', type='int', default=4, help='Number of threads when doing permutations. [Default: %default]')
parser.add_option('--seed', dest='seed', type='int', default=100, help='Random seed for permutations. [Default: %default]')
parser.add_option('--merge-method', dest='merge_method', type='int', default=1, help='Peak merging method. 1: Narrow peak 2: Broad peak [Default: %default]')
parser.add_option('--pval-method', dest='correction_method', type='int', default=1, help='Multiple testing correction method. 1: Bonferroni 2: BH FDR [Default: %default]')
parser.add_option('--call-transcriptome', dest='call_all', action='store_true', default=False, help='Call peaks on transcriptome instead of genes with multi-mappers. [Default: %default]')
(options,args)=parser.parse_args()
output_dir=os.path.abspath(options.output_dir)
tmp_dir=os.path.abspath(options.tmp_dir)
verbose=options.verbose
#random.seed(options.seed)
write_parameter_log(options, output_dir)
# find transcripts associated with multi-mapped reads
if verbose:
print_time_stamp('Finding transcripts with multimapped reads.')
if not os.path.isfile(output_dir + '/CLAM_mapper.sorted.out'):
subprocess.call(''' sort -k1,1 -k2,2n %s/CLAM_mapper.out | awk '{print $1"\t"$2"\t"$3"\t"$4"\t"$5"\t"$6}' > %s/CLAM_mapper.sorted.out ''' % (output_dir, output_dir), shell=True)
# Note: tid_list: tid -> [chr:strand, start, end]
tid_list=read_aligner_output(output_dir + '/CLAM_mapper.sorted.out', options.gtf, options.is_stranded, tmp_dir, options.resume, options.call_all)
# make bam file for re-aligner output, if non-exist
if not (options.resume and os.path.isfile(output_dir + '/assigned_multimapped_reads.bam')):
if verbose:
print_time_stamp('Making bamfile for aligner output.')
header_cmd='samtools view -H ' + tmp_dir + '/filter100.sorted.bam > ' + output_dir + '/sam_header.sam'
subprocess.call(header_cmd, shell=True)
body_cmd = ''' awk '{if($6=="+"){print $4"\t256\t"$1"\t"$2+1"\t0\t"$3-$2+1"M\t*\t0\t0\t*\t*\tAS:f:"$5}else{print $4"\t272\t"$1"\t"$2+1"\t0\t"$3-$2+1"M\t*\t0\t0\t*\t*\tAS:f:"$5 }}' ''' + output_dir + '/CLAM_mapper.sorted.out > ' + output_dir + '/CLAM_mapper.sorted.sam'
subprocess.call(body_cmd, shell=True)
makeBam_cmd = 'cat %s/sam_header.sam %s/CLAM_mapper.sorted.sam | samtools view -bS - > %s/assigned_multimapped_reads.bam' % (output_dir, output_dir,output_dir)
subprocess.call(makeBam_cmd, shell=True)
index_cmd = 'samtools index %s/assigned_multimapped_reads.bam' % output_dir
subprocess.call(index_cmd, shell=True)
# multi-processing peak-caller
if not (options.resume and os.path.isfile(tmp_dir+'/unique_to_qval.pdata') and os.path.isfile(tmp_dir+'/combined_to_qval.pdata')):
child_transcr_ind = list(chunkify(range(len(tid_list)), options.nb_proc))
pool = Pool(processes=options.nb_proc)
unibam_file=tmp_dir+'/filter100.sorted.bam'
multibam_file=output_dir+'/assigned_multimapped_reads.bam'
tid_to_qval_compact = pool.map(get_permutation_fdr, [ (unibam_file, multibam_file, tid_list, child_transcr_ind[i], options.pval_cutoff, options.max_iter, options.is_stranded, verbose, options.correction_method, options.seed) for i in range(options.nb_proc) ])
pool.terminate()
pool.join()
unique_tid_to_qval, combined_tid_to_qval = unpack_tid_to_qval(tid_to_qval_compact)
pickle.dump(unique_tid_to_qval, open(tmp_dir+'/unique_to_qval.pdata','wb'), -1)
pickle.dump(combined_tid_to_qval, open(tmp_dir+'/combined_to_qval.pdata','wb'), -1)
else:
print_time_stamp('Resume mode, found qval data files.')
unique_tid_to_qval=pickle.load(open(tmp_dir+'/unique_to_qval.pdata','rb'))
combined_tid_to_qval=pickle.load(open(tmp_dir+'/combined_to_qval.pdata','rb'))
# merge peaks
if options.merge_method==1:
merge_peaks=merge_peaks_singleNucl
mm='singleNucl'
elif options.merge_method==2:
merge_peaks=merge_peaks_broadPeak
mm='broadPeak'
else:
merge_peaks=merge_peaks_singleNucl
mm='unknown selection, using default singleNucl'
if verbose:
print_time_stamp('Merging peaks within ' + str(options.merge_size) + 'bp, using ' + mm + '..')
unique_peaks=merge_peaks(unique_tid_to_qval, options.merge_size, options.pval_cutoff)
combined_peaks=merge_peaks(combined_tid_to_qval, options.merge_size, options.pval_cutoff)
print_time_stamp('Comparing results and writing to file..')
# write peak-calling results to file.
with open(output_dir + '/all_peaks.txt', 'w') as f:
for peak in unique_peaks: # peak = ['chr\tstart\tend\tstrand', 'height\tqval\t', tid]
if options.extend is None:
wt_loc=peak[0]
else:
wt_loc=extend_peak_region(peak[0], options.extend)
f.write(wt_loc + '\t' + '\t'.join([str(x) for x in peak[1]]) + '\t' + peak[2] + '\tunique\n')
for peak in combined_peaks:
if options.extend is None:
wt_loc=peak[0]
else:
wt_loc=extend_peak_region(peak[0], options.extend)
f.write(wt_loc + '\t' + '\t'.join([str(x) for x in peak[1]]) + '\t' + peak[2] + '\tcombined\n')
    subprocess.call(''' sort -k1,1 -k2,2n %s/all_peaks.txt | awk '{print $1"\t"$2"\t"$3"\t"$5";"$6";"$7"\t"$8"\t"$4}' | bedtools merge -s -d -1 -i stdin -c 4,5,6 -o collapse,collapse,distinct > %s''' % (output_dir, options.peak_file), shell=True)
print_time_stamp('Peak-calling done.')
def write_parameter_log(options, output_dir):
"""
Write paramter values to a log file, named by current time.
"""
merge_method_dict={1:'narrowPeak', 2:'broadPeak'}
correction_method_dict={1:'Bonferroni', 2:'BH_FDR'}
with open(output_dir+'/CLAM_Peaker.Parameters.'+ strftime("%Y%m%d_%H%M") + '.txt', 'w') as log:
log.write('CLAM Peaker ' + __version__ + '\n')
log.write('resume: ' + str(options.resume) + '\n')
log.write('verbose: ' + str(options.verbose) + '\n')
log.write('output_dir:' + str(options.output_dir) + '\n')
log.write('tmp_dir: ' + str(options.tmp_dir) + '\n')
log.write('peak_file: ' + str(options.peak_file) + '\n')
log.write('is_stranded: ' + str(options.is_stranded) + '\n')
log.write('extend: ' + str(options.extend) + '\n')
log.write('pval_cutoff: ' + str(options.pval_cutoff) + '\n')
log.write('merge_size: ' + str(options.merge_size) + '\n')
log.write('max_iter: ' + str(options.max_iter) + '\n')
log.write('gtf: ' + str(options.gtf) + '\n')
log.write('seed: ' + str(options.seed) + '\n')
log.write('merge_method: ' + merge_method_dict[options.merge_method] + '\n')
log.write('correction_method: ' + correction_method_dict[options.correction_method] + '\n')
log.write('thread: ' + str(options.nb_proc) + '\n')
def chunkify(a, n):
"""
    Separate a list (a) into n consecutive chunks.
    Returns a generator over the chunkified index lists.
"""
k, m = len(a) / n, len(a) % n
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in xrange(n))
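# Example (note that len(a) / n above relies on Python 2 integer division):
#   list(chunkify(range(10), 3)) -> [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]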
def unpack_tid_to_qval(compact):
"""
Unpacks the returned values from multi-processing.
"""
unique_tid_to_qval=defaultdict(list)
combined_tid_to_qval=defaultdict(list)
for item in compact:
unique, combined = item[0], item[1]
for tid in combined:
if len(unique[tid])>0:
unique_tid_to_qval[tid]=unique[tid]
if len(combined[tid])>1:
combined_tid_to_qval[tid]=combined[tid]
return unique_tid_to_qval,combined_tid_to_qval
def get_permutation_fdr((unibam_file, multibam_file, tid_list, tid_ind, pval_cutoff, max_iter, is_stranded, verbose, correction_method, seed)):
"""
    General permutation wrapper for a list of genes; called in the worker processes spawned by Pool().
Returns packed FDRs from each child process.
"""
random.seed(seed)
unique_tid_to_qval=defaultdict(list)
combined_tid_to_qval=defaultdict(list)
unibam=pysam.Samfile(unibam_file, 'rb')
multibam=pysam.Samfile(multibam_file, 'rb')
processed=0
pid=os.getpid()
for ind in tid_ind:
processed+=1
if verbose and not processed % 100:
print_time_stamp(str(processed) + '/' + str(len(tid_ind)) + ' finished in pid ' + str(pid))
tid, chr, strand, start, end = tid_list[ind]
unique_reads = read_tid_frag_from_bam(tid_list[ind], unibam, is_stranded, True)
multi_reads = read_tid_frag_from_bam(tid_list[ind], multibam, is_stranded, False)
this_unique_to_qval = do_permutation(tid_list[ind], unique_reads, max_iter, pval_cutoff, correction_method)
this_combined_to_qval = do_permutation(tid_list[ind], unique_reads+multi_reads, max_iter, pval_cutoff, correction_method)
unique_tid_to_qval[tid].extend(this_unique_to_qval)
combined_tid_to_qval[tid].extend(this_combined_to_qval)
unibam.close()
multibam.close()
return unique_tid_to_qval, combined_tid_to_qval
def do_permutation(transcr, read_transcript, max_iter, pval_cutoff, correction_method):
"""
    Permutes the reads along a given gene length; sub-routine that gets called by get_permutation_fdr(..).
Returns the locally corrected p-values for each observed height on the given gene.
"""
tid, chr, strand, tstart, tend = transcr
tid_length=tend-tstart+1
obs_heights_count=count_pileup_heights(tid_length, read_transcript)
tid_to_qval=[]
rand_heights_dist=defaultdict(int)
rand_sum=0
# need to account for the 'observed' data, since permutation tests should never report p-value as 0. 3/22/16
for i in obs_heights_count:
if i==0:
continue
else:
rand_heights_dist[int(i)]+=1
rand_sum+=1
for B in range(max_iter):
new_heights_count=permutate_heights(tid_length, read_transcript)
for i in new_heights_count:
if i==0:
continue
else:
rand_heights_dist[i]+=1
rand_sum+=1
height_to_pval={}
for h in set(obs_heights_count):
if h < 1:
continue
else:
lefter=0
for j in range(int(h), max(rand_heights_dist)+1):
lefter+=rand_heights_dist[j]
height_to_pval[h]=lefter/float(rand_sum)
pval_list=[]
for i in obs_heights_count:
if i<1:
continue
pval_list.append(height_to_pval[i])
if len(pval_list)<=1:
return []
if correction_method==2:
qval_list=multipletests(pval_list, method='fdr_bh')[1]
else:
qval_list=[min(x*(len(set([int(y) for y in height_to_pval if y!=0]))), 1.0) for x in pval_list]
ind=0
last_height=0
for j in range(len(obs_heights_count)):
this_height=obs_heights_count[j]
if this_height<1:
last_height=0
continue
if qval_list[ind] <= pval_cutoff:
if this_height==last_height:
chr, last_start, last_end, last_strand, last_height, last_qval=tid_to_qval[-1]
tid_to_qval[-1]=[chr, last_start, tstart+j+1, strand, last_height, last_qval]
else:
tid_to_qval.append([chr, tstart+j, tstart+j+1, strand, obs_heights_count[j], qval_list[ind]]) # chr, start, end, strand, height, this_qval
last_height=this_height
ind+=1
return tid_to_qval
def heights_to_dist(rand_heights):
"""
sub-routine
"""
rand_heights_dist=defaultdict(int)
rand_sum=0
for new_heights_count in rand_heights:
for i in new_heights_count:
if i==0:
continue
else:
rand_heights_dist[i]+=1
rand_sum+=1
return rand_heights_dist, rand_sum
def permutate_heights(tlen, reads):
"""
Sub-routine for do_permutation(...)
Randomly allocate the read locations.
"""
loc_heights=[0] * tlen
for id, pos, read_len, score in reads:
if score<1 and random.random() > score:
continue
rand_pos=random.randint(1, max(1, tlen-read_len))
for i in range(rand_pos, min(rand_pos + read_len, tlen)):
loc_heights[i]+=1
return loc_heights
def count_pileup_heights(tlen, reads):
"""
Sub-routine for do_permutation(...)
Counts the distribution of pile-up heights for a given gene/permutation
"""
loc_heights=[0] * tlen
for id, pos, read_len, score in reads:
for i in range(pos, min(pos+read_len-1, tlen)):
loc_heights[i]+=score
return loc_heights
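# Worked example (made-up reads): on a 6-nt gene, two length-3 reads of score 1
# starting at positions 1 and 2 give loc_heights = [0, 1, 2, 1, 0, 0]; note the
# loop covers pos .. pos+read_len-2, i.e. read_len-1 positions per read.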
def merge_peaks_broadPeak(transcript_to_qval, merge_size, pval_cutoff):
"""
Merge called peaks on a gene using option 2,
    i.e. if two peaks are close to each other, the region
    between the two peaks is also called as peak.
    Returns a list of merged peaks.
"""
peaks=[]
last_qval=[0,1]
for tid in transcript_to_qval:
init=True
for chr, start, end, strand, height, this_qval in transcript_to_qval[tid]:
loc=[chr, str(start), str(end), strand]
this_qval=[height, this_qval] # this_qval=[height, qval] so that when qval=0, we can compare height
if this_qval[1] > pval_cutoff:
continue
if init:
last_qval=this_qval
last_pos=[start, end]
last_loc=loc
last_chr=chr
write_out=False
init=False
continue
if int(start) - int(last_pos[1]) > merge_size:
write_out=True
else:
last_pos=[last_pos[0], end]
last_qval=this_qval if last_qval[0]<this_qval[0] else last_qval
last_loc[2]=str(end)
write_out=False
if write_out and last_qval[1] < pval_cutoff:
peaks.append(['\t'.join(last_loc), last_qval, tid])
last_qval=this_qval
last_pos=[start, end]
last_loc=loc
                last_chr=chr
write_out=False
if last_qval[1] < pval_cutoff:
peaks.append(['\t'.join(last_loc), last_qval, tid])
return peaks
def merge_peaks_singleNucl(transcript_to_qval, merge_size, pval_cutoff):
"""
Merge called peaks on a gene using option 1
    (default), i.e. if two peaks are close to each other,
    only the most significant one is kept.
    Returns a list of merged peaks.
"""
peaks=[]
last_qval=[0,1]
for tid in transcript_to_qval:
init=True
for chr, start, end, strand, height, this_qval in transcript_to_qval[tid]:
loc='\t'.join([chr, str(start), str(end), strand])
this_qval=[height, this_qval] # this_qval=[height, qval] so that when qval=0, we can compare height
if this_qval[1] > pval_cutoff:
continue
if init:
last_qval=this_qval
last_pos=[start, end]
last_loc=loc
last_chr=chr
write_out=False
init=False
continue
if last_chr == chr:
if abs( int(start) - int(last_pos[0]) ) > merge_size:
write_out=True
elif last_qval[0] < this_qval[0]:
last_pos=[start, end]
last_qval=this_qval
last_loc=loc
write_out=False
else:
write_out=True
if write_out and last_qval[1] < pval_cutoff:
#peaks[last_loc]=last_qval
peaks.append([last_loc, last_qval, tid])
last_qval=this_qval
last_pos=[start, end]
last_loc=loc
last_chr=chr
write_out=False
if last_qval[1] < pval_cutoff:
peaks.append([last_loc, last_qval, tid])
return peaks
def extend_peak_region(loc, target_len):
"""
Extends peak symmetrically if peak is smaller than target_len.
"""
chr, start, end, strand = loc.split('\t')
start = int(start)
end = int(end)
old_len = end - start
if old_len > target_len:
return loc
else:
center = int((start + end)/2)
start = center - int(target_len /2)
end = center + int(target_len/2)
return '\t'.join([chr, str(start), str(end), strand])
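# Sketch with made-up coordinates: a 20-bp peak grown to 50 bp,
#   extend_peak_region('chr1\t100\t120\t+', 50) -> 'chr1\t85\t135\t+'
# (the center, 110, is kept and the region is extended symmetrically).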
def read_aligner_output(rm_out, gtffile, is_stranded, tmp_dir, resume, call_all):
"""
Use bedtools to get transcripts/genes with multi-mapped reads.
Returns a list of transcripts/genes.
"""
if not (resume and os.path.isfile(tmp_dir + '/gtf2multireads.bed')):
rm_bed=pybedtools.BedTool(rm_out)
gtf=pybedtools.BedTool(gtffile)
gtf_bed_rm = gtf.intersect(rm_bed, s=True, u=True) if is_stranded else gtf.intersect(rm_bed, u=True)
gtf_bed_rm.saveas(tmp_dir + '/gtf2multireads.bed')
pybedtools.cleanup()
tid_list=[]
if call_all:
gtf_to_read=gtffile
else:
gtf_to_read=tmp_dir+'/gtf2multireads.bed'
with open(gtf_to_read,'r') as f:
for line in f:
ele=line.rstrip().split('\t')
gene_id=ele[3]
gene_chr, gene_start, gene_end=ele[0], int(ele[1]), int(ele[2])
gene_strand=ele[5]
tid_list.append([gene_id, gene_chr, gene_strand, gene_start, gene_end])
print_time_stamp('Read transcripts with multi-reads: ' + str(len(tid_list)))
return tid_list
def read_tid_frag_from_bam(tid, bamfile, is_stranded, is_unique):
"""
Use pysam to fetch reads info for a given gene and its loci.
    Returns reads, read weights and their mapped loci.
"""
tid_reads=[]
gene, chr, strand, start, end=tid
if strand=='-':
is_reverse=True
else:
is_reverse=False
reads=[x for x in bamfile.fetch(chr, int(start), int(end)) if x.is_reverse==is_reverse or not is_stranded]
reads=[x for x in reads if x.pos>=int(start) and x.pos<=int(end)]
for read in reads:
if is_unique:
try:
opt_NH=read.opt('NH')
if opt_NH > 1:
continue
except:
pass
score=1
else:
try:
opt_AS=read.opt('AS')
if isinstance(opt_AS, float):
score=opt_AS
else:
continue
except:
continue
read_length = read.qlen if read.qlen > 0 else read.positions[-1] - read.positions[0] + 1
if read.pos-start>=0 and read_length<500: # to avoid junction reads
tid_reads.append([read.qname, read.pos-start, read_length, score])
return tid_reads
def print_time_stamp(msg):
"""
Reporter function for logging.
"""
current_time='[' + strftime("%Y-%m-%d %H:%M:%S") + '] '
print >> sys.stderr, current_time + msg
if __name__=='__main__':
main()
| Xinglab/CLAM | deprecated/CLAM.fdr_peak.MP.py | Python | gpl-3.0 | 19,701 | ["pysam"] | 1835b1bd8fecf11b2b0d43e8990a613d6f9a01dc1c33264a08f1378b3abeb9d4 |
# coding: utf8
import webapp2
from requestmodel import *
from google.appengine.api import mail as gae_mail
from google.appengine.api import users
from google.appengine.api.taskqueue import taskqueue
import gig
import member
import assoc
import logging
import re
import pickle
import os
import stats
import cryptoutil
import sendgrid
from sendgrid.helpers.mail import *
from email_sg_db import get_sendgrid_api
from webapp2_extras import i18n
from webapp2_extras.i18n import gettext as _
from google.appengine.ext.webapp.mail_handlers import BounceNotification, BounceNotificationHandler, InboundMailHandler
# need this for sending stuff to the superuser - can't use the decorated version
_bare_admin_email_address = 'superuser@gig-o-matic.com'
admin_name = 'Gig-o-Matic'
# The MailServiceStub class used by dev_appserver can't handle a sender address that's more
# than a raw email address, but production GAE doesn't have this limitation.
if os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/'):
_admin_email_address = 'Gig-o-matic <gigomatic.superuser@gmail.com>'
else:
_admin_email_address = 'gigomatic.superuser@gmail.com'
def validate_email(to):
# + and . are allowed in username, and . in the domain name, but neither can be
# the leading character. Alphanumerics, - and _ are allowed anywhere.
valid_address = r"^[_a-z0-9-]+((\.|\+)[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$"
if (not gae_mail.is_email_valid(to)) or (re.match(valid_address, to.lower()) is None):
logging.error("invalid recipient address '{0}'".format(to))
return False
else:
return True
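# A few doctest-style probes of the regex above (hypothetical addresses):
#   validate_email('band.leader+gigs@example.com')  -> True
#   validate_email('.leading.dot@example.com')      -> False (username must not start with '.')
#   validate_email('user@example')                  -> False (a TLD is required)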
def _send_admin_mail(to, subject, body, html=None, reply_to=None):
if validate_email(to) is False:
return False
sg = sendgrid.SendGridAPIClient(api_key=get_sendgrid_api())
from_email = Email(_bare_admin_email_address, admin_name)
to_email = To(to)
plain_text_content=PlainTextContent(body.encode('utf-8'))
if html is not None:
html_content = HtmlContent(html)
else:
html_content = None
mail = Mail(from_email, to_email, subject, plain_text_content=plain_text_content, html_content=html_content)
if reply_to:
mail.reply_to = Email(reply_to)
try:
response = sg.client.mail.send.post(request_body=mail.get())
except Exception as e:
logging.error("Failed to send mail {0} to {1}.\n{2}".format(subject, to, e))
return False
if response.status_code == 202:
return True
else:
logging.error("Failed to send mail {0} to {1}.\n{2}".format(subject, to, e))
return False
def send_registration_email(the_email, the_url):
return _send_admin_mail(the_email, _('Welcome to Gig-o-Matic'), _('welcome_msg_email').format(the_url))
def send_band_accepted_email(the_email, the_band, the_message=None):
if the_message:
the_text = "\n\n--\n\n" + the_message
elif the_band.new_member_message:
the_text = "\n\n--\n\n" + the_band.new_member_message
else:
the_text = ""
whole_message = "{0}{1}".format(
_('member_confirmed_email').format(the_band.name, the_band.key.urlsafe()),
the_text
)
return _send_admin_mail(the_email, _('Gig-o-Matic: Confirmed!'), whole_message)
def send_forgot_email(the_email, the_url):
return _send_admin_mail(the_email, _('Gig-o-Matic Password Reset'), _('forgot_password_email').format(the_url))
# send an email announcing a new gig
def send_newgig_email(the_member, the_gig, the_band, the_gig_url, is_edit=False, is_reminder=False, change_string=""):
the_locale=the_member.preferences.locale
the_email_address = the_member.email_address
if not gae_mail.is_email_valid(the_email_address):
return False
i18n.get_i18n().set_locale(the_locale)
contact_key=the_gig.contact
if contact_key:
contact = contact_key.get()
contact_name=contact.name
else:
contact = None
contact_name="??"
# get the special URLs for "yes" and "no" answers
the_yes_url, the_no_url, the_snooze_url = gig.get_confirm_urls(the_member, the_gig)
reply_to = None
if contact is not None:
reply_to = contact.email_address
if is_edit:
title_string='{0} ({1})'.format(_('Gig Edit').encode('utf-8'),change_string)
elif is_reminder:
        title_string=_('Gig Reminder:')
else:
title_string=_('New Gig:')
the_date_string = "{0} ({1})".format(member.format_date_for_member(the_member, the_gig.date),
member.format_date_for_member(the_member, the_gig.date, "day"))
if the_gig.enddate:
the_date_string = "{0} - {1} ({2})".format( the_date_string,
member.format_date_for_member(the_member, the_gig.enddate),
member.format_date_for_member(the_member, the_gig.enddate, "day"))
the_time_string = ""
if the_gig.calltime:
the_time_string = u'{0} ({1})'.format(the_gig.calltime, _('Call Time'))
if the_gig.settime:
if the_time_string:
the_time_string = u'{0}, '.format(the_time_string)
the_time_string = u'{0}{1} ({2})'.format(the_time_string,the_gig.settime, _('Set Time'))
if the_gig.endtime:
if the_time_string:
the_time_string = u'{0}, '.format(the_time_string)
the_time_string = u'{0}{1} ({2})'.format(the_time_string,the_gig.endtime, _('End Time'))
the_status_string = [_('Unconfirmed'), _('Confirmed!'), _('Cancelled!')][the_gig.status]
def format_details(details, setlist, newline='\n'):
if setlist:
the_details_string = u"{0}{1}{2}:{3}{4}".format(newline.join(details.splitlines()) if details else '',
u'{0}{0}'.format(newline) if details else '',
_('Setlist'),
newline,
newline.join(setlist.splitlines()))
else:
the_details_string = newline.join(details.splitlines())
return the_details_string
def format_body(body_format_str, newline='\n'):
return body_format_str.format(the_band.name, the_gig.title, the_date_string, the_time_string, contact_name,
the_status_string, format_details(the_gig.details, the_gig.setlist, newline), the_gig_url, "",
the_yes_url, the_no_url, the_snooze_url)
if is_edit:
body = _('edited_gig_email').format(the_band.name, the_gig.title, the_date_string, the_time_string, contact_name,
the_status_string, format_details(the_gig.details, the_gig.setlist), the_gig_url, change_string)
html = None
elif is_reminder:
body = format_body(_('reminder_gig_email'))
html = format_body(_('reminder_gig_email_html'), newline='<br>')
else:
body = format_body(_('new_gig_email'))
html = format_body(_('new_gig_email_html'), newline='<br>')
try:
        ret = _send_admin_mail(the_email_address, u'{0} {1}'.format(title_string, the_gig.title), body, html=html, reply_to=reply_to)
    except UnicodeDecodeError:
        logging.error("unicode error title_string with gig {0} email {1}".format(the_gig.key, the_email_address))
        ret = False
    return ret
def announce_new_gig(the_gig, the_gig_url, is_edit=False, is_reminder=False, change_string="", the_members=[]):
the_params = pickle.dumps({'the_gig_key': the_gig.key,
'the_gig_url': the_gig_url,
'is_edit': is_edit,
'is_reminder': is_reminder,
'change_string': change_string,
'the_members': the_members})
_safe_taskqueue_add(
url='/announce_new_gig_handler',
params={'the_params': the_params
})
class AnnounceNewGigHandler(webapp2.RequestHandler):
def post(self):
_check_taskqueue_trust(self.request)
the_params = pickle.loads(self.request.get('the_params'))
the_gig_key = the_params['the_gig_key']
the_gig_url = the_params['the_gig_url']
is_edit = the_params['is_edit']
is_reminder = the_params['is_reminder']
change_string = the_params['change_string']
the_members = the_params['the_members']
the_gig = the_gig_key.get()
the_band_key = the_gig_key.parent()
the_assocs = assoc.get_confirmed_assocs_of_band_key(the_band_key, include_occasional=the_gig.invite_occasionals)
if is_reminder and the_members:
recipient_assocs=[]
for a in the_assocs:
if a.member in the_members:
recipient_assocs.append(a)
else:
recipient_assocs = the_assocs
logging.info('announcing gig {0} to {1} people'.format(the_gig_key,len(recipient_assocs)))
the_shared_params = pickle.dumps({
'the_gig_key': the_gig_key,
'the_band_key': the_band_key,
'the_gig_url': the_gig_url,
'is_edit': is_edit,
'is_reminder': is_reminder,
'change_string': change_string
})
for an_assoc in recipient_assocs:
if an_assoc.email_me:
the_member_key = an_assoc.member
the_member_params = pickle.dumps({
'the_member_key': the_member_key
})
_safe_taskqueue_add(
url='/send_new_gig_handler',
params={'the_shared_params': the_shared_params,
'the_member_params': the_member_params
})
logging.info('announced gig {0}'.format(the_gig_key))
stats.update_band_email_stats(the_band_key, len(recipient_assocs))
self.response.write( 200 )
class SendNewGigHandler(webapp2.RequestHandler):
def post(self):
_check_taskqueue_trust(self.request)
the_shared_params = pickle.loads(self.request.get('the_shared_params'))
the_member_params = pickle.loads(self.request.get('the_member_params'))
the_member_key = the_member_params['the_member_key']
the_gig_key = the_shared_params['the_gig_key']
the_band_key = the_shared_params['the_band_key']
the_gig_url = the_shared_params['the_gig_url']
is_edit = the_shared_params['is_edit']
is_reminder = the_shared_params['is_reminder']
change_string = the_shared_params['change_string']
send_newgig_email(the_member_key.get(), the_gig_key.get(), the_band_key.get(), the_gig_url, is_edit, is_reminder, change_string)
self.response.write( 200 )
def send_new_member_email(band,new_member):
members=assoc.get_admin_members_from_band_key(band.key)
for the_member in members:
send_the_new_member_email(the_member.preferences.locale, the_member.email_address, new_member=new_member, the_band=band)
def send_the_new_member_email(the_locale, the_email_address, new_member, the_band):
i18n.get_i18n().set_locale(the_locale)
return _send_admin_mail(the_email_address,
_('Gig-o-Matic New Member for band {0}').format(the_band.name),
_('new_member_email').format('{0} ({1})'.format(new_member.name, new_member.email_address),
the_band.name, the_band.key.urlsafe()))
def send_new_band_via_invite_email(the_band, the_member, the_message=None):
if the_message:
the_text = "\n\n--\n\n" + the_message
elif the_band and the_band.new_member_message:
the_text = "\n\n--\n\n" + the_band.new_member_message
else:
the_text = ""
whole_message = "{0}{1}".format(
_('new_band_via_invite_email').format(the_band.name if the_band else "new band"),
the_text,
)
return _send_admin_mail(the_member.email_address, _('Gig-o-Matic New Band Invite'), whole_message)
def send_gigo_invite_email(the_band, the_member, the_url):
return _send_admin_mail(the_member.email_address, _('Invitation to Join Gig-o-Matic'),
_('gigo_invite_email').format(the_band.name, the_url))
def send_the_pending_email(the_email_address, the_confirm_link):
return _send_admin_mail(the_email_address, _('Gig-o-Matic Confirm Email Address'),
_('confirm_email_address_email').format(the_confirm_link))
def notify_superuser_of_archive(the_num):
    return _send_admin_mail(_bare_admin_email_address, 'Gig-o-Matic Auto-Archiver',
        "Yo! The Gig-o-Matic archived {0} gigs last night.".format(the_num))
def notify_superuser_of_old_tokens(the_num):
return _send_admin_mail(_bare_admin_email_address, 'Gig-o-Matic Old Tokens',
"Yo! The Gig-o-Matic found {0} old signup tokens last night.".format(the_num))
def send_band_request_email(the_email_address, the_name, the_info):
if not gae_mail.is_email_valid(the_email_address):
return False
body = u"""
Hi there! Someone has requested to add their band to the Gig-o-Matic. SO EXCITING!
{0}
{1}
{2}
Enjoy,
Team Gig-o-Matic
""".format(the_email_address, the_name, the_info)
return _send_admin_mail(_bare_admin_email_address, 'Gig-o-Matic New Band Request', body)
class LogBounceHandler(BounceNotificationHandler):
def receive(self, bounce_message):
# logging.info('Received bounce post ... [%s]', self.request)
# logging.info('Bounce original: %s', bounce_message.original)
logging.info('Bounce notification: %s', bounce_message.notification)
class IncomingEmailHandler(InboundMailHandler):
def post(self, address):
        self.receive(gae_mail.InboundEmailMessage(self.request.body))
def receive(self, mail_message):
logging.info('Incoming email to {0} from {1}'.format(mail_message.to, mail_message.sender))
class AdminPage(BaseHandler):
""" Page for member administration """
@user_required
@superuser_required
def get(self):
if member.member_is_superuser(self.user):
self._make_page(the_user=self.user)
else:
return self.redirect('/')
def _make_page(self,the_user):
template_args = {}
self.render_template('email_admin.html', template_args)
class SendTestEmail(BaseHandler):
@user_required
@superuser_required
def post(self):
address = self.request.get('address', None)
if address:
_safe_taskqueue_add(
url='/send_test_email_handler',
params={'the_address':address}
)
self.response.write( 200 )
class SendTestEmailHandler(webapp2.RequestHandler):
def post(self):
_check_taskqueue_trust(self.request)
the_address = self.request.get('the_address', None)
if the_address:
_send_admin_mail(the_address, "testing email",
"This is a test email from gig-o-matic. Please let superuser@gig-o-matic.com know if you recieved this! Thanks.",
html=None, reply_to=None)
else:
logging.error('bad request to send email from {0}'.format(self.request.remote_addr))
self.response.write( 200 )
class MemberTestEmail(BaseHandler):
@user_required
def post(self):
member_key_urlsafe = self.request.get('mk', None)
if member_key_urlsafe:
the_member=member.member_key_from_urlsafe(member_key_urlsafe).get()
else:
raise ValueError('illegal member key to MemberTestEmail')
_safe_taskqueue_add(
url='/member_test_email_handler',
params={'the_address':the_member.email_address}
)
self.response.write( 200 )
class MemberTestEmailHandler(webapp2.RequestHandler):
def post(self):
_check_taskqueue_trust(self.request)
the_address = self.request.get('the_address', None)
if the_address:
_send_admin_mail(the_address, "testing email",
"This is a test email from gig-o-matic. Looks like everything worked!",
html=None, reply_to=None)
else:
logging.error('bad request to send member test email from {0}'.format(self.request.remote_addr))
self.response.write( 200 )
def _safe_taskqueue_add(url, params):
params['the_key'] = cryptoutil.encrypt_string("Trust Me")
taskqueue.add(queue_name='emailqueue', url=url, params=params)
def _check_taskqueue_trust(request):
the_key = request.get('the_key','')
plain_key = cryptoutil.decrypt_string(the_key).strip()
if not plain_key == "Trust Me":
raise RuntimeError('bad key to send email from {0}'.format(request.remote_addr))
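# The two helpers above form a minimal shared-secret handshake: the enqueueing
# side attaches an encrypted copy of the literal "Trust Me", and each task
# handler calls _check_taskqueue_trust to verify it, so only tasks added via
# _safe_taskqueue_add are accepted.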
| SecondLiners/GO2 | goemail.py | Python | gpl-3.0 | 17,363 | ["exciting"] | f57a3b1f8700a268a67d73654069793160c18cff17a7219aeb0ac3416837a2ba |
"""Handle disambiguation of reads from a chimeric input, splitting by organism.
Given specification of mixed input samples, splits a sample into multiple
sub-samples for alignment to individual genomes, then runs third-party disambiguation
scripts to reconcile.
Uses disambiguation scripts contributed by AstraZeneca, incorporated into bcbio-nextgen:
https://github.com/mjafin/disambiguate
"""
import collections
import copy
import os
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline.disambiguate.run import main as disambiguate_main
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import merge, run_info
from bcbio.provenance import do
from bcbio import bam
def split(*items):
"""Split samples into all possible genomes for alignment.
"""
out = []
for data in [x[0] for x in items]:
dis_orgs = data["config"]["algorithm"].get("disambiguate")
if dis_orgs:
data["disambiguate"] = {"genome_build": data["genome_build"],
"base": True}
out.append([data])
# handle the instance where a single organism is disambiguated
if isinstance(dis_orgs, basestring):
dis_orgs = [dis_orgs]
for dis_org in dis_orgs:
dis_data = copy.deepcopy(data)
dis_data["disambiguate"] = {"genome_build": dis_org}
dis_data["genome_build"] = dis_org
dis_data = run_info.add_reference_resources(dis_data)
out.append([dis_data])
else:
out.append([data])
return out
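# Sketch of what split() produces (the sample dict below is hypothetical and
# heavily trimmed; in practice add_reference_resources needs a full config):
#
#   data = {"genome_build": "hg19",
#           "config": {"algorithm": {"disambiguate": ["mm10"]}}}
#   out = split([data])
#   # out[0][0]["disambiguate"] -> {"genome_build": "hg19", "base": True}
#   # out[1][0]["genome_build"] -> "mm10", with its own reference resources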
def resolve(items, run_parallel):
"""Combine aligned and split samples into final set of disambiguated reads.
"""
out = []
to_process = collections.defaultdict(list)
for data in [x[0] for x in items]:
if "disambiguate" in data:
split_part = tuple(data["align_split"]) if data.get("combine") else None
to_process[(dd.get_sample_name(data), split_part)].append(data)
else:
out.append([data])
if len(to_process) > 0:
dis1 = run_parallel("run_disambiguate",
[(xs, xs[0]["config"]) for xs in to_process.itervalues()])
disambigs = []
for xs in dis1:
assert len(xs) == 1
disambigs.append(xs[0])
dis2 = run_parallel("disambiguate_merge_extras",
[[disambigs, disambigs[0]["config"]]])
else:
dis2 = []
return out + dis2
def merge_extras(items, config):
"""Merge extra disambiguated reads into a final BAM file.
"""
final = {}
for extra_name in items[0]["disambiguate"].keys():
items_by_name = collections.defaultdict(list)
for data in items:
items_by_name[dd.get_sample_name(data)].append(data)
for sname, name_items in items_by_name.items():
if sname not in final:
final[sname] = {}
in_files = []
for data in name_items:
in_files.append(data["disambiguate"][extra_name])
out_file = "%s-allmerged%s" % os.path.splitext(in_files[0])
if in_files[0].endswith(".bam"):
merged_file = merge.merge_bam_files(in_files, os.path.dirname(out_file), config,
out_file=out_file)
else:
assert extra_name == "summary", extra_name
merged_file = _merge_summary(in_files, out_file, name_items[0])
final[sname][extra_name] = merged_file
out = []
for data in items:
data["disambiguate"] = final[dd.get_sample_name(data)]
out.append([data])
return out
def _merge_summary(in_files, out_file, data):
"""Create one big summary file for disambiguation from multiple splits.
"""
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for i, in_file in enumerate(in_files):
with open(in_file) as in_handle:
for j, line in enumerate(in_handle):
if j == 0:
if i == 0:
out_handle.write(line)
else:
out_handle.write(line)
return out_file
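# Sketch of the header handling above: given two split summaries that share a
# header line,
#   a.txt -> "sample<TAB>unique A<TAB>unique B" plus count rows for split 1
#   b.txt -> the same header plus count rows for split 2
# _merge_summary(["a.txt", "b.txt"], "all.txt", data) writes the header once,
# followed by the count rows from both files.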
def run(items, config):
"""Run third party disambiguation script, resolving into single set of calls.
"""
    assert len(items) == 2, "Can only resolve two-organism disambiguation"
# check aligner, handling tophat/tophat2 distinctions
aligner = config["algorithm"].get("aligner")
aligner = "tophat" if aligner.startswith("tophat") else aligner
assert aligner in ["bwa", "tophat", "star"], "Disambiguation only supported for bwa, star and tophat alignments."
if items[0]["disambiguate"].get("base"):
data_a, data_b = items
else:
data_b, data_a = items
work_bam_a = bam.sort(data_a["work_bam"], config, "queryname")
work_bam_b = bam.sort(data_b["work_bam"], config, "queryname")
if data_a.get("align_split"):
base_dir = utils.safe_makedir(os.path.normpath(os.path.join(os.path.dirname(work_bam_a),
os.pardir, os.pardir,
"disambiguate_%s" % aligner)))
out_dir = os.path.join(base_dir, "_".join([str(x) for x in data_a["align_split"]]))
else:
out_dir = os.path.normpath(os.path.join(os.path.dirname(work_bam_a),
os.pardir, "disambiguate_%s" % aligner))
base_name = os.path.join(out_dir, os.path.splitext(os.path.basename(work_bam_a))[0])
summary_file = "%s_summary.txt" % base_name
if not utils.file_exists(summary_file):
with file_transaction(items[0], out_dir) as tx_out_dir:
Args = collections.namedtuple("Args", "A B output_dir intermediate_dir "
"no_sort prefix aligner")
args = Args(work_bam_a, work_bam_b, tx_out_dir, tx_out_dir,
True, "", aligner)
disambiguate_main(args)
data_a["disambiguate"] = \
{data_b["genome_build"]: bam.sort("%s.disambiguatedSpeciesB.bam" % base_name, config),
"%s-ambiguous" % data_a["genome_build"]: bam.sort("%s.ambiguousSpeciesA.bam" % base_name, config),
"%s-ambiguous" % data_b["genome_build"]: bam.sort("%s.ambiguousSpeciesB.bam" % base_name, config),
"summary": summary_file}
data_a["work_bam"] = bam.sort("%s.disambiguatedSpeciesA.bam" % base_name, config)
return [[data_a]]
def run_cplusplus(items, config):
"""Run third party disambiguation script, resolving into single set of calls.
"""
    assert len(items) == 2, "Can only resolve two-organism disambiguation"
# check aligner, handling tophat/tophat2 distinctions
aligner = config["algorithm"].get("aligner")
aligner = "tophat" if aligner.startswith("tophat") else aligner
assert aligner in ["bwa", "tophat", "star"], "Disambiguation only supported for bwa, star and tophat alignments."
if items[0]["disambiguate"].get("base"):
data_a, data_b = items
else:
data_b, data_a = items
work_bam_a = bam.sort(data_a["work_bam"], config, "queryname")
work_bam_b = bam.sort(data_b["work_bam"], config, "queryname")
out_dir = os.path.normpath(os.path.join(os.path.dirname(work_bam_a),
os.pardir, os.pardir, "disambiguate"))
base_name = os.path.join(out_dir, os.path.splitext(os.path.basename(work_bam_a))[0])
summary_file = "%s_summary.txt" % base_name
if not utils.file_exists(summary_file):
with file_transaction(items[0], out_dir) as tx_out_dir:
raise NotImplementedError("Still need to test and support C++ version")
cmd = ""
do.run(cmd.format(**locals()), "Disambiguation", data_a)
data_a["disambiguate"] = \
{data_b["genome_build"]: "%s.disambiguatedSpeciesB.bam" % base_name,
"%s-ambiguous" % data_a["genome_build"]: "%s.ambiguousSpeciesA.bam" % base_name,
"%s-ambiguous" % data_b["genome_build"]: "%s.ambiguousSpeciesB.bam" % base_name,
"summary": summary_file}
data_a["work_bam"] = bam.sort("%s.disambiguatedSpeciesA.bam" % base_name, config)
return [[data_a]]
|
hjanime/bcbio-nextgen
|
bcbio/pipeline/disambiguate/__init__.py
|
Python
|
mit
| 8,568
|
[
"BWA"
] |
a60fef29db373f538753b70cd525a4bd890c5a75109d5f58f5d3ddb8d7cd7138
|
"""edc_dashboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from edc_base.views import LoginView, LogoutView
from edc_example.admin_site import edc_example_admin
from .views import HomeView
urlpatterns = [
url(r'login', LoginView.as_view(), name='login_url'),
url(r'logout', LogoutView.as_view(pattern_name='login_url'), name='logout_url'),
url(r'^edc/', include('edc_base.urls', 'edc-base')),
url(r'^visit-schedule/', include('edc_visit_schedule.urls', 'edc-visit-schedule')),
url(r'^dashboard/', include('edc_dashboard.urls', 'edc-dashboard')),
url(r'^admin/', edc_example_admin.urls),
url(r'^admin/', admin.site.urls),
url(r'^home/', HomeView.as_view(), name='home_url'),
url(r'^', HomeView.as_view(), name='home_url'),
]
|
botswana-harvard/edc-dashboard
|
example/urls.py
|
Python
|
gpl-2.0
| 1,427
|
[
"VisIt"
] |
dacb5318b57e29e1aea5fd0b01acfe4dc140d1697f6720351257b082d0ba6605
|
'''
Created on Jan 20, 2016
@author: rch
'''
from traits.api import \
Float, HasTraits, Property, cached_property, Int, \
Instance, Array, Bool
import numpy as np
from oricreate.api import MappingTask
from oricreate.api import YoshimuraCPFactory, \
fix, link, r_, s_, t_, MapToSurface,\
GuConstantLength, GuDofConstraints, SimulationConfig, SimulationTask, \
FTV, FTA
from oricreate.crease_pattern.crease_pattern_state import CreasePatternState
from oricreate.export import \
InfoCadMeshExporter, ScaffoldingExporter
from oricreate.forming_tasks.forming_task import FormingTask
from oricreate.fu import \
FuPotEngTotal
from oricreate.mapping_tasks.mask_task import MaskTask
from oricreate.simulation_tasks.simulation_history import \
SimulationHistory
import sympy as sp
a_, b_ = sp.symbols('a,b')
def get_fr(var_, L, H):
fx = a_ * (var_ / L)**2 + b_ * (var_ / L)
eqns = [fx.subs(var_, L), fx.subs(var_, L / 2) - H]
ab_subs = sp.solve(eqns, [a_, b_])
fx = fx.subs(ab_subs)
return fx
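# Worked example: get_fr(r_, 2.0, 1.0) imposes f(2) = 0 and f(1) = 1 on
# f(r) = a*(r/2)**2 + b*(r/2), giving a = -4, b = 4, i.e. f(r) = -r**2 + 2*r,
# a parabola through (0, 0), (1, 1) and (2, 0).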
class AddBoundaryTask(MappingTask):
'''
'''
def _add_boundary_facet(self, N1, N2, dir_=-1, delta=0.1, N_start_idx=0):
cp = self.previous_task.formed_object
x1, x2 = cp.x_0[N1, :], cp.x_0[N2, :]
dx = x1[:, 0] - x2[:, 0]
dy = x1[:, 1] - x2[:, 1]
dz = np.zeros_like(dy)
dirvec = np.c_[dx, dy, dz]
x4 = x2[:, :]
x4[:, 1] += dir_ * delta
x3 = np.copy(x4)
x3[:, :] += dirvec * 0.82
x_add = np.vstack([x3, x4])
N3 = N_start_idx + np.arange(len(x3))
N4 = N_start_idx + len(x3) + np.arange(len(x4))
L_add = np.vstack([
np.array([N1, N3]).T,
np.array([N2, N3]).T,
np.array([N3, N4]).T,
np.array([N2, N4]).T
])
F_add = np.vstack([
np.array([N1, N3, N2]).T,
np.array([N3, N4, N2]).T
])
return x_add, L_add, F_add
def _get_formed_object(self):
        '''attach additional facets at the boundary
'''
cp = self.previous_task.formed_object
x_0, L, F = cp.x_0, cp.L, cp.F
n_N = len(x_0)
n_N_add = 8
x_br, L_br, F_br = self._add_boundary_facet(
[8, 37, 15, 43], [37, 15, 43, 20], -1, 0.1, n_N)
x_bl, L_bl, F_bl = self._add_boundary_facet(
[8, 31, 3, 27], [31, 3, 27, 0], -1, 0.1, n_N + n_N_add)
x_tr, L_tr, F_tr = self._add_boundary_facet(
[14, 42, 19, 46], [42, 19, 46, 22], 1, 0.1, n_N + 2 * n_N_add)
x_tl, L_tl, F_tl = self._add_boundary_facet(
[14, 36, 7, 30], [36, 7, 30, 2], 1, 0.1, n_N + 3 * n_N_add)
x_0 = np.vstack([x_0, x_br, x_bl, x_tr, x_tl])
L = np.vstack([L, L_br, L_bl, L_tr, L_tl])
F = np.vstack([F, F_br, F_bl, F_tr, F_tl])
return CreasePatternState(x_0=x_0,
L=L,
F=F)
class DoublyCurvedYoshiFormingProcess(HasTraits):
'''
Define the simulation task prescribing the boundary conditions,
target surfaces and configuration of the algorithm itself.
'''
L_x = Float(3.0, auto_set=False, enter_set=True, input=True)
L_y = Float(2.2, auto_set=False, enter_set=True, input=True)
u_x = Float(0.1, auto_set=False, enter_set=True, input=True)
n_fold_steps = Int(30, auto_set=False, enter_set=True, input=True)
n_load_steps = Int(30, auto_set=False, enter_set=True, input=True)
stiffening_boundary = Bool(False)
ctf = Property(depends_on='+input')
'''control target surface'''
@cached_property
def _get_ctf(self):
return [r_, s_, - 0.2 * t_ * r_ * (1 - r_ / self.L_x) - 0.0000015]
factory_task = Property(Instance(FormingTask))
'''Factory task generating the crease pattern.
'''
@cached_property
def _get_factory_task(self):
return YoshimuraCPFactory(L_x=self.L_x, L_y=self.L_y,
n_x=4, n_y=12)
mask_task = Property(Instance(MaskTask))
'''Configure the simulation task.
'''
@cached_property
def _get_mask_task(self):
return MaskTask(previous_task=self.factory_task,
F_mask=[0, 6, 12, 18, 12, 24, 36, 48,
96, 78, 90, 102, 54, 42,
72, 1, 12, 43, 49, 97, 103, 19,
59, 65, 71, 77, 101, 83, 96, 107,
47, 29, 41, 53,
5, 23, 95,
58, 76,
100, 106,
46, 52],
L_mask=[0, 7, 14, 21, 148, 160, 172, 154, 1, 22, 149, 155,
152, 158, 5, 26, 153, 165, 177, 159, 6, 13, 20, 27,
28, 40, 29, 41, 32, 44, 33, 45, 34, 46, 35, 47, 38, 50, 39, 51,
58, 52, 76, 53, 57, 81, 70, 94, 98, 75, 99, 93,
124, 100, 128, 129, 105, 135, 112, 142, 118, 119,
147, 123],
N_mask=[0, 7, 21, 28, 35, 47, 65, 41, 1, 29, 36, 42, 39,
45, 5, 33, 40, 52, 70, 46, 6, 13, 27, 34])
add_boundary_task = Property(Instance(FormingTask))
'''Initialization to render the desired folding branch.
'''
@cached_property
def _get_add_boundary_task(self):
if self.stiffening_boundary:
return AddBoundaryTask(previous_task=self.mask_task)
else:
return self.mask_task
init_displ_task = Property(Instance(FormingTask))
'''Initialization to render the desired folding branch.
'''
@cached_property
def _get_init_displ_task(self):
cp = self.mask_task.formed_object
return MapToSurface(previous_task=self.add_boundary_task,
target_faces=[(self.ctf, cp.N)])
fold_task = Property(Instance(FormingTask))
'''Configure the simulation task.
'''
@cached_property
def _get_fold_task(self):
self.init_displ_task.x_1
# cp = self.init_displ_task.formed_object
# print 'nodes', x_1[(0, 1, 2, 20, 21, 22), 2]
# cp.u[(26, 25, 24, 23), 2] = -0.01
# cp.x[(0, 1, 2, 20, 21, 22), 2] = 0.0
u_max = self.u_x
fixed_nodes_z = fix(
[0, 1, 2, 20, 21, 22], (2))
# fixed_nodes_x = fix(
# [8, 9, 10, 11, 12, 13, 14], (0))
fixed_nodes_y = fix(
[1, 21], (1)) # 5, 11, 17,
control_left = fix(
[0, 1, 2], (0),
lambda t: t * u_max)
control_right = fix(
[20, 21, 22], (0),
lambda t: -t * u_max)
front_node = fix(
[8], (1), lambda t: t * 0.03)
back_node = fix(
[14], (1), lambda t: -t * 0.03)
dof_constraints = fixed_nodes_z + fixed_nodes_y + \
control_left + control_right + front_node + back_node
gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
gu_constant_length = GuConstantLength()
sim_config = SimulationConfig(goal_function_type='gravity potential energy',
gu={'cl': gu_constant_length,
'dofs': gu_dof_constraints},
acc=1e-5, MAX_ITER=500,
debug_level=0)
return SimulationTask(previous_task=self.init_displ_task,
config=sim_config, n_steps=self.n_fold_steps)
turn_task = Property(Instance(FormingTask))
'''Configure the simulation task.
'''
@cached_property
def _get_turn_task(self):
self.fold_task.x_1
fixed_nodes_z = fix(
[0, 1, 2, 20, 21, 22], (0, 2))
fixed_nodes_y = fix(
[1, 21], (1))
front_nodes = fix(
[8, 14], (0, 1, 2))
dof_constraints = fixed_nodes_z + fixed_nodes_y + \
front_nodes
gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
gu_constant_length = GuConstantLength()
sim_config = SimulationConfig(goal_function_type='gravity potential energy',
gu={'cl': gu_constant_length,
'dofs': gu_dof_constraints},
acc=1e-5, MAX_ITER=500,
debug_level=0)
st = SimulationTask(previous_task=self.fold_task,
config=sim_config, n_steps=2)
cp = st.formed_object
cp.x_0 = self.fold_task.x_1
cp.x_0[:, 2] *= -1
cp.u[:, :] = 0.0
if self.stiffening_boundary:
cp.u[tuple(np.arange(47, 47 + 32)), 2] = -0.2
return st
turn_task2 = Property(Instance(FormingTask))
'''Configure the simulation task.
'''
@cached_property
def _get_turn_task2(self):
self.fold_task.x_1
u_z = 0.1
fixed_nodes_xzy = fix([7, 19], (0, 1, 2))
lift_nodes_z = fix([3, 15], (2), lambda t: t * u_z)
dof_constraints = fixed_nodes_xzy + lift_nodes_z
gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
gu_constant_length = GuConstantLength()
sim_config = SimulationConfig(goal_function_type='total potential energy',
gu={'cl': gu_constant_length,
'dofs': gu_dof_constraints},
acc=1e-5, MAX_ITER=1000,
debug_level=0)
load_nodes = []
FN = lambda F: lambda t: t * F
F_ext_list = [(n, 2, FN(-10)) for n in load_nodes]
fu_tot_poteng = FuPotEngTotal(kappa=np.array([1000]),
F_ext_list=F_ext_list)
sim_config._fu = fu_tot_poteng
st = SimulationTask(previous_task=self.fold_task,
config=sim_config, n_steps=1)
fu_tot_poteng.forming_task = st
cp = st.formed_object
cp.u[(3, 15), 2] = u_z
return st
load_factor = Float(1.0, input=True, enter_set=True, auto_set=False)
load_task = Property(Instance(FormingTask))
'''Configure the simulation task.
'''
@cached_property
def _get_load_task(self):
self.turn_task.x_1
fixed_nodes_yz = fix([0, 2, 20, 22], (1, 2)) # + \
fixed_nodes_x = fix([0, 2, 20, 22], (0)) # + \
# fix([1, 21], [0, 2])
link_bnd = []
if self.stiffening_boundary:
link_bnd = link([48, 49, 50, 56, 57, 58, 64, 65, 66, 72, 73, 74],
[0, 1, 2], 1.0,
[51, 52, 53, 59, 60, 61, 67, 68, 69, 75, 76, 77],
[0, 1, 2], -1.0)
dof_constraints = fixed_nodes_x + fixed_nodes_yz + link_bnd
gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
gu_constant_length = GuConstantLength()
sim_config = SimulationConfig(goal_function_type='total potential energy',
gu={'cl': gu_constant_length,
'dofs': gu_dof_constraints},
acc=1e-5, MAX_ITER=1000,
debug_level=0)
FN = lambda F: lambda t: t * F
H = 0
P = 3.5 * self.load_factor
        F_ext_list = [(33, 2, FN(-P)), (34, 2, FN(-P)), (11, 2, FN(-P)),
                      (39, 2, FN(-P)), (40, 2, FN(-P)),
                      (4, 0, FN(0.1609 * H)), (4, 2, FN(-0.2385 * H)),
                      (10, 2, FN(-0.3975 * H)),
                      (16, 0, FN(-0.1609 * H)), (16, 2, FN(-0.2385 * H)),
                      (6, 0, FN(0.1609 * H)), (6, 2, FN(-0.2385 * H)),
                      (12, 2, FN(-0.3975 * H)),
                      (18, 0, FN(-0.1609 * H)), (18, 2, FN(-0.2385 * H))]
fu_tot_poteng = FuPotEngTotal(kappa=np.array([5.28]),
F_ext_list=F_ext_list)
# load_nodes = [10, 11, 12]
# FN = lambda F: lambda t: t * F
# F_ext_list = [(n, 2, FN(-10)) for n in load_nodes]
# fu_tot_poteng = FuPotEngTotal(kappa=np.array([10]),
# F_ext_list=F_ext_list) # (2 * n, 2, -1)])
sim_config._fu = fu_tot_poteng
st = SimulationTask(previous_task=self.turn_task,
config=sim_config, n_steps=self.n_load_steps)
fu_tot_poteng.forming_task = st
cp = st.formed_object
cp.x_0 = self.turn_task.x_1
cp.u[:, :] = 0.0
return st
measure_task = Property(Instance(FormingTask))
'''Configure the simulation task.
'''
@cached_property
def _get_measure_task(self):
mt = MappingTask(previous_task=self.turn_task)
mt.formed_object.reset_state()
return mt
class DoublyCurvedYoshiFormingProcessFTV(FTV):
model = Instance(DoublyCurvedYoshiFormingProcess)
if __name__ == '__main__':
bsf_process = DoublyCurvedYoshiFormingProcess(L_x=3.0, L_y=2.41, n_x=4,
n_y=12, u_x=0.1,
n_fold_steps=20,
n_load_steps=10,
load_factor=5,
                                                  stiffening_boundary=False)
ftv = DoublyCurvedYoshiFormingProcessFTV(model=bsf_process)
fa = bsf_process.factory_task
mt = bsf_process.mask_task
ab = bsf_process.add_boundary_task
if False:
import pylab as p
ax = p.axes()
ab.formed_object.plot_mpl(ax)
p.show()
it = bsf_process.init_displ_task
ft = bsf_process.fold_task
tt = bsf_process.turn_task
tt2 = bsf_process.turn_task2
lt = bsf_process.load_task
animate = False
show_init_task = False
show_fold_task = False
show_turn_task = False
show_turn_task2 = False
show_load_task = False
show_measure_task = True
export_and_show_mesh = False
export_scaffolding = False
fta = FTA(ftv=ftv)
fta.init_view(a=33.4389721223,
e=61.453898329,
d=5.0,
f=(1.58015494765,
1.12671403563,
-0.111520325399),
r=-105.783218753)
if show_init_task:
ftv.add(it.target_faces[0].viz3d['default'])
it.formed_object.viz3d['cp'].set(tube_radius=0.002)
ftv.add(it.formed_object.viz3d['cp'])
#ftv.add(it.formed_object.viz3d['node_numbers'], order=5)
it.u_1
if show_fold_task:
ft.sim_history.set(anim_t_start=0, anim_t_end=10)
ft.config.gu['dofs'].set(anim_t_start=0, anim_t_end=5)
ft.sim_history.viz3d['cp'].set(tube_radius=0.002)
ftv.add(ft.sim_history.viz3d['cp'])
# ftv.add(ft.sim_history.viz3d['node_numbers'])
ft.config.gu['dofs'].viz3d['default'].scale_factor = 0.5
ftv.add(ft.config.gu['dofs'].viz3d['default'])
ft.u_1
fta.add_cam_move(duration=10, n=20)
if show_turn_task:
tt.formed_object.set(anim_t_start=10, anim_t_end=20)
tt.formed_object.viz3d['cp'].set(tube_radius=0.002)
ftv.add(tt.formed_object.viz3d['cp'])
fta.add_cam_move(duration=10, n=20,
)
if show_turn_task2:
tt2.u_1
tt2.formed_object.set(anim_t_start=10, anim_t_end=20)
tt2.sim_history.viz3d['cp'].set(tube_radius=0.002)
ftv.add(tt2.sim_history.viz3d['cp'])
tt2.config.gu['dofs'].viz3d['default'].scale_factor = 0.5
ftv.add(tt2.config.gu['dofs'].viz3d['default'])
fta.add_cam_move(a=45, e=73, d=5,
duration=10, n=20,
azimuth_move='damped',
elevation_move='damped',
distance_move='damped')
if show_load_task == True:
lt.sim_history.set(anim_t_start=0, anim_t_end=50)
lt.config.gu['dofs'].set(anim_t_start=0, anim_t_end=50)
lt.config.fu.set(anim_t_start=0, anim_t_end=50)
lt.sim_history.viz3d['displ'].set(tube_radius=0.002,
warp_scale_factor=5.0)
# ftv.add(lt.formed_object.viz3d_dict['node_numbers'], order=5)
ftv.add(lt.sim_history.viz3d['displ'])
#lt.config.gu['dofs'].viz3d['default'].scale_factor = 0.5
ftv.add(lt.config.gu['dofs'].viz3d['default'])
ftv.add(lt.config.fu.viz3d['default'])
lt.config.fu.viz3d['default'].set(anim_t_start=00, anim_t_end=50)
ftv.add(lt.config.fu.viz3d['node_load'])
print('u_13', lt.u_1[13, 2])
n_max_u = np.argmax(lt.u_1[:, 2])
print('node max_u', n_max_u)
print('u_max', lt.u_1[n_max_u, 2])
ftv.plot()
ftv.configure_traits()
cp = lt.formed_object
iL_phi = cp.iL_psi - cp.iL_psi_0
iL_m = lt.config._fu.kappa * iL_phi
print('moments', np.max(np.fabs(iL_m)))
fta.add_cam_move(duration=10, n=20)
fta.add_cam_move(duration=10, n=20, vot_start=1.0)
fta.add_cam_move(duration=10, n=20, vot_start=1.0)
if show_measure_task:
mt = bsf_process.measure_task
import os.path as path
from os.path import expanduser
home = expanduser("~")
test_dir = path.join(home, 'simdb', 'exdata',
'shell_tests', '2016-09-09-FSH04-Canopy')
states = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
measured_states = []
for state in states:
fname = 'KO%s.txt' % state
fname = path.join(test_dir, fname)
print('read', fname)
measured_state = np.loadtxt(fname)
x = measured_state[:, 1:]
measured_states.append(x)
x_t = np.array(measured_states)
x_0 = x_t[0, ...]
u_t = x_t[:, :, :] - x_0[np.newaxis, :, :]
cp = lt.formed_object
sh = SimulationHistory(x_0=x_0, L=cp.L, F=cp.F,
u_t=u_t)
sh.set(anim_t_start=0, anim_t_end=50)
sh.viz3d['displ'].set(tube_radius=0.002)
ftv.add(sh.viz3d['displ'])
ftv.plot()
ftv.configure_traits()
if export_and_show_mesh:
lt = bsf_process.load_task
me = InfoCadMeshExporter(forming_task=lt, n_l_e=4)
me.write()
X, F = me._get_geometry()
x, y, z = X.T
import mayavi.mlab as m
me.plot_mlab(m)
m.show()
#
if export_scaffolding:
sf = ScaffoldingExporter(forming_task=ft)
fta.plot()
fta.configure_traits()
if animate:
n_cam_move = 20
fta = FTA(ftv=ftv)
fta.init_view(a=33.4389721223,
e=61.453898329,
d=4.13223140496, f=(1.58015494765,
1.12671403563,
-0.111520325399), r=-105.783218753)
fta.add_cam_move(a=60, e=70, n=n_cam_move, d=8, r=-120,
duration=10,
vot_fn=lambda cmt: np.linspace(0.01, 0.5, n_cam_move),
azimuth_move='damped',
elevation_move='damped',
distance_move='damped')
fta.add_cam_move(a=80, e=80, d=7, n=n_cam_move, r=-132,
duration=10,
vot_fn=lambda cmt: np.linspace(0.5, 1.0, n_cam_move),
azimuth_move='damped',
elevation_move='damped',
distance_move='damped')
fta.plot()
fta.configure_traits()
|
simvisage/oricreate
|
apps/sandbox/christoph/ex04_canopy.py
|
Python
|
gpl-3.0
| 19,829
|
[
"Mayavi"
] |
c4235dc53818bdb6ef48a29202594ca436c5d8a6f59da3fa3b10d7d85b4d88f4
|
'''
@author Akshay Choche
@see LICENSE (MIT style license file).
'''
import jpype
import logging
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
    import urllib2
import platform
import os,sys
'''
This script is responsible for calling the Java code that removes tools from the
tool menu in Galaxy.
'''
logger = logging.getLogger('myapp')
logger_home = str(os.environ.get('GALAXY_HOME')) + '/tools/WebServiceToolWorkflow_REST_SOAP/Logs/Remove_Tools/removetool.log'
hdlr = logging.FileHandler(logger_home)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
operationToRemove = sys.argv[1]
outputFile = sys.argv[2]
logger.info("Attempting to remove " + str(operationToRemove) + " from stand alone tools")
jar_home = str(os.environ.get('GALAXY_HOME')) + '/tools/WebServiceToolWorkflow_REST_SOAP/engine'
jarpath = os.path.join(os.path.abspath(jar_home), '')
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.ext.dirs=%s" % jarpath)
removeClientPackage = jpype.JPackage("edu.uga.WSExtension.AddingWebServiceAsTools")
removeClient = removeClientPackage.RemoveTool(str(os.environ.get('GALAXY_HOME')))
test = removeClient.removeStandAloneTool(str(operationToRemove))
logger.info("The tool has been deleted from tool_conf.xml, personlaized list and the stub")
|
UGA-WSAG/wsextensions
|
WebServiceToolWorkflow_REST_SOAP/removestandalonetool.py
|
Python
|
mit
| 1,426
|
[
"Galaxy"
] |
19676363e5d07e3b0554c4c1893a9955d226913335ffda84cc699c39886fb797
|
#!/usr/bin/env python
from pylab import *
import os
from scipy.stats.stats import spearmanr
from scipy.stats import ks_2samp
from scipy.stats import scoreatpercentile
from scipy.stats.mstats import normaltest
from anderson import *
from astropy.stats import bootstrap
import numpy as np
pscale24=2.45#arcsec per pixel
pscalesdss=1.#arcsec per pixel
sdsspixelscale=0.396127#conversion for isophotal radii from pixels to arcseconds
mipspixelscale=pscale24
mipsconv_MJysr_to_uJy=141.086
mingalaxysize=2.*mipspixelscale
lcsramin=170.
lcsramax=250.
lcsdecmin=0.
lcsdecmax=38.
sbmin=13
sbmax=20
Mpcrad_kpcarcsec = 2. * pi/360./3600.*1000.
minsize_kpc=1.3*2 # one mips pixel at distance of hercules
clusternames=['MKW11', 'MKW8', 'AWM4', 'A2063', 'A2052', 'NGC6107', 'Coma', 'A1367', 'Hercules']
clusternamesbylx=['MKW11', 'NGC6107','MKW8', 'AWM4', 'Hercules','A1367','A2063', 'A2052', 'Coma' ]
clusternamesbydistance=['A1367','MKW11', 'Coma','MKW8', 'NGC6107', 'AWM4','A2063', 'A2052', 'Hercules']
numberofstars={'MKW11':5, 'MKW8':2, 'AWM4':5, 'A2063':5, 'A2052':4, 'NGC6107':5, 'Coma':5, 'A1367':1, 'Hercules':1}
clusterRA={'MKW11':202.3800, 'MKW8':220.1796, 'AWM4':241.2375, 'A2063':230.7578, 'A2052':229.1896, 'NGC6107':244.333750, 'Coma':194.9531, 'A1367':176.1231, 'Hercules':241.3125,'MKW10':175.5449}
clusterDec={'MKW11':11.78861, 'MKW8':3.4530, 'AWM4':23.9206, 'A2063':8.6394, 'A2052':7.0003, 'NGC6107':34.901389, 'Coma':27.9807, 'A1367':19.8391, 'Hercules':17.7485,'MKW10':10.3059}
clustervel={'MKW11':6854., 'MKW8':8100., 'AWM4':9526., 'A2063':10481., 'A2052':10647., 'NGC6107':9197., 'Coma':6900., 'A1367':8400., 'Hercules':11100.,'MKW10':6158.}
clustersigma={'MKW11':361, 'MKW8':325., 'AWM4':500., 'A2063':660., 'A2052':562., 'NGC6107':500., 'Coma':1000., 'A1367':745., 'Hercules':689.}
clusterf80MJysr={'MKW11':4., 'MKW8':3.75, 'AWM4':3.5, 'A2063':4., 'A2052':4., 'NGC6107':3.25, 'Coma':2.25, 'A1367':3.5, 'Hercules':3.25}
clusterz={'MKW11':.022849,'MKW8':.027,'AWM4':.031755,'A2063':.034937,'A2052':.035491,'NGC6107':.030658,'Coma':.023,'A1367':.028,'Hercules':.037,'MKW10':.02054}
john_prefix={'MKW11':'mkw11','MKW8':'mkw8','AWM4':'awm4','A2063':'abell2063','A2052':'abell2052','NGC6107':'ngc6107','Coma':'coma','A1367':'abell1367','Hercules':'hercules'}
xraycontourlevels={'MKW11':[.85,1.69,2.54],'MKW8':[.49,.99,1.48,1.98],'AWM4':[.8,1.6,2.4],'NGC6107':[1.43,2.85,4.27],'A2052':[.9,1.8,2.7,3.6],'A2063':[.9,1.8,2.7,3.6],'Hercules':[.9,1.92,2.9,3.8],'A1367':[.6,1.17,1.76,2.35],'Coma':[.88,1.76,2.63,3.51]}#used contour option in ds9 to derive these
coma_badobjects=[142589,104115,104020,104022,142662,142763,142797,162768,162797]
spiral_nozoo={'MKW11':[70685, 143485, 143530,143570,169997,171141], \
'MKW8':[15218,18127,145303,145586,165696], \
'AWM4':[63611, 68238, 68271, 68272, 68283, 68287, 68288, 68338,68341,68387,68430,68435, 68436,68437, 68439,68432, 146715, 166624], \
'NGC6107':[43707, 43712, 43787, 43857, 69538], \
'A2052':[79550,79593, 79646,79665,79680, 79705, 145994, 166042], \
'A2063':[72672, 72739, 72767, 72775,146137,166124], \
'Hercules':[99840, 146607,146635], \
'A1367':[140124,140145, 140176, 140194], \
'Coma':[104125, 104232,142527,142561,142563,142568,142572, 142627,142642,142653,142656,142663,142666,142668,142669,142676,142682,142687,142689,142693,142695,142706,142710,142713,142715,142716,142723,142727,142737,142740,142745,142750,142755,142758,142765,142767,142769,142774,142779,142781,142793,142795,142801,142804,142806,142808,142809,142810,142815,142819,142825,142837,142847,142855,142873,142914,162740]}#used contour option in ds9 to derive these
zoo_overide_flag = [70637, 70658, 70676, 163589, 169994] # galaxies that are clearly spirals but have low probability of being a spiral (p_cs < 0.7) according to galaxy zoo
spiral_100_nozoo={'MKW11':[70685, 143485, 143530],'MKW8':[18127],'AWM4':[68283, 68288, 68338, 68341, 166624],'NGC6107':[43707, 43712, 43857],'A2052':[79646,145994,166042],'A2063':[166124],'Hercules':[99840, 146607],'A1367':[140124,140177],'Coma':[142572, 142668,142914,162740]}#100% sure these are spirals
elliptical_nozoo={'MKW11':[143436,143514,143529],\
'MKW8':[145280],\
'AWM4':[68279,68438,146626],\
'NGC6107':[146832, 146876, 146878,146880, 166860],
'A2052':[79600, 79610, 79705,79710,146012, 146037, 146041],\
'A2063':[72751],\
'Hercules':[146638,146664],\
'A1367':[113076,113458, 140164],\
'Coma':[103978,104022,104061,104115,142531,142552,142584,142585,142604,142605,142609,142611,142614,142615,142616,142622,142623,142628,142636,142637,142638,142647,142648,142649,142651,142658,142660,142661,142675,142677,142678,142681,142684,142690,142699,142705,142717,142721,142725,142729,142741,142743,142761,142787,142803,142813,142832,142852,142866,162659]}#used contour option in ds9 to derive these
irreg_nozoo={'MKW11':[143709, 171128],'MKW8':[18255,165628],'AWM4':[],'NGC6107':[],'A2052':[],'A2063':[],'Hercules':[146673, 146680, 166679],'A1367':[140170, 140183, 140184, 140186, 160496],'Coma':[142559,142560,142578,142590,142593,142613,142620,142631,142645,142652,142667,142673,142679,142697,142718,142733,142753,142762,142771,142786,142821,142823,142826,142831,142834,142849,162689]}#used contour option in ds9 to derive these
unsure_nozoo={'MKW11':[],'MKW8':[],'AWM4':[],'NGC6107':[],'A2052':[],'A2063':[72627],'Hercules':[146680, 166679],'A1367':[140170,140176,140194, 160496],'Coma':[]}
# galaxies to cut from sample, at least initially
# galaxies that have contamination by a nearby neighbor. see notes.
visual_cut={'MKW11':[70639,70694,143485,171004],'MKW8':[18111, 18171],'AWM4':[82134, 82188, 82209, 146626, 166655, 166699],'NGC6107':[43782, 43814, 69617, 69618],'A2052':[ 79388, 166086],'A2063':[72631, 72710, 72745, 72782, 146106, 146107, 146124, 146128, 146130,146135],'Hercules':[99056, 99644, 99822, 99859, 99872, 146607, 146659, 166638],'A1367':[113058, 113404, 140197],'Coma':[103612, 103628, 103648, 103784, 103831, 103833, 103844, 103924, 103933, 104001, 104004, 104035, 104126, 142655, 142840, 162793, 162831]}
#Group names
groupnames=['NRGb041','NRGb151','NRGb157','NRGb168','NRGb206','NRGb247','NRGb282','NRGb301','MKW8','NCG5846','NRGs076','NRGs272','NRGs385']
altgroupnames=['WBL226','MKW10','HCG59','WBL368','WBL404','MKW11test','Zw1400','WBL509','MKW8','NGC5846','WBL251','WBL477','NGC6107']
#location of Final images
# central biweight location as calculated from findbestbiweight code
# clusterbiweightcenter={'MKW11':6906,'MKW8':8098,'AWM4':9650,'A2063':10422,'A2052':10354.5,'NGC6107':9429,'Coma':6999,'A1367':6481,'Hercules':10957.5}
#sbi values output from +/- 4000km/s and 1 degree velocity cut from findbestbiweight code
# clusterbiweightscale={'MKW11':392.37,'MKW8':491.32,'AWM4':476.67,'A2063':727.06,'A2052':626.32,'NGC6107':616.86,'Coma':937.03,'A1367':794.61,'Hercules':772.74}
# redid biweight calculations in Jan 2015 to use NSA as base catalog
# also implemented bootstrap resampling for errors
# central biweight location as calculated from LCSbiweight code
clusterbiweightcenter={'MKW11':6904,'MKW8':8039,'AWM4':9604,'A2063':10410,'A2052':10431,'NGC6107':9397,'Coma':7011,'A1367':6505,'Hercules':10917}
clusterbiweightcenter_errp={'MKW11':38,'MKW8':40,'AWM4':61,'A2063':72,'A2052':57,'NGC6107':57,'Coma':45,'A1367':55,'Hercules':50}
clusterbiweightcenter_errm={'MKW11':49,'MKW8':38,'AWM4':55,'A2063':74,'A2052':64,'NGC6107':53,'Coma':44,'A1367':54,'Hercules':53}
#sbi values output from +/- 4000km/s and 1 degree velocity cut from findbestbiweight code
clusterbiweightscale={'MKW11':383,'MKW8':443,'AWM4':458,'A2063':862,'A2052':666,'NGC6107':578,'Coma':1054,'A1367':838,'Hercules':790}
clusterbiweightscale_errp={'MKW11':19,'MKW8':29,'AWM4':107,'A2063':42,'A2052':37,'NGC6107':47,'Coma':26,'A1367':31,'Hercules':29}
clusterbiweightscale_errm={'MKW11':27,'MKW8':31,'AWM4':95,'A2063':65,'A2052':45,'NGC6107':34,'Coma':29,'A1367':42,'Hercules':31}
# X-ray luminosity in 10^43 ergs/s
# from Bohringer et al 2000, and Mahdavi et Geller
#clusterLx={'MKW11':0.033,'MKW8':0.096,'AWM4':0.550,'A2063':1.940,'A2052':2.580,'NGC6107':0.33,'Coma':7.010,'A1367':1.510,'Hercules':0.980}
# from http://bax.ast.obs-mip.fr/servlets/omp.servlet.ClusterQueryByName#
# Lx (10^44 ergs/s) in 0.1-2.4 keV band
clusterLx={'MKW11':0.073397, # Jones & Forman 1999
'MKW8':0.096, # no Lx from bax
'AWM4':0.51799, # Bohringer + 2000
'A2063':2.196055, # Reiprich 2002
'A2052':2.521777, # Reiprich 2002
'NGC6107':0.331708, # Bohringer + 2000
'Coma':7.766525, #http://cdsads.u-strasbg.fr/cgi-bin/nph-bib_query?bibcode=2002ApJ...567..716R&db_key=AST Reiprich
'A1367':1.244663, # Reiprich 2002
'Hercules':0.900308} # Reiprich 2002
clusterTx={'MKW11':0.96, #+/- 0.4, Osmond+ 2004
'MKW8':3.29, # Cavagnolo +2009
'AWM4':2.48, #+/- .06, Gasteldello + 2008
'A2063':3.7, # no temp measurement at bax, see below
'A2052':3.12, # -.05, +.06 Ikebe + 2002
'NGC6107':-99., # no temp measurement in bax
'Coma':8.25, #+/- 0.1, Arnaud 2001A&A
'A1367':3.55, # +/- .05 Ikebe + 2002
'Hercules':2.52} #+/- .12 Ikebe + 2002
# Tx, errdown, errup
clusterTx1={'MKW11':[0],'MKW8':[3.,.12,.12],'AWM4':[0],'A2063':[3.77,.06,.06],'A2052':[3.35,.02,.02],'NGC6107':[0],'Coma':[9.15,.17,.17],'A1367':[3.58,.06,.06],'Hercules':[0]}
# X-ray temp in keV; from Mittal et al 2011
clusterTx2={'MKW11':[0],'MKW8':[2.74,.03,.03],'AWM4':[0],'A2063':[3.70,.02,.02],'A2052':[2.98,.03,.03],'NGC6107':[0],'Coma':[7.31,.06,.06],'A1367':[2.56,.02,.02],'Hercules':[0]} #Frank+2013, ApJ, 764, 46 XMM-NEWTON observations
clusterLx2={'MKW11':0.033,'MKW8':(.692,.058),'AWM4':0.550,'A2063':(2.06,.027),'A2052':(2.18,.022),'NGC6107':0.083,'Coma':(11.1,.156),'A1367':(1.13,.009),'Hercules':0.980}
# list of L500 (1.e37 W), M500(1.e14 Msun) and R500 (Mpc) from Piffaretti+ 2011
clusterXray={'MKW11':[0.065077, 0.3805, 0.5078],'MKW8':[0.192567,0.7352,0.6316],'AWM4':[0.284521,0.9289,0.6815],'A2063':[1.138819,2.1598,0.9020],'A2052':[1.442058,2.4945,0.9465],'NGC6107':[0.168099,0.6744,0.6127],'Coma':[3.455556,4.2846,1.1378],'A1367':[1.104603,2.1398,0.9032],'Hercules':[0.508824,1.3202,0.7652]}
# these correspond to the area w/uniform 24um coverage
# center x,y,dx,dy,rotation E of N, all in degrees
cluster24Box={'MKW11':array([202.36239,11.752736,1.3138054,3.046197,27.0001],'f'), 'MKW8':array([220.18764,3.4955922,1.3188409,3.040413,13.5],'f'), 'AWM4':array([241.21434,23.872723,1.3441978,3.0241238,10],'f'), 'A2063':array([230.77172,8.6817732,1.3126447,3.0415136,13.5001],'f'), 'A2052':array([229.19761,7.0403283,1.3194664,3.0412907,13.25],'f'), 'NGC6107':array([244.30039,34.934184,1.3199655,3.0435265,322],'f'), 'Coma':array([194.86318,27.865896,1.5391027,1.976467,29.5002],'f'), 'A1367':array([176.1019,19.799614,.51080152,.90025557,31.5],'f'), 'Hercules':array([241.3065,17.771646,.51029561,.93431905,19.5001],'f')}
#solar magnitude in SDSS filters
SolarMag={'u':6.39,'g':5.07,'r':4.62,'i':4.52,'z':4.48}
#cosmology
H0=70
OmegaL=0.7
OmegaM=0.3
h=H0/100.
# bell+2003 stellar mass coefficients for sdss filters
# diet Salpeter IMF - 30% lower than Salpeter IMF, less mass from lower-mass stars
# log10(chabrier) = log10(Salpeter) - .25 (used in SFR estimate)
# log10(chabrier) = log10(diet Salpeter) - 0.1 (used in Stellar mass estimates)
bellug={'g':[-.221,0.485],'r':[-.099,0.345],'i':[-.053,0.268],'z':[-.105,0.226]}
bellur={'g':[-.390,0.417],'r':[-.223,0.229],'i':[-.151,0.233],'z':[-.178,0.192]}
bellui={'g':[-.375,0.359],'r':[-.212,0.257],'i':[-.144,0.201],'z':[-.171,0.165]}
belluz={'g':[-.400,0.332],'r':[-.232,0.239],'i':[-.161,0.187],'z':[-.179,0.151]}
bellgr={'g':[-.499,1.519],'r':[-.306,1.097],'i':[-.222,0.864],'z':[-.223,0.689]}
bellgi={'g':[-.379,0.914],'r':[-.220,0.661],'i':[-.152,0.518],'z':[-.175,0.421]}
bellgz={'g':[-.367,0.698],'r':[-.215,0.508],'i':[-.153,0.402],'z':[-.171,0.322]}
bellri={'g':[-.106,1.982],'r':[-.022,1.431],'i':[0.006,1.114],'z':[-.952,0.923]}
bellrz={'g':[-.124,1.067],'r':[-.041,0.780],'i':[-.018,0.623],'z':[-.041,0.463]}
snr24cut=5.
deltaCutout=100.#width of cutouts in arcsec
ramin=170.#cuts for culling the ac
ramax=250.#cuts for culling the ac
decmin=0.
decmax=38.#cuts for culling the ac
zmin=0.01366#min z cut, z(coma)-3 sigma
zmax=0.04333#max z cut, z(A2052, which is 10900 km/s)+ 4*sigma
vmin=zmin*3.e5
vmax=zmax*3.e5
#cutoutpath='/home/rfinn/research/LocalClusters/cutouts/'
cutoutpath='/home/rfinn/research/LocalClusters/cutouts/'
Lsol=3.826e33#normalize by solar luminosity
bellconv=9.8e-11#converts Lir (in L_sun) to SFR/yr
bellconv=4.5e-44#Kenn 98 conversion from erg/s to SFR/yr, assumes salpeter IMF
catalog_radial_cut = 3. # mastertable radial cut in degrees
mypath=os.getcwd()
if mypath.find('Users') > -1:
print "Running on Rose's mac pro"
homedir='/Users/rfinn/'
elif mypath.find('home') > -1:
print "Running on coma"
homedir='/home/rfinn/'
mipsflux2umJyconv=141.086
nmgy_muJy_sqarc_conv=3.631/sdsspixelscale**2
MJy_muJy_sqarc_conv=141.09/mipspixelscale**2
def uJy2ABmag(f):
mag=23.9-2.5*np.log10(f)
return mag
def ABmag2uJy(mag): # returns micro-Jy
f=10.**((mag-23.9)/(-2.5))
return f
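# Quick consistency check (sketch): the two conversions are inverses of each
# other, e.g. ABmag2uJy(23.9) == 1.0 uJy and uJy2ABmag(ABmag2uJy(20.)) == 20.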
sdss_sb_cut=.025*(sdsspixelscale**2)
sdss_sb_cut=ABmag2uJy(25.5)/nmgy_muJy_sqarc_conv
# use a lower limit for MIPS as well
mips_sb_cut=.1/2.5
def multiplotaxes(i):
ax=gca()
noylabel=[2,3,5,6,8,9]
if i < 7:
ax.set_xticklabels(([]))
if i in noylabel:
ax.set_yticklabels(([]))
def multiplotlabels(xl,yl):
ax=gca()
text(-.5,-.25,xl,fontsize=22,horizontalalignment='center',transform=ax.transAxes)
text(-2.45,1.5,yl,fontsize=22,verticalalignment='center',rotation=90,transform=ax.transAxes,family='serif')
def multiplotlabelsv2(xl,yl): # for figures with figsize=(6.5,4)
ax=gca()
text(-.5,-.3,xl,fontsize=14,horizontalalignment='center',transform=ax.transAxes)
text(-2.35,1.6,yl,fontsize=14,verticalalignment='center',rotation=90,transform=ax.transAxes,family='serif')
def spearman_boot(x,y,N=5000,conf_int=68.):
boot_rho=zeros(N,'f')
boot_p=zeros(N,'f')
for i in range(N):
        indices=randint(0,len(x),len(x)) # high is exclusive, so this samples 0..len(x)-1
xboot=x[indices]
yboot=y[indices]
boot_rho[i],boot_p[i]=spearmanr(xboot,yboot)
return scoreatpercentile(boot_rho,per=50),scoreatpercentile(boot_p,per=50)#,boot_rho,boot_p
def spearman(x,y):
#rho,pvalue=spearmanr(x,y)
rho,pvalue=spearman_boot(x,y)
print 'Spearman Rank Test:'
print 'rho = %6.2f'%(rho)
    print 'p-value = %6.5f (prob that samples are uncorrelated)'%(pvalue)
return rho,pvalue
def spearman_with_errors(x,y,yerr,Nmc=1000,plotflag=False,verbose=False):
ysim=np.zeros(Nmc,'f')
rhosim=np.zeros(Nmc,'f')
psim=np.zeros(Nmc,'f')
for i in range(Nmc):
ysim=np.random.normal(y,scale=yerr,size=len(y))
rhosim[i],psim[i] = spearmanr(x,ysim)
cave=np.mean(rhosim)
cstd=np.std(rhosim)
q1=50-34 # mean minus one std
lower=np.percentile(rhosim,q1)
q2=50+34 # mean minus one std
upper=np.percentile(rhosim,q2)
print 'mean (median) = %5.2f (%5.2f), std = %5.2f'%(cave,np.median(rhosim),cstd)
print 'confidence interval from sorted list of MC fit values:'
print 'lower = %5.2f (%5.2f), upper = %5.2f (%5.2f)'%(lower,cave-cstd, upper,cave+cstd)
k,pnorm=normaltest(rhosim)
    print 'probability that distribution of rho values is normal = %5.2f'%(pnorm)
if plotflag:
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
plt.hist(rhosim,bins=10,normed=True)
plt.xlabel(r'$Spearman \ \rho $')
plt.axvline(x=cave,ls='-',color='k')
plt.axvline(x=lower,ls='--',color='k')
plt.axvline(x=upper,ls='--',color='k')
plt.subplot(1,2,2)
plt.hist(np.log10(psim),bins=10,normed=True)
plt.xlabel(r'$\log_{10}(p \ value)$')
return rhosim,psim
def ks_boot(x,y,N=1000,conf_int=68.):
boot_p=zeros(N,'f')
boot_D=zeros(N,'f')
for i in range(N):
        xboot=x[randint(0,len(x),len(x))] # high is exclusive
        yboot=y[randint(0,len(y),len(y))]
boot_D[i],boot_p[i]=ks_2samp(xboot,yboot)
return scoreatpercentile(boot_D,per=50),scoreatpercentile(boot_p,per=50)
def ks(x,y,run_anderson=True):
#D,pvalue=ks_2samp(x,y)
D,pvalue=ks_boot(x,y)
print 'KS Test (median of bootstrap):'
print 'D = %6.2f'%(D)
    print 'p-value = %6.5f (prob that samples are from same distribution)'%(pvalue)
if run_anderson:
anderson(x,y)
return D,pvalue
def anderson(x,y):
t=anderson_ksamp([x,y])
print 'Anderson-Darling test Test:'
print 'D = %6.2f'%(t[0])
    print 'p-value = %6.5f (prob that samples are from same distribution)'%(t[2])
return t[0],t[2]
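# Example (sketch): compare two synthetic samples
#   x = np.random.normal(0., 1., 200)
#   y = np.random.normal(.5, 1., 200)
#   D, p = ks(x, y, run_anderson=False)  # small p -> distributions differ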
def findnearest(x1,y1,x2,y2,delta):#use where command
matchflag=1
nmatch=0
d=sqrt((x1-x2)**2 + (y1-y2)**2)#x2 and y2 are arrays
index=arange(len(d))
t=index[d<delta]
matches=t
if len(matches) > 0:
nmatch=len(matches)
if nmatch > 1:
imatch=index[(d == min(d[t]))]
else:
imatch=matches[0]
else:
imatch = 0
matchflag = 0
return imatch, matchflag,nmatch
def drawbox(data,style):#feed in center x,y,dx,dy,rotation E of N
#xcoords of unrotated box, going around CCW
xl=array([data[0]-0.5*data[2],data[0]+0.5*data[2],data[0]+0.5*data[2],data[0]-0.5*data[2],data[0]-0.5*data[2]],'d')
yl=array([data[1]-0.5*data[3],data[1]-0.5*data[3],data[1]+0.5*data[3],data[1]+0.5*data[3],data[1]-0.5*data[3] ],'d')
xl=array([-0.5*data[2],+0.5*data[2],+0.5*data[2],-0.5*data[2],-0.5*data[2]],'d')
yl=array([-0.5*data[3],-0.5*data[3],+0.5*data[3],+0.5*data[3],-0.5*data[3] ],'d')
ang=data[4]*pi/180.*-1.#convert rotation to radians
#rotate coordinates
xp=cos(ang)*xl-sin(ang)*yl
yp=sin(ang)*xl+cos(ang)*yl
#put back on absolute scale
xp=data[0]+xp
yp=data[1]+yp
#draw rotated box
plot(xp,yp,style)
def transcoords(imge,coords):
outcoords='junk.xy'
s='rm '+outcoords
os.system(s)
iraf.imcoords.wcsctran(image=self.image,input=self.incoords,output=outcoords,inwcs='world',outwcs='logical',verbose='no')
return outcoords
## convert SB in mag/sq arcsec to flux per pixel on mips
def convert_sb_to_fluxperpixel(sb):
flux_zp_AB = 3631. # in Jy
flux_zp_Vega = 7.17 # in Jy
flux_zp=flux_zp_AB
# conversion from image units of MJ/sr to micro-Jy (1 sq arcsec = 2.3504e-11 sr)
conv_MJysr_uJy = 23.5045*(2.45**2)
magzp=2.5*log10(flux_zp*1.e6/conv_MJysr_uJy)
# m2 - m1 = 2.5 log10(f1/f2)
flux_sb=10.**(-1.*sb/2.5)*flux_zp # flux (Jy) per sq arcsec
# flux in micro-Jy
flux_sb=flux_sb*1.e6
# area of a pixel in sq arcsec
parea = mipspixelscale**2
# convert to uJy per sq pixel
flux_sb=flux_sb*parea
# convert to image units of MJy/sr
flux_sb=flux_sb/conv_MJysr_uJy
return flux_sb
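# Example (sketch): MIPS image-unit (MJy/sr) flux per pixel corresponding to
# a 20 mag/arcsec^2 isophote
#   f_pix = convert_sb_to_fluxperpixel(20.)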
def binxycolor(x,y,color,nbin=5,yweights=None,yerr=True,use_median=False,equal_pop_bins=False,bins=None):
'''
- bin x in nbin equally spaced bins
- calculate the median y value in each bin
- calculate the median color in each bin
'''
    if bins is not None:
xbins = bins
nbin = len(xbins)
else:
xbins = np.zeros(nbin,'f')
ybins = np.zeros(nbin,'f')
ybinerr = np.zeros(len(xbins),'f')
colorbins = np.zeros(len(xbins),'f')
if equal_pop_bins:
sorted_indices = np.argsort(x)
y = y[sorted_indices]
x = x[sorted_indices]
color = color[sorted_indices]
n_per_bin = len(x)/nbin
xbin_number = np.arange(len(x))/int(n_per_bin)
#print xbin_number
#print x
    else:
        if bins is None:
            # no explicit edges were given: assign nbin equally spaced bins
            # spanning the data range (xbins is filled in with the per-bin
            # statistics below)
            xbin_number = np.array(((x-min(x))*nbin/(max(x)-min(x))),'i')
            xbin_number[xbin_number == nbin] = nbin-1 # keep max(x) in the last bin
        else:
            xbin_number = -1*np.ones(len(x),'i')
            for i in range(len(xbins)-1):
                flag = (x >= xbins[i]) & (x < xbins[i+1])
                xbin_number[flag] = i*np.ones(sum(flag),'i')
            xbins = xbins + 0.5*(xbins[1]-xbins[0]) # shift edges to bin centers
for i in range(nbin):
if sum(xbin_number == i) < 1:
continue
if use_median:
            if bins is None:
xbins[i] = np.median(x[xbin_number == i])
ybins[i] = np.median(y[xbin_number == i])
colorbins[i] = np.median(color[xbin_number == i])
t = bootstrap(y[xbin_number == i], bootnum=100, bootfunc = np.median)
#print t
ybinerr[i]= (scoreatpercentile(t,84) - scoreatpercentile(t,16))/2. # not worrying about asymmetric errors right now
else:
            if bins is None:
xbins[i] = np.mean(x[xbin_number == i])
            if yweights is not None:
print i
print 'xbin = ',xbins[i]
print 'yweights = ',yweights[xbin_number == i]
print 'y = ',y[xbin_number == i]
ybins[i] = np.average(y[xbin_number ==i], weights = yweights[xbin_number == i])
ybinerr[i] = np.std(y[xbin_number == i])/np.sqrt(sum(xbin_number == i))
else:
ybins[i] = np.mean(y[xbin_number == i])
ybinerr[i] = np.std(y[xbin_number == i])/np.sqrt(sum(xbin_number == i))
colorbins[i] = np.mean(color[xbin_number == i])
if yerr:
return xbins,ybins,ybinerr,colorbins
else:
return xbins,ybins,colorbins
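# Example usage (sketch): bin 100 synthetic points into 4 equally spaced bins
#   x = np.random.uniform(0., 10., 100)
#   y = 2.*x + np.random.normal(0., 1., 100)
#   c = np.random.uniform(0., 1., 100)
#   xb, yb, yberr, cb = binxycolor(x, y, c, nbin=4)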
|
rfinn/LCS
|
python27/LCScommon.py
|
Python
|
gpl-3.0
| 21,672
|
[
"Galaxy"
] |
d86ae35ba797a258406bd51ee3b2226161b59e59e0308599b4ccbe64524319db
|
#!/usr/bin/python
import time
import os
import sys
import argparse
import MySQLdb
from homolog4 import *
# Copyright(C) 2014 David Ream
# Released under GPL version 3 licence. http://www.gnu.org/licenses/lgpl.html
# Do not remove this comment
# This program's purpose is to convert a homolog data element into an entry in our database.
# it will manage insertion of single or lists of data that the user provides.
# It is probably best to view this as an extension of the homolog class more than a separate
# piece of coding.
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
parser = argparse.ArgumentParser(description="Parse a homolog (or -m 8 BLAST result formatted by my software pipe) and save as a GBEER database.")
parser.add_argument("-i", "--infile", dest="infile", default='/home/dave/Desktop/final_code_fork/intermediate_for_debug/unfiltered_operon/atpIBEFHAGDC.txt', metavar="FILE",
help="A file that contains the information that you want to store in the GBEER format database.")
parser.add_argument("-u", "--user", dest="user", default='root', metavar="USER",
help="The user name for the GBEER database.")
parser.add_argument("-p", "--pwd", dest="pwd", default='', metavar="PASSWORD",
help="The password for the GBEER database.")
parser.add_argument("-d", "--db", dest="db", default='gene_block', metavar="DATABASE",
help="The name of the GBEER database in your installation.")
    # I do not know that we need this for the current program. I will leave it in in case I allow batch infiles at a later time.
parser.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default = os.sysconf("SC_NPROCESSORS_CONF"), type=int,
help="Currently unsed, but will allow the manipulation of the number processors that you want this script to run on. The default is every CPU that the OS reports.")
    # I need to add the database/user/pass to this. Not sure how best to accomplish this for the project, but there is a need for these data.
return parser.parse_args()
def check_options(parsed_args):
if os.path.exists(parsed_args.infile):
infile = parsed_args.infile
else:
print "The file %s does not exist." % parsed_args.infile
sys.exit()
# section of code that deals determining the number of CPU cores that will be used by the program
if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"):
num_proc = os.sysconf("SC_NPROCESSORS_CONF")
elif parsed_args.num_proc < 1:
num_proc = 1
else:
num_proc = int(parsed_args.num_proc)
user = parsed_args.user
pwd = parsed_args.pwd
db = parsed_args.db
return infile, num_proc, user, pwd, db
# This function handles the file traversal, and conversion into a list of homologs.
def file_to_homolog_list(infile):
result = []
handle = open(infile, 'r')
for item in [i.strip() for i in handle.readlines()]:
result.append(Homolog.from_blast(item))
return result
# This function will convert the homolog into a GBEER database insert + data statement, and update the database.
# I suck, so this is experimenting!
def test(usr, pwd, database):
# db=_mysql.connect(host="localhost",user="joebob", passwd="moonpie",db="thangs")
db = MySQLdb.connect("localhost", usr, pwd, database)
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
print "data", data
db.close()
def main():
start = time.time()
parsed_args = parser_code()
infile, num_proc, user, pwd, db = check_options(parsed_args)
print infile, num_proc, user, pwd, db
homolog_list = file_to_homolog_list(infile)
print "got here"
#test(user, pwd, db)
print "finished"
print time.time() - start
if __name__ == '__main__':
main()
|
reamdc1/gene_block_evolution_old
|
homolog_database_conversion.py
|
Python
|
gpl-3.0
| 4,163
|
[
"BLAST"
] |
9eeae45fb585e9743c65a62913cd921bf1cfc247fc3d71cf3e0077cde2c2a6aa
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v10.common.types import tag_snippet
__protobuf__ = proto.module(
package="google.ads.googleads.v10.resources",
marshal="google.ads.googleads.v10",
manifest={"RemarketingAction",},
)
class RemarketingAction(proto.Message):
r"""A remarketing action. A snippet of JavaScript code that will
collect the product id and the type of page people visited
(product page, shopping cart page, purchase page, general site
visit) on an advertiser's website.
Attributes:
resource_name (str):
Immutable. The resource name of the remarketing action.
Remarketing action resource names have the form:
``customers/{customer_id}/remarketingActions/{remarketing_action_id}``
id (int):
Output only. Id of the remarketing action.
This field is a member of `oneof`_ ``_id``.
name (str):
The name of the remarketing action.
This field is required and should not be empty
when creating new remarketing actions.
This field is a member of `oneof`_ ``_name``.
tag_snippets (Sequence[google.ads.googleads.v10.common.types.TagSnippet]):
Output only. The snippets used for tracking
remarketing actions.
"""
resource_name = proto.Field(proto.STRING, number=1,)
id = proto.Field(proto.INT64, number=5, optional=True,)
name = proto.Field(proto.STRING, number=6, optional=True,)
tag_snippets = proto.RepeatedField(
proto.MESSAGE, number=4, message=tag_snippet.TagSnippet,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
googleads/google-ads-python
|
google/ads/googleads/v10/resources/types/remarketing_action.py
|
Python
|
apache-2.0
| 2,279
|
[
"VisIt"
] |
d9b8106d464534249351ad7380f79709478c14ae7de9c3562caf69b1deb1449f
|
from pyjade import Compiler as _Compiler
from pyjade.runtime import attrs, escape, iteration
import tornado.template
from pyjade.utils import process
from pyjade.exceptions import CurrentlyNotSupported
ATTRS_FUNC = '__pyjade_attrs'
ESCAPE_FUNC = '__pyjade_escape'
ITER_FUNC = '__pyjade_iter'
class Compiler(_Compiler):
def compile_top(self):
return '{% autoescape None %}'
def visitCodeBlock(self,block):
self.buffer('{%% block %s %%}'%block.name)
if block.mode=='append': self.buffer('{{super()}}')
self.visitBlock(block)
if block.mode=='prepend': self.buffer('{{super()}}')
self.buffer('{% end %}')
# def visitMixin(self,mixin):
# if mixin.block:
# self.buffer('{%% macro %s(%s) %%}'%(mixin.name,mixin.args))
# self.visitBlock(mixin.block)
# self.buffer('{% end %}')
# else:
# self.buffer('{{%s(%s)}}'%(mixin.name,mixin.args))
def visitMixin(self,mixin):
raise CurrentlyNotSupported('mixin')
def visitAssignment(self,assignment):
self.buffer('{%% set %s = %s %%}'%(assignment.name,assignment.val))
def visitCode(self,code):
if code.buffer:
val = code.val.lstrip()
self.buf.append((('{{%s(%%s)}}'%ESCAPE_FUNC) if code.escape else '{{%s}}')%val)
else:
self.buf.append('{%% %s %%}'%code.val)
if code.block:
# if not code.buffer: self.buf.append('{')
self.visit(code.block)
# if not code.buffer: self.buf.append('}')
if not code.buffer:
codeTag = code.val.strip().split(' ',1)[0]
if codeTag in self.autocloseCode:
self.buf.append('{%% end%s %%}'%codeTag)
def visitEach(self,each):
self.buf.append('{%% for %s in %s(%s,%s) %%}'%(','.join(each.keys),ITER_FUNC,each.obj,len(each.keys)))
self.visit(each.block)
self.buf.append('{% end %}')
def visitConditional(self,conditional):
TYPE_CODE = {
'if': lambda x: 'if %s'%x,
'unless': lambda x: 'if not %s'%x,
'elif': lambda x: 'elif %s'%x,
'else': lambda x: 'else'
}
self.buf.append('{%% %s %%}'%TYPE_CODE[conditional.type](conditional.sentence))
if conditional.block:
self.visit(conditional.block)
for next in conditional.next:
self.visitConditional(next)
if conditional.type in ['if','unless']: self.buf.append('{% end %}')
def attributes(self,attrs):
return "{{%s(%s)}}"%(ATTRS_FUNC,attrs)
class Template(tornado.template.Template):
def __init__(self, template_string, name="<string>", *args,**kwargs):
is_jade = name.endswith(".jade")
if is_jade:
template_string = process(template_string,filename=name,compiler=Compiler)
super(Template, self).__init__(template_string, name, *args,**kwargs)
if is_jade:
self.namespace.update(
{ATTRS_FUNC:attrs,
ESCAPE_FUNC:escape,
ITER_FUNC:iteration}
)
# Patch the tornado template engine to preprocess jade templates
def patch_tornado():
tornado.template.Template = Template
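# Example (sketch; the one-line jade source below is hypothetical):
#   patch_tornado()
#   t = tornado.template.Template("h1 Hello, #{name}", name="demo.jade")
#   t.generate(name="world")  # -> <h1>Hello, world</h1>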
|
glennyonemitsu/MarkupHiveServer
|
src/pyjade/ext/tornado/__init__.py
|
Python
|
mit
| 3,275
|
[
"VisIt"
] |
97346e0719b632b87779f5762adcfc54e489a5967f2b8d53afd3ba3c50cfdab5
|
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
# In this test, we check and make sure that we can do scoring
def test_gam_model_predict():
print("Checking model scoring for gaussian")
h2o_data = h2o.import_file(path=pyunit_utils.locate("smalldata/glm_test/gaussian_20cols_10000Rows.csv"))
h2o_data["C1"] = h2o_data["C1"].asfactor()
h2o_data["C2"] = h2o_data["C2"].asfactor()
myY = "C21"
model_test_data = h2o.import_file(pyunit_utils.locate("smalldata/gam_test/predictGaussianGAM3.csv"))
    pred_gauss = buildModelCheckPredict(h2o_data, h2o_data, model_test_data, myY, ["C11", "C12", "C13"], 'gaussian', 'gaussian')
pred_auto_gauss = buildModelCheckPredict(h2o_data, h2o_data, model_test_data, myY, ["C11", "C12", "C13"], 'AUTO', 'gaussian')
pyunit_utils.compare_frames_local(pred_gauss, pred_auto_gauss, prob=1)
print("Checking model scoring for multinomial")
h2o_data = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/multinomial_10_classes_10_cols_10000_Rows_train.csv"))
h2o_data["C1"] = h2o_data["C1"].asfactor()
h2o_data["C2"] = h2o_data["C2"].asfactor()
myY = "C11"
h2o_data["C11"] = h2o_data["C11"].asfactor()
model_test_data = h2o.import_file(pyunit_utils.locate("smalldata/gam_test/predictMultinomialGAM3.csv"))
pred_multi = buildModelCheckPredict(h2o_data, h2o_data, model_test_data, myY, ["C6", "C7", "C8"], 'multinomial', 'multinomial')
pred_auto_multi = buildModelCheckPredict(h2o_data, h2o_data, model_test_data, myY, ["C6", "C7", "C8"], 'AUTO', 'multinomial')
pyunit_utils.compare_frames_local(pred_multi, pred_auto_multi, prob=1)
print("Checking model scoring for binomial")
h2o_data = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/binomial_20_cols_10KRows.csv"))
h2o_data["C1"] = h2o_data["C1"].asfactor()
h2o_data["C2"] = h2o_data["C2"].asfactor()
myY = "C21"
h2o_data["C21"] = h2o_data["C21"].asfactor()
model_test_data = h2o.import_file(pyunit_utils.locate("smalldata/gam_test/predictBinomialGAM3.csv"))
pred_bin = buildModelCheckPredict(h2o_data, h2o_data, model_test_data, myY, ["C11", "C12", "C13"], 'binomial', 'binomial')
pred_auto_bin = buildModelCheckPredict(h2o_data, h2o_data, model_test_data, myY, ["C11", "C12", "C13"], 'AUTO', 'binomial')
pyunit_utils.compare_frames_local(pred_bin, pred_auto_bin, prob=1)
print("gam coeff/varimp test completed successfully")
# add fractional binomial just to make sure it runs
print("Checking model scoring for fractionalbinomial")
h2o_data = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/binomial_20_cols_10KRows.csv"))
h2o_data["C1"] = h2o_data["C1"].asfactor()
h2o_data["C2"] = h2o_data["C2"].asfactor()
h2o_model = H2OGeneralizedAdditiveEstimator(family="fractionalbinomial", gam_columns=["C11", "C12", "C13"],
scale = [1,1,1], num_knots=[5,5,5],standardize=True,solver="irlsm")
h2o_model.train(x=["C1","C2"], y="C21", training_frame=h2o_data)
predictTest = h2o_model.predict(h2o_data)
# okay not to have assert/compare here
def buildModelCheckPredict(train_data, test_data, model_test_data, myy, gamX, family, actual_family):
numKnots = [5,5,5]
x=["C1","C2"]
h2o_model = H2OGeneralizedAdditiveEstimator(family=family, gam_columns=gamX, scale = [1,1,1], num_knots=numKnots,
standardize=True, Lambda=[0], alpha=[0], max_iterations=3,
compute_p_values=False, solver="irlsm")
h2o_model.train(x=x, y=myy, training_frame=train_data)
pred = h2o_model.predict(test_data)
if pred.ncols < model_test_data.ncols:
ncolT = model_test_data.ncols-1
model_test_data = model_test_data.drop(ncolT)
model_test_data.set_names(pred.names)
if (family == 'gaussian' or (family == 'AUTO' and actual_family == 'gaussian')):
pyunit_utils.compare_frames_local(pred, model_test_data, prob=1)
else:
pred = pred.drop('predict')
model_test_data = model_test_data.drop('predict')
pyunit_utils.compare_frames_local(pred, model_test_data, prob=1)
return pred
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gam_model_predict)
else:
test_gam_model_predict()
|
michalkurka/h2o-3
|
h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7181_check_model_scoring.py
|
Python
|
apache-2.0
| 4,642
|
[
"Gaussian"
] |
680779c681959ae1e2c037d377932daf311c4474df7c7db82fe416826d128b04
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011-2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
import gtk
class MessageBar(gtk.InfoBar):
def __init__(self, message, message_type=None):
if message_type is None:
message_type = gtk.MESSAGE_INFO
self.label = gtk.Label(message)
self.label.set_use_markup(True)
self.label.set_line_wrap(True)
self.label.set_width_chars(100)
self.label.set_alignment(0, 0)
self.label.set_padding(12, 0)
self.label.show()
gtk.InfoBar.__init__(self)
self.get_content_area().add(self.label)
self.set_message_type(message_type)
def set_message(self, message, message_type=None):
"""Sets or update a new message in the message bar. Can also be used to
change the message type
:param message: the message to be displayed
:param message_type: defines the color and urgency of a message. One of
gtk.MESSAGE_* .
"""
# If the message type changed
if message_type:
self.set_message_type(message_type)
self.label.set_text(message)
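
# Example usage (a minimal sketch; `parent_box` is an assumed container,
# not part of this module):
#
#     bar = MessageBar(u"Stock level is <b>low</b>", gtk.MESSAGE_WARNING)
#     parent_box.pack_start(bar, expand=False)
#     bar.show()
#     bar.set_message(u"Stock replenished", gtk.MESSAGE_INFO)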
|
tiagocardosos/stoq
|
stoqlib/gui/base/messagebar.py
|
Python
|
gpl-2.0
| 1,972
|
[
"VisIt"
] |
6989be03cb35566158aff4e6ba169a1b09260bd2a1f0545f607499c902fd8140
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'PreviousSurgery.uterine_fibroids'
db.delete_column(u'patient_previoussurgery', 'uterine_fibroids')
# Deleting field 'PreviousSurgery.ovarian_cysts'
db.delete_column(u'patient_previoussurgery', 'ovarian_cysts')
# Deleting field 'PreviousSurgery.fibrocystic_breasts'
db.delete_column(u'patient_previoussurgery', 'fibrocystic_breasts')
# Deleting field 'PreviousSurgery.endometriosis'
db.delete_column(u'patient_previoussurgery', 'endometriosis')
# Deleting field 'PreviousSurgery.others_please_state'
db.delete_column(u'patient_previoussurgery', 'others_please_state')
def backwards(self, orm):
# Adding field 'PreviousSurgery.uterine_fibroids'
db.add_column(u'patient_previoussurgery', 'uterine_fibroids',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Adding field 'PreviousSurgery.ovarian_cysts'
db.add_column(u'patient_previoussurgery', 'ovarian_cysts',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Adding field 'PreviousSurgery.fibrocystic_breasts'
db.add_column(u'patient_previoussurgery', 'fibrocystic_breasts',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Adding field 'PreviousSurgery.endometriosis'
db.add_column(u'patient_previoussurgery', 'endometriosis',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Adding field 'PreviousSurgery.others_please_state'
db.add_column(u'patient_previoussurgery', 'others_please_state',
self.gf('django.db.models.fields.CharField')(default='', max_length=20),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.additionalpatientinformation': {
'Meta': {'object_name': 'AdditionalPatientInformation'},
'alcohol': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cigarettes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cooking_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'literate': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'other_harmful_substances': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'psychological_stress': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'toilet_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'patient.familymedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'FamilyMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.guardian': {
'Meta': {'object_name': 'Guardian'},
'contact_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'home_address': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'relation': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.gynaecologicalhistory': {
'Meta': {'object_name': 'GynaecologicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_last_pap_smear': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_of_birth_control': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'result_pap_smear': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.immunizationhistory': {
'Meta': {'object_name': 'ImmunizationHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_injection': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'tetanus_toxoid1': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid2': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid3': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'vaccination': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.laboratorytest': {
'Meta': {'object_name': 'LaboratoryTest'},
'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'hemoglobin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'serological_test_for_syphilis': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'urinalysis': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.medicalhistory': {
'Meta': {'object_name': 'MedicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.FamilyMedicalHistory']"}),
'gynaecological_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.GynaecologicalHistory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ImmunizationHistory']"}),
'menstrual_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.MenstrualHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ObstetricHistory']"}),
'past_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PastMedicalHistory']"}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'present_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PresentMedicalHistory']"})
},
u'patient.menstrualhistory': {
'Meta': {'object_name': 'MenstrualHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'day_of_visit': ('django.db.models.fields.DateField', [], {}),
'expected_date_of_delivery': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_menstrual_periods': ('django.db.models.fields.DateField', [], {}),
'menstrual_cycle': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'poa_by_lmp': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.obstetrichistory': {
'Meta': {'object_name': 'ObstetricHistory'},
'check_if_you_have_been_miscarriages': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '2'}),
'check_if_you_have_been_pregnant': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.pastmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PastMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.patientinformation': {
'Meta': {'object_name': 'PatientInformation'},
'address': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'patient.prescription': {
'Meta': {'object_name': 'Prescription'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_prescription': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.presentmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PresentMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.previousobstetrichistory': {
'Meta': {'object_name': 'PreviousObstetricHistory'},
'age_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'birth_weight': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_pregnancy': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'obstetrical_operation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'periods_of_exclusive_feeding': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'problems': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'types_of_delivery': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.DateField', [], {})
},
u'patient.previoussurgery': {
'Meta': {'object_name': 'PreviousSurgery'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.report': {
'Meta': {'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diabetis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hiv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pregnancy': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.routinecheckup': {
'Meta': {'object_name': 'Routinecheckup'},
'abdominal_changes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'blood_pressure': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'chest_and_heart_auscultation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'fetal_movement': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_examiner': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'symptom_events': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'uterine_height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'vaginal_examination': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.signanaemia': {
'Meta': {'object_name': 'Signanaemia'},
'conjunctiva': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingernails': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'oral_mucosa': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pale_complexion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'shortness_of_breath': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tip_of_tongue': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.ultrasoundscanning': {
'AC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'BPD': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'CRL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'FL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'HC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'Meta': {'object_name': 'UltrasoundScanning'},
'amount_of_amniotic_fluid': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'gestation_age': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_examiner': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'position_of_the_baby': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'position_of_the_placenta': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'saved_ultrasound_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['patient']
|
aazhbd/medical_info01
|
patient/migrations/0023_auto__del_field_previoussurgery_uterine_fibroids__del_field_previoussu.py
|
Python
|
bsd-3-clause
| 30,622
|
[
"VisIt"
] |
b7db1484d183d6f6a39eaf9f8116701b7fad464b79f02a7845d1044faf373ea3
|
# coding: utf-8
# # Session 4: Visualizing Representations
#
# ## Assignment: Deep Dream and Style Net
#
# <p class='lead'>
# Creative Applications of Deep Learning with Google's Tensorflow
# Parag K. Mital
# Kadenze, Inc.
# </p>
#
# # Overview
#
# In this homework, we'll first walk through visualizing the
# gradients of a trained convolutional network. Recall from the last
# session that we had trained a variational convolutional
# autoencoder. We also trained a deep convolutional network. In both
# of these networks, we learned only a few tools for understanding
# how the model performs. These included measuring the loss of the
# network and visualizing the `W` weight matrices and/or
# convolutional filters of the network.
#
# During the lecture we saw how to visualize the gradients of
# Inception, Google's state of the art network for object
# recognition. This resulted in a much more powerful technique for
# understanding how a network's activations transform or accentuate
# the representations in the input space. We'll explore this more in
# Part 1.
#
# We also explored how to use the gradients of a particular layer or
# neuron within a network with respect to its input for performing
# "gradient ascent". This resulted in Deep Dream. We'll explore this
# more in Parts 2-4.
#
# We also saw how the gradients at different layers of a
# convolutional network could be optimized for another image,
# resulting in the separation of content and style losses, depending
# on the chosen layers. This allowed us to synthesize new images that
# shared another image's content and/or style, even if they came from
# separate images. We'll explore this more in Part 5.
#
# Finally, you'll package all the GIFs you create throughout this
# notebook and upload them to Kadenze.
#
#
# <a name="learning-goals"></a>
# # Learning Goals
#
# * Learn how to inspect deep networks by visualizing their gradients
# * Learn how to "deep dream" with different objective functions and
# regularization techniques
# * Learn how to "stylize" an image using content and style losses
# from different images
#
#
# # Table of Contents
#
# <!-- MarkdownTOC autolink=true autoanchor=true bracket=round -->
#
# - [Part 1 - Pretrained Networks](#part-1---pretrained-networks)
# - [Graph Definition](#graph-definition)
# - [Preprocess/Deprocessing](#preprocessdeprocessing)
# - [Tensorboard](#tensorboard)
# - [A Note on 1x1 Convolutions](#a-note-on-1x1-convolutions)
# - [Network Labels](#network-labels)
# - [Using Context Managers](#using-context-managers)
# - [Part 2 - Visualizing Gradients](#part-2---visualizing-gradients)
# - [Part 3 - Basic Deep Dream](#part-3---basic-deep-dream)
# - [Part 4 - Deep Dream Extensions](#part-4---deep-dream-extensions)
# - [Using the Softmax Layer](#using-the-softmax-layer)
# - [Fractal](#fractal)
# - [Guided Hallucinations](#guided-hallucinations)
# - [Further Explorations](#further-explorations)
# - [Part 5 - Style Net](#part-5---style-net)
# - [Network](#network)
# - [Content Features](#content-features)
# - [Style Features](#style-features)
# - [Remapping the Input](#remapping-the-input)
# - [Content Loss](#content-loss)
# - [Style Loss](#style-loss)
# - [Total Variation Loss](#total-variation-loss)
# - [Training](#training)
# - [Assignment Submission](#assignment-submission)
#
# <!-- /MarkdownTOC -->
# In[ ]:
# First check the Python version
import sys
if sys.version_info < (3,4):
print('You are running an older version of Python!\n\n',
'You should consider updating to Python 3.4.0 or',
'higher as the libraries built for this course',
'have only been tested in Python 3.4 and higher.\n')
    print('Try installing the Python 3.5 version of anaconda '
          'and then restart `jupyter notebook`:\n',
          'https://www.continuum.io/downloads\n\n')
# Now get necessary libraries
try:
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
from scipy.ndimage.filters import gaussian_filter
#import IPython.display as ipyd
import tensorflow as tf
from libs import utils, gif, datasets, dataset_utils, vae, dft, vgg16, nb_utils
except ImportError:
print("Make sure you have started notebook in the same directory",
"as the provided zip file which includes the 'libs' folder",
"and the file 'utils.py' inside of it. You will NOT be able",
"to complete this assignment unless you restart jupyter",
"notebook inside the directory created by extracting",
"the zip file or cloning the github repo. If you are still")
# dja
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('bmh')
import datetime
#np.set_printoptions(threshold=np.inf) # display FULL array (infinite)
plt.ion()
plt.figure(figsize=(4, 4))
TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S")
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from matplotlib.cbook import MatplotlibDeprecationWarning
warnings.filterwarnings("ignore", category=MatplotlibDeprecationWarning)
gifdly=0.15
def wait(n=1):
#plt.pause(n)
plt.pause(1)
#input("(press enter)")
#
# Part 1 - Pretrained Networks
#
# In the libs module, you'll see that I've included a few modules for
# loading some state of the art networks. These include:
#
# * [Inception
# v3](https://github.com/tensorflow/models/tree/master/inception)
# - This network has been trained on ImageNet and its final output
# layer is a softmax layer denoting 1 of 1000 possible objects (+ 8
# for unknown categories). This network is only about 50MB!
# * [Inception
# v5](https://github.com/tensorflow/models/tree/master/inception)
# - This network has been trained on ImageNet and its final output
# layer is a softmax layer denoting 1 of 1000 possible objects (+ 8
# for unknown categories). This network is also only about 50MB! It
# presents a few extensions to v3 which are not documented anywhere
# that I've found, as of yet...
# * [Visual Group Geometry @ Oxford's 16
# layer](http://www.robots.ox.ac.uk/~vgg/research/very_deep/)
# - This network has been trained on ImageNet and its final output
# layer is a softmax layer denoting 1 of 1000 possible objects. This
# model is nearly half a gigabyte, about 10x larger in size than the
# inception network. The trade off is that it is much slower.
# * [Visual Group Geometry @ Oxford's Face
# Recognition](http://www.robots.ox.ac.uk/~vgg/software/vgg_face/)
# - This network has been trained on the VGG Face Dataset and its
# final output layer is a softmax layer denoting 1 of 2622 different
# possible people.
# * [Illustration2Vec](http://illustration2vec.net)
# - This network has been trained on illustrations and manga and its
# final output layer is 4096 features.
# * [Illustration2Vec Tag](http://illustration2vec.net)
# - Please do not use this network if you are under the age of 18
# (seriously!)
# - This network has been trained on manga and its final output layer
# is one of 1539 labels.
#
# When we use a pre-trained network, we load a network's definition
# and its weights which have already been trained. The network's
# definition includes a set of operations such as convolutions, and
# adding biases, but all of their values, i.e. the weights, have
# already been trained.
#
# <a name="graph-definition"></a>
# ## Graph Definition
#
# In the libs folder, you will see a few new modules for loading the
# above pre-trained networks. Each module is structured similarly to
# help you understand how they are loaded and include example code
# for using them. Each module includes a `preprocess` function for
# using before sending the image to the network. And when using deep
# dream techniques, we'll be using the `deprocess` function to undo
# the `preprocess` function's manipulations.
#
# Let's take a look at loading one of these. Every network except for
# `i2v` includes a key 'labels' denoting what labels the network has
# been trained on. If you are under the age of 18, please do not use
# the `i2v_tag model`, as its labels are unsuitable for minors.
#
# Let's load the libraries for the different pre-trained networks:
# In[ ]:
from libs import vgg16, inception, i2v
# Now we can load a pre-trained network's graph and any labels.
# Explore the different networks in your own time.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Stick w/ Inception for now, and then after you see how
# the next few sections work w/ this network, come back
# and explore the other networks.
net = inception.get_inception_model(version='v5')
# net = inception.get_inception_model(version='v3')
# net = vgg16.get_vgg_model()
# net = vgg16.get_vgg_face_model()
# net = i2v.get_i2v_model()
# net = i2v.get_i2v_tag_model()
# Each network returns a dictionary with the following keys defined.
# Every network has a key for "labels" except for "i2v", since this
# is a feature only network, e.g. an unsupervised network, and does
# not have labels.
# In[ ]:
print("net.keys: ", net.keys())
#
# Preprocess/Deprocessing
#
# Each network has a preprocessing/deprocessing function which we'll
# use before sending the input to the network. This preprocessing
# function is slightly different for each network. Recall from the
# previous sessions what preprocessing we had done before sending an
# image to a network. We would often normalize the input by
# subtracting the mean and dividing by the standard deviation. We'd
# also crop/resize the input to a standard size. We'll need to do
# this for each network except for the Inception network, which is a
# true convolutional network and does not require us to do this (will
# be explained in more depth later).
#
# Whenever we `preprocess` the image, and want to visualize the
# result of adding back the gradient to the input image (when we use
# deep dream), we'll need to use the `deprocess` function stored in
# the dictionary. Let's explore how these work. We'll confirm this is
# performing the inverse operation: let's try to preprocess the
# image, then I'll have you try to deprocess it.
# In[ ]:
# First, let's get an image:
og = plt.imread('clinton.png')[..., :3]
print("og min/max: ", og.min(), og.max())
#plt.title("clinton")
#plt.imshow(og)
#wait()
# Let's now try preprocessing this image. The function for
# preprocessing is inside the module we used to load it. For
# instance, for `vgg16`, we can find the `preprocess` function as
# `vgg16.preprocess`, or for `inception`, `inception.preprocess`, or
# for `i2v`, `i2v.preprocess`. Or, we can just use the key
# `preprocess` in our dictionary `net`, as this is just convenience
# for us to access the corresponding preprocess function.
# In[ ]:
# Now call the preprocess function. This will preprocess our
# image ready for being input to the network, except for changes
# to the dimensions. I.e., we will still need to convert this
# to a 4-dimensional Tensor once we input it to the network.
# We'll see how that works later.
img = net['preprocess'](og)
print("preprocessed min/max:", img.min(), img.max())
wait()
# Let's undo the preprocessing. Recall that the `net` dictionary has
# the key `deprocess` which is the function we need to use on our
# processed image, `img`.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
deprocessed = net['deprocess'](img)
plt.title("deprocessed")
#plt.imshow(deprocessed)
#plt.show()
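# In[ ]:
# A quick sanity check (a minimal sketch, not part of the assignment):
# since `deprocess` is meant to invert `preprocess`, a round trip through
# both should land us back near the original image's value range.
roundtrip = net['deprocess'](net['preprocess'](og))
print("roundtrip min/max:", roundtrip.min(), roundtrip.max())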
#
# Tensorboard
#
# I've added a utility module called `nb_utils` which includes a
# function `show_graph`. This will use
# [Tensorboard](https://www.tensorflow.org/versions/r0.10/how_tos/graph_viz/index.html)
# to draw the computational graph defined by the various Tensorflow
# functions. I didn't go over this during the lecture because there
# just wasn't enough time! But explore it in your own time if it
# interests you, as it is a really unique tool which allows you to
# monitor your network's training progress via a web interface. It
# even lets you monitor specific variables or processes within the
# network, e.g. the reconstruction of an autoencoder, without having
# to print to the console as we've been doing. We'll just be using it
# to draw the pretrained network's graphs using the utility function
# I've given you.
#
# Be sure to interact with the graph and click on the various
# modules.
#
# For instance, if you've loaded the `inception` v5 network, locate
# the "input" to the network. This is where we feed the image, the
# input placeholder (typically what we've been denoting as `X` in our
# own networks). From there, it goes to the "conv2d0" variable scope
# (i.e. this uses the code: `with tf.variable_scope("conv2d0")` to
# create a set of operations with the prefix "conv2d0/". If you
# expand this scope, you'll see another scope, "pre_relu". This is
# created using another `tf.variable_scope("pre_relu")`, so that any
# new variables will have the prefix "conv2d0/pre_relu". Finally,
# inside here, you'll see the convolution operation (`tf.nn.conv2d`)
# and the 4d weight tensor, "w" (e.g. created using
# `tf.get_variable`), used for convolution (and so has the name,
# "conv2d0/pre_relu/w". Just after the convolution is the addition of
# the bias, b. And finally after exiting the "pre_relu" scope, you
# should be able to see the "conv2d0" operation which applies the
# relu nonlinearity. In summary, that region of the graph can be
# created in Tensorflow like so:
#
# ```python
# input = tf.placeholder(...)
# with tf.variable_scope('conv2d0'):
# with tf.variable_scope('pre_relu'):
# w = tf.get_variable(...)
#         h = tf.nn.conv2d(input, w, ...)
# b = tf.get_variable(...)
# h = tf.nn.bias_add(h, b)
# h = tf.nn.relu(h)
# ```
# In[ ]:
# REQUIRES TENSORBOARD
# nb_utils.show_graph(net['graph_def'])
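# If you'd rather use a standalone Tensorboard instance, one way to do it
# (a sketch; the exact writer class depends on your tensorflow version --
# older releases used tf.train.SummaryWriter, newer ones
# tf.summary.FileWriter) is to write the graph out to a log directory:
#
# tmp_g = tf.Graph()
# with tmp_g.as_default():
#     tf.import_graph_def(net['graph_def'], name='net')
# writer = tf.summary.FileWriter('logs', graph=tmp_g)
# writer.close()
#
# and then run `tensorboard --logdir=logs` from a shell.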
# If you open up the "mixed3a" node above (double click on it),
# you'll see the first "inception" module. This network encompasses a
# few advanced concepts that we did not have time to discuss during
# the lecture, including residual connections, feature concatenation,
# parallel convolution streams, 1x1 convolutions, and including
# negative labels in the softmax layer. I'll expand on the 1x1
# convolutions here, but please feel free to skip ahead if this isn't
# of interest to you.
#
#
# A Note on 1x1 Convolutions
#
# The 1x1 convolutions are setting the ksize parameter of the
# kernels to 1. This is effectively allowing you to change the
# number of dimensions. Remember that you need a 4-d tensor as input
# to a convolution. Let's say its dimensions are N x W x H x C(I),
# where C(I) represents the number of channels the image has. Let's
# say it is an RGB image, then C(I) would be 3. Or later in the
# network, if we have already convolved it, it might be 64 channels
# instead. Regardless, when you convolve it w/ a K(H) x K(W) x C(I)
# x C(O) filter, where K(H) is 1 and K(W) is also 1, then the
# filters size is: 1 x 1 x C(I) and this is perfomed for each output
# channel C(O). What this is doing is filtering the information only
# in the channels dimension, not the spatial dimensions. The output
# of this convolution will be a N x W x H x C(O) output tensor. The
# only thing that changes in the output is the number of output
# filters.
# The 1x1 convolution operation is essentially reducing the amount
# of information in the channels dimensions before performing a much
# more expensive operation, e.g. a 3x3 or 5x5 convolution.
# Effectively, it is a very clever trick for dimensionality
# reduction used in many state of the art convolutional networks.
# Another way to look at it is that it is preserving the spatial
# information, but at each location, there is a fully connected
# network taking all the information from every input channel, C(I),
# and reducing it down to C(O) channels (or could easily also be up,
# but that is not the typical use case for this). So it's not really
# a convolution, but we can use the convolution operation to perform
# it at every location in our image.
# If you are interested in reading more about this architecture, I
# highly encourage you to read Network in Network, Christian
# Szegedy's work on the Inception network, Highway Networks,
# Residual Networks, and Ladder Networks.
# In this course, we'll stick to focusing on the applications of
# these, while trying to delve as much into the code as possible.
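# To make the idea concrete, here is a minimal sketch of a 1x1
# convolution (the shapes are made up for illustration, not part of the
# assignment). It reduces 64 input channels down to 16 while leaving the
# spatial dimensions untouched:
# In[ ]:
g_1x1 = tf.Graph()
with g_1x1.as_default():
    h_in = tf.placeholder(tf.float32, [None, 32, 32, 64], name='h_in')
    w_1x1 = tf.get_variable('w_1x1', [1, 1, 64, 16])
    h_out = tf.nn.conv2d(h_in, w_1x1, strides=[1, 1, 1, 1], padding='SAME')
    print("1x1 conv output shape:", h_out.get_shape())  # -> (?, 32, 32, 16)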
#
# Network Labels
#
# Let's now look at the labels:
# In[ ]:
#print("inception net labels: ")
#print(net['labels'])
# In[ ]:
label_i = 851
print("labels[", label_i, "]", net['labels'][label_i])
#
# Using Context Managers
#
# Up until now, we've mostly used a single `tf.Session` within a
# notebook and didn't give it much thought. Now that we're using some
# bigger models, we're going to have to be more careful. Using a big
# model and being careless with our session can result in a lot of
# unexpected behavior, program crashes, and out of memory errors. The
# VGG network and the I2V networks are quite large. So we'll need to
# start being more careful with our sessions using context managers.
#
# Let's see how this works w/ VGG:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Load the VGG network. Scroll back up to where we loaded the
# inception
# network if you are unsure. It is inside the "vgg16" module...
net = vgg16.get_vgg_model()
assert(net['labels'][0] == (0, 'n01440764 tench, Tinca tinca'))
# In[ ]:
# Let's explicity use the CPU, since we don't gain anything using the
# GPU
# when doing Deep Dream (it's only a single image, benefits come w/
# many images).
device = '/cpu:0'
# We'll now explicitly create a graph
g = tf.Graph()
# And here is a context manager. We use the python "with" notation to
# create a context
# and create a session that only exists within this indent, as soon
# as we leave it,
# the session is automatically closed! We also tel the session which
# graph to use.
# We can pass a second context after the comma,
# which we'll use to be explicit about using the CPU instead of a
# GPU.
with tf.Session(graph=g) as sess, g.device(device):
# Now load the graph_def, which defines operations and their values into `g`
tf.import_graph_def(net['graph_def'], name='net')
# In[ ]:
# Now we can get all the operations that belong to the graph `g`:
names = [op.name for op in g.get_operations()]
print("op.names[0..5]:")
print(names[0:5])
# <a name="part-2---visualizing-gradients"></a>
# # Part 2 - Visualizing Gradients
#
# Now that we know how to load a network and extract layers from it,
# let's grab only the pooling layers:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# First find all the pooling layers in the network. You can
# use list comprehension to iterate over all the "names" we just
# created, finding whichever ones have the name "pool" in them.
# Then be sure to append a ":0" to the names
features = [name+":0" for name in names if 'pool' in name.split()[-1]]
# Let's print them
print("features: ", features)
# This is what we want to have at the end. You could just copy this
# list
# if you are stuck!
assert(features == ['net/pool1:0', 'net/pool2:0', 'net/pool3:0', 'net/pool4:0', 'net/pool5:0'])
# Let's also grab the input layer:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Use the function 'get_tensor_by_name' and the 'names' array to help
# you get the first tensor in the network. Remember you have to add ":0"
# to the name to get the output of an operation which is the tensor.
x = g.get_tensor_by_name(names[0]+":0")
assert(x.name == 'net/images:0')
# We'll now try to find the gradient activation that maximizes a
# layer with respect to the input layer `x`.
# In[ ]:
def plot_gradient(img, x, feature, g, device='/cpu:0'):
"""Let's visualize the network's gradient activation
when backpropagated to the original input image. This
is effectively telling us which pixels contribute to the
    predicted layer, class, or given neuron within the layer."""
# We'll be explicit about the graph and the device
# by using a context manager:
with tf.Session(graph=g) as sess, g.device(device):
saliency = tf.gradients(tf.reduce_mean(feature), x)
this_res = sess.run(saliency[0], feed_dict={x: img})
grad = this_res[0] / np.max(np.abs(this_res))
return grad
# Let's try this w/ an image now. We're going to use the
# `plot_gradient` function to help us. This is going to take our
# input image, run it through the network up to a layer, find the
# gradient of the mean of that layer's activation with respect to the
# input image, then backprop that gradient back to the input layer.
# We'll then visualize the gradient by normalizing its values using
# the `utils.normalize` function.
# In[ ]:
"""
og = plt.imread('clinton.png')[..., :3]
img = net['preprocess'](og)[np.newaxis]
for i in range(len(features)):
plt.title("feature "+str(i))
grad = plot_gradient(img, x, g.get_tensor_by_name(features[i]), g)
plt.imshow(utils.normalize(grad))
wait(1)
"""
#
# Part 3 - Basic Deep Dream
#
# In the lecture we saw how Deep Dreaming takes the backpropagated
# gradient activations and simply adds it to the image, running the
# same process again and again in a loop. We also saw many tricks one
# can add to this idea, such as infinitely zooming into the image by
# cropping and scaling, adding jitter by randomly moving the image
# around, or adding constraints on the total activations.
#
# Have a look here for inspiration:
#
#
# https://research.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html
#
#
#
# https://photos.google.com/share/AF1QipPX0SCl7OzWilt9LnuQliattX4OUCj_8EP65_cTVnBmS1jnYgsGQAieQUc1VQWdgQ?key=aVBxWjhwSzg2RjJWLWRuVFBBZEN1d205bUdEMnhB
#
#
# https://mtyka.github.io/deepdream/2016/02/05/bilateral-class-vis.html
#
# Let's stick the necessary bits in a function and try exploring how
# deep dream amplifies the representations of the chosen layers:
# In[ ]:
def dream(img, gradient, step, net, x, n_iterations=50, plot_step=10, name='dream'):
print("Dreaming "+name+"...")
# Copy the input image as we'll add the gradient to it in a loop
img_copy = img.copy()
#fig, axs = plt.subplots(1, n_iterations // plot_step, figsize=(10, 5))
with tf.Session(graph=g) as sess, g.device(device):
for it_i in range(n_iterations):
print("dream it: ",it_i,"/",n_iterations)
# This will calculate the gradient of the layer we chose with respect to the input image.
this_res = sess.run(gradient[0], feed_dict={x: img_copy})[0]
# Let's normalize it by the maximum activation
this_res /= (np.max(np.abs(this_res) + 1e-8))
# Or alternatively, we can normalize by standard deviation
# this_res /= (np.std(this_res) + 1e-8)
# Or we could use the `utils.normalize function:
# this_res = utils.normalize(this_res)
# Experiment with all of the above options. They will drastically
# affect the resulting dream, and really depend on the network
# you use, and the way the network handles normalization of the
# input image, and the step size you choose! Lots to explore!
# Then add the gradient back to the input image
# What does this gradient represent?
# It says what direction we should move our input
# in order to meet our objective stored in "gradient"
img_copy += this_res * step
# Plot the image
if (it_i + 1) % plot_step == 0:
m = net['deprocess'](img_copy[0])
plt.title(name+", it: "+str(it_i))
plt.imshow(m)
wait(1)
# In[ ]:
# We'll run it for 3 iterations
n_iterations = 3
# Think of this as our learning rate. This is how much of
# the gradient we'll add back to the input image
step = 1.0
# Every 1 iterations, we'll plot the current deep dream
plot_step = 1
# Let's now try running Deep Dream for every feature, each of our 5
# pooling layers. We'll need to get the layer corresponding to our
# feature. Then find the gradient of this layer's mean activation
# with respect to our input, `x`. Then pass these to our `dream`
# function. This can take a while (about 10 minutes using the CPU on
# my Macbook Pro).
# In[ ]:
"""
for feature_i in range(len(features)):
with tf.Session(graph=g) as sess, g.device(device):
# Get a feature layer
layer = g.get_tensor_by_name(features[feature_i])
# Find the gradient of this layer's mean activation
# with respect to the input image
gradient = tf.gradients(tf.reduce_mean(layer), x)
# Dream w/ our image
dream(img, gradient, step, net, x, n_iterations=n_iterations, plot_step=plot_step, name=features[feature_i])
wait(1)
#input("press...")
"""
# Instead of using an image, we can use an image of noise and see how
# it "hallucinates" the representations that the layer most responds
# to:
# In[ ]:
noise = net['preprocess'](
np.random.rand(256, 256, 3) * 0.1 + 0.45)[np.newaxis]
plt.title("noise")
plt.imshow(net['deprocess'](noise[0]))
wait(1)
# We'll do the same thing as before, now w/ our noise image:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
"""
for feature_i in range(len(features)):
with tf.Session(graph=g) as sess, g.device(device):
# Get a feature layer
layer = g.get_tensor_by_name(features[feature_i])
# Find the gradient of this layer's mean activation
# with respect to the input image
gradient = tf.gradients(tf.reduce_mean(layer), x)
# Dream w/ the noise image. Complete this!
dream(noise, gradient, step, net, x, n_iterations=n_iterations, plot_step=plot_step, name=features[feature_i])
wait(1)
#input("press...")
"""
# <a name="part-4---deep-dream-extensions"></a>
# # Part 4 - Deep Dream Extensions
#
# As we saw in the lecture, we can also use the final softmax layer
# of a network to use during deep dream. This allows us to be
# explicit about the object we want hallucinated in an image.
#
# <a name="using-the-softmax-layer"></a>
# ## Using the Softmax Layer
#
# Let's get another image to play with, preprocess it, and then make
# it 4-dimensional.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Load your own image here
og = plt.imread("anu455eye.jpg")
plt.title("orig")
plt.imshow(og)
wait(1)
# Preprocess the image and make sure it is 4-dimensional by adding a
# new axis to the 0th dimension:
img = net['preprocess'](og)[np.newaxis]
assert(img.ndim == 4)
# In[ ]:
# Let's get the softmax layer
print(names[-2])
layer = g.get_tensor_by_name(names[-2] + ":0")
# And find its shape
with tf.Session(graph=g) as sess, g.device(device):
layer_shape = tf.shape(layer).eval(feed_dict={x:img})
# We can find out how many neurons it has by feeding it an image and
# calculating the shape. The number of output channels is the last
# dimension.
n_els = layer_shape[-1]
# In[ ]:
# Let's pick a label. First let's print out every label and then find
# one we like:
#print("vgg net labels:")
#print(net['labels'])
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Pick a neuron. Or pick a random one. This should be 0-n_els
neuron_i = 208 # 208=lab dog, 949=strawberry
print("net label["+str(neuron_i)+": ", net['labels'][neuron_i])
assert(neuron_i >= 0 and neuron_i < n_els)
# In[ ]:
# And we'll create an activation of this layer which is very close to
# 0
layer_vec = np.ones(layer_shape) / 100.0
# Except for the randomly chosen neuron which will be very close to 1
layer_vec[..., neuron_i] = 0.99
# Let's decide on some parameters of our deep dream. We'll need to
# decide how many iterations to run for. And we'll plot the result
# every few iterations, also saving it so that we can produce a GIF.
# And at every iteration, we need to decide how much to ascend our
# gradient.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Explore different parameters for this section.
n_iterations = 51
plot_step = 5
# If you use a different network, you will definitely need to
# experiment
# with the step size, as each network normalizes the input image
# differently.
step = 0.2
# Now let's dream. We're going to define a context manager to create
# a session and use our existing graph, and make sure we use the CPU
# device, as there is no gain in using GPU, and we have much more CPU
# memory than GPU memory.
# In[ ]:
"""
imgs = []
with tf.Session(graph=g) as sess, g.device(device):
gradient = tf.gradients(tf.reduce_max(layer), x)
# Copy the input image as we'll add the gradient to it in a loop
img_copy = img.copy()
with tf.Session(graph=g) as sess, g.device(device):
for it_i in range(n_iterations):
print("softmax it: ", it_i,"/",n_iterations)
# This will calculate the gradient of the layer we chose with respect to the input image.
this_res = sess.run(gradient[0], feed_dict={
x: img_copy, layer: layer_vec})[0]
# Let's normalize it by the maximum activation
this_res /= (np.max(np.abs(this_res) + 1e-8))
# Or alternatively, we can normalize by standard deviation
# this_res /= (np.std(this_res) + 1e-8)
# Then add the gradient back to the input image
        # Think about what this gradient represents:
# It says what direction we should move our input
# in order to meet our objective stored in "gradient"
img_copy += this_res * step
# Plot the image
if (it_i + 1) % plot_step == 0:
m = net['deprocess'](img_copy[0])
plt.imsave(fname='s4_dream_vgg_last_'+TID+'.png', arr=m)
#plt.figure(figsize=(5, 5))
#plt.grid('off')
plt.title("softmax it: "+str(it_i))
plt.imshow(m)
#plt.show()
imgs.append(m)
wait(1)
# In[ ]:
# Save the gif
gif.build_gif(imgs, saveto='s4_softmax_'+TID+'.gif', interval=gifdly)
"""
# <a name="fractal"></a>
# ## Fractal
#
# During the lecture we also saw a simple trick for creating an
# infinite fractal: crop the image and then resize it. This can
# produce some lovely aesthetics and really show some strong object
# hallucinations if left long enough and with the right parameters
# for step size/normalization/regularization. Feel free to experiment
# with the code below, adding your own regularizations as shown in
# the lecture to produce different results!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
"""
n_iterations = 300
plot_step = 5
step = 0.1
crop = 1
imgs = []
n_imgs, height, width, *ch = img.shape
with tf.Session(graph=g) as sess, g.device(device):
# Explore changing the gradient here from max to mean
# or even try using different concepts we learned about
# when creating style net, such as using a total variational
# loss on `x`.
gradient = tf.gradients(tf.reduce_max(layer), x)
# Copy the input image as we'll add the gradient to it in a loop
img_copy = img.copy()
with tf.Session(graph=g) as sess, g.device(device):
for it_i in range(n_iterations):
print("fractal it: ", it_i,"/",n_iterations)
# This will calculate the gradient of the layer
# we chose with respect to the input image.
this_res = sess.run(gradient[0], feed_dict={
x: img_copy, layer: layer_vec})[0]
# This is just one way we could normalize the
# gradient. It helps to look at the range of your image's
# values, e.g. if it is 0 - 1, or -115 to +115,
# and then consider the best way to normalize the gradient.
# For some networks, it might not even be necessary
# to perform this normalization, especially if you
# leave the dream to run for enough iterations.
# this_res = this_res / (np.std(this_res) + 1e-10)
this_res = this_res / (np.max(np.abs(this_res)) + 1e-10)
# Then add the gradient back to the input image
        # Think about what this gradient represents:
# It says what direction we should move our input
# in order to meet our objective stored in "gradient"
img_copy += this_res * step
# Optionally, we could apply any number of regularization
        # techniques... Try exploring different ways of regularizing the
        # gradient ascent process. If you are adventurous, you can
# also explore changing the gradient above using a
# total variational loss, as we used in the style net
# implementation during the lecture. I leave that to you
# as an exercise!
# Crop a 1 pixel border from height and width
img_copy = img_copy[:, crop:-crop, crop:-crop, :]
# Resize (Note: in the lecture, we used scipy's resize which
# could not resize images outside of 0-1 range, and so we had
# to store the image ranges. This is a much simpler resize
# method that allows us to `preserve_range`.)
img_copy = resize(img_copy[0], (height, width), order=3,
clip=False, preserve_range=True
)[np.newaxis].astype(np.float32)
# Plot the image
if (it_i + 1) % plot_step == 0:
m = net['deprocess'](img_copy[0])
#plt.grid('off')
plt.title("fractal it: "+str(it_i))
plt.imshow(m)
#plt.show()
imgs.append(m)
wait(1)
# Create a GIF
gif.build_gif(imgs, saveto='s4_fractal_'+TID+'.gif', interval=gifdly)
"""
# <a name="guided-hallucinations"></a>
# ## Guided Hallucinations
#
# Instead of following the gradient of an arbitrary mean or max of a
# particular layer's activation, or a particular object that we want
# to synthesize, we can also try to guide our image to look like
# another image. One way to try this is to take one image, the guide,
# and find the features at a particular layer or layers. Then, we
# take our synthesis image and find the gradient which makes its own
# layer activations look like those of the guide image.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Replace these with your own images!
# (DREAM~ORIGIN; GUIDE~TARGET)
guide_og = plt.imread("loremipsum.png")[..., :3]
dream_og = plt.imread(os.path.expanduser("~/fot2.jpg"))[..., :3]
#guide_og = plt.imread(os.path.expanduser("~/fot2.jpg"))[..., :3]
#dream_og = plt.imread("loremipsum.png")[..., :3]
assert(guide_og.ndim == 3 and guide_og.shape[-1] == 3)
assert(dream_og.ndim == 3 and dream_og.shape[-1] == 3)
# Preprocess both images:
# In[ ]:
guide_img = net['preprocess'](guide_og, dsize=(448,448))[np.newaxis]
dream_img = net['preprocess'](dream_og, dsize=(448,448))[np.newaxis]
#fig, axs = plt.subplots(1, 2, figsize=(7, 4))
plt.title("guide_og")
plt.imshow(guide_og)
wait(3)
plt.title("dream_og")
plt.imshow(dream_og)
wait(3)
# Like w/ Style Net, we are going to measure how similar the features
# in the guide image are to those of the dream image. In order to do that,
# we'll calculate the dot product. Experiment with other measures
# such as l1 or l2 loss to see how this impacts the resulting Dream!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
x = g.get_tensor_by_name(names[0] + ":0")
# Experiment with the weighting
feature_loss_weight = 1.0
with tf.Session(graph=g) as sess, g.device(device):
feature_loss = tf.Variable(0.0)
# Explore different layers/subsets of layers. This is just an example.
for feature_i in features[3:5]:
# Get the activation of the feature
layer = g.get_tensor_by_name(feature_i)
# Do the same for our guide image
guide_layer = sess.run(layer, feed_dict={x: guide_img})
# Now we need to measure how similar they are!
# We'll use the dot product, which requires us to first reshape both
# features to a 2D vector. But you should experiment with other ways
# of measuring similarity such as l1 or l2 loss.
# Reshape each layer to 2D vector
layer = tf.reshape(layer, [-1, 1])
guide_layer = guide_layer.reshape(-1, 1)
# Now calculate their dot product
correlation = tf.matmul(guide_layer.T, layer)
# And weight the loss by a factor so we can control its influence
feature_loss += feature_loss_weight * correlation
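        # An illustrative alternative (kept commented so the objective above
        # is unchanged): use an l2 loss on the activation difference instead
        # of the dot product. Note the sign flip, since feature_loss gets
        # maximized while an l2 distance should be minimized:
        # feature_loss -= feature_loss_weight * tf.nn.l2_loss(
        #     (layer - guide_layer) / guide_layer.size)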
# We'll now use another measure that we saw when developing Style Net
# during the lecture. This measures the pixel-to-pixel difference of
# neighboring pixels. By optimizing a gradient that keeps the mean of
# these differences small, we are asking for an image whose neighboring
# pixels vary little. This smooths our image in much the same way that
# blurring it with a Gaussian did.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
n_img, height, width, ch = dream_img.shape
# We'll weight the overall contribution of the total variational loss
# Experiment with this weighting
tv_loss_weight = 1.0
with tf.Session(graph=g) as sess, g.device(device):
# Penalize variations in neighboring pixels, enforcing smoothness
dx = tf.square(x[:, :height - 1, :width - 1, :] - x[:, :height - 1, 1:, :])
dy = tf.square(x[:, :height - 1, :width - 1, :] - x[:, 1:, :width - 1, :])
# We will calculate their difference raised to a power to push smaller
# differences closer to 0 and larger differences higher.
    # Experiment w/ the power you raise this to and see how it affects the result
tv_loss = tv_loss_weight * tf.reduce_mean(tf.pow(dx + dy, 1.2))
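    # Illustrative variations (commented out): a higher exponent concentrates
    # the penalty on the largest neighboring differences, while a lower one
    # spreads it more evenly:
    # tv_loss = tv_loss_weight * tf.reduce_mean(tf.pow(dx + dy, 2.0))
    # tv_loss = tv_loss_weight * tf.reduce_mean(tf.pow(dx + dy, 1.05))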
# Now we train just like before, except we'll need to combine our two
# loss terms, `feature_loss` and `tv_loss` by simply adding them! The
# one thing we have to keep in mind is that we want to minimize the
# `tv_loss` while maximizing the `feature_loss`. That means we'll
# need to use the negative `tv_loss` and the positive `feature_loss`.
# As an experiment, try just optimizing the `tv_loss` and removing
# the `feature_loss` from the `tf.gradients` call. What happens?
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Experiment with the step size!
n_iterations = 600
plot_step=6
step = 0.1
imgs = []
with tf.Session(graph=g) as sess, g.device(device):
# Experiment with just optimizing the tv_loss or negative tv_loss to understand what it is doing!
gradient = tf.gradients(-tv_loss + feature_loss, x)
# Copy the input image as we'll add the gradient to it in a loop
img_copy = dream_img.copy()
with tf.Session(graph=g) as sess, g.device(device):
sess.run(tf.initialize_all_variables())
for it_i in range(n_iterations):
print("guided it: ", it_i,"/",n_iterations)
# This will calculate the gradient of the layer we chose with respect to the input image.
this_res = sess.run(gradient[0], feed_dict={x: img_copy})[0]
# Let's normalize it by the maximum activation
this_res /= (np.max(np.abs(this_res) + 1e-8))
# Or alternatively, we can normalize by standard deviation
# this_res /= (np.std(this_res) + 1e-8)
# Then add the gradient back to the input image
        # Think about what this gradient represents:
# It says what direction we should move our input
# in order to meet our objective stored in "gradient"
img_copy += this_res * step
# Plot the image
if (it_i + 1) % plot_step == 0:
m = net['deprocess'](img_copy[0])
#plt.figure(figsize=(5, 5))
#plt.grid('off')
plt.title("guided it: "+str(it_i))
plt.imshow(m)
#plt.show()
imgs.append(m)
plt.imsave(fname='s4_guided_last_'+TID+'.png', arr=m)
wait(1)
gif.build_gif(imgs, saveto='s4_guided_'+TID+'.gif', interval=gifdly)
# <a name="further-explorations"></a>
# ## Further Explorations
#
# In the `libs` module, I've included a `deepdream` module which has
# two functions for performing Deep Dream and the Guided Deep Dream.
# Feel free to explore these to create your own deep dreams.
#
# <a name="part-5---style-net"></a>
# # Part 5 - Style Net
#
# We'll now work on creating our own style net implementation. We've
# seen all the steps for how to do this during the lecture, and you
# can always refer to the [Lecture Transcript](lecture-4.ipynb) if
# you need to. I want you to explore using different networks and
# different layers in creating your content and style losses. This is
# completely unexplored territory so it can be frustrating to find
# things that work. Think of this as your empty canvas! If you are
# really stuck, you will find a `stylenet` implementation under the
# `libs` module that you can use instead.
#
# Have a look here for inspiration:
#
#
# https://mtyka.github.io/code/2015/10/02/experiments-with-style-transfer.html
#
# http://kylemcdonald.net/stylestudies/
#
# <a name="network"></a>
# ## Network
#
# Let's reset the graph and load up a network. I'll include code here
# for loading up any of our pretrained networks so you can explore
# each of them!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
sess.close()
tf.reset_default_graph()
# Stick w/ VGG for now, and then after you see how
# the next few sections work w/ this network, come back
# and explore the other networks.
net = vgg16.get_vgg_model()
# net = vgg16.get_vgg_face_model()
# net = inception.get_inception_model(version='v5')
# net = inception.get_inception_model(version='v3')
# net = i2v.get_i2v_model()
# net = i2v.get_i2v_tag_model()
# In[ ]:
# Let's explicitly use the CPU, since we don't gain anything using the
# GPU when doing Deep Dream (it's only a single image; the benefits
# come w/ many images).
device = '/cpu:0'
# We'll now explicitly create a graph
g = tf.Graph()
# Let's now import the graph definition into our newly created Graph
# using a context manager and specifying that we want to use the CPU.
# In[ ]:
# And here is a context manager. We use the python "with" notation to
# create a context and create a session that only exists within this
# indent; as soon as we leave it, the session is automatically closed!
# We also tell the session which graph to use. We can pass a second
# context after the comma, which we'll use to be explicit about using
# the CPU instead of a GPU.
with tf.Session(graph=g) as sess, g.device(device):
# Now load the graph_def, which defines operations and their values into `g`
tf.import_graph_def(net['graph_def'], name='net')
# Let's then grab the names of every operation in our network:
# In[ ]:
names = [op.name for op in g.get_operations()]
# Now we need an image for our content image and another one for our
# style image.
# In[ ]:
content_og = plt.imread(os.path.expanduser("~/fot2.jpg"))[..., :3]
#content_og = plt.imread("anu455.jpg")[..., :3]
style_og = plt.imread('loremipsum.png')[..., :3]
#fig, axs = plt.subplots(1, 2)
#axs[0].grid('off')
plt.title('Content Image')
plt.imshow(content_og)
wait(3)
plt.title('Style Image')
plt.imshow(style_og)
wait(3)
# We'll save these with a specific name to include in your submission
#plt.imsave(arr=content_og, fname='s4_content_'+TID+'.png')
#plt.imsave(arr=style_og, fname='s4_style_'+TID+'.png')
# In[ ]:
content_img = net['preprocess'](content_og, dsize=(448,448))[np.newaxis]
style_img = net['preprocess'](style_og, dsize=(448,448))[np.newaxis]
# Let's see what the network classifies these images as just for fun:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Grab the tensor defining the input to the network
x = g.get_tensor_by_name(names[0] + ':0')
# And grab the tensor defining the softmax layer of the network
softmax = g.get_tensor_by_name(names[-2] + ':0')
for img in [content_img, style_img]:
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
# Remember from the lecture that we have to set the dropout
# "keep probability" to 1.0.
res = softmax.eval(feed_dict={x: img,
'net/dropout_1/random_uniform:0': [[1.0]],
'net/dropout/random_uniform:0': [[1.0]]})[0]
print([(res[idx], net['labels'][idx])
for idx in res.argsort()[-5:][::-1]])
# <a name="content-features"></a>
# ## Content Features
#
# We're going to need to find the layer or layers we want to use to
# help us define our "content loss". Recall from the lecture when we
# used VGG, we used the 4th convolutional layer.
# In[ ]:
print("graph names:")
print(names)
# Pick a layer to use for the content features. If you aren't
# using VGG, remember to get rid of the dropout stuff!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Experiment w/ different layers here. You'll need to change this
# if you use another network!
content_layer = 'net/conv3_2/conv3_2:0'
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
content_features = g.get_tensor_by_name(content_layer).eval(
session=sess,
feed_dict={x: content_img,
'net/dropout_1/random_uniform:0': [[1.0]],
'net/dropout/random_uniform:0': [[1.0]]})
# <a name="style-features"></a>
# ## Style Features
#
# Let's do the same thing now for the style features. We'll use more
# than 1 layer though so we'll append all the features in a list. If
# you aren't using VGG remember to get rid of the dropout stuff!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
# Experiment with different layers and layer subsets. You'll need
# to change these if you use a different network!
style_layers = ['net/conv1_1/conv1_1:0',
'net/conv2_1/conv2_1:0',
'net/conv3_1/conv3_1:0',
'net/conv4_1/conv4_1:0',
'net/conv5_1/conv5_1:0']
style_activations = []
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
for style_i in style_layers:
style_activation_i = g.get_tensor_by_name(style_i).eval(
feed_dict={x: style_img,
'net/dropout_1/random_uniform:0': [[1.0]],
'net/dropout/random_uniform:0': [[1.0]]})
style_activations.append(style_activation_i)
# Now we find the gram matrix which we'll use to optimize our
# features.
# In[ ]:
style_features = []
for style_activation_i in style_activations:
s_i = np.reshape(style_activation_i, [-1, style_activation_i.shape[-1]])
gram_matrix = np.matmul(s_i.T, s_i) / s_i.size
style_features.append(gram_matrix.astype(np.float32))
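# A quick illustrative sanity check: each gram matrix is square with side
# equal to that layer's channel count, and symmetric by construction.
for gram_i, act_i in zip(style_features, style_activations):
    assert gram_i.shape == (act_i.shape[-1], act_i.shape[-1])
    assert np.allclose(gram_i, gram_i.T, atol=1e-3)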
# <a name="remapping-the-input"></a>
# ## Remapping the Input
#
# We're almost done building our network. We just have to change the
# input to the network to become "trainable". Instead of a
# placeholder, we'll have a `tf.Variable`, which allows it to be
# trained. We could set this to the content image, another image
# entirely, or an image of noise. Experiment with all three options!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
tf.reset_default_graph()
g = tf.Graph()
# Get the network again
net = vgg16.get_vgg_model()
# Load up a session which we'll use to import the graph into.
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
# We can set the `net_input` to our content image
# or perhaps another image
# or an image of noise
# net_input = tf.Variable(content_img / 255.0)
# net_input = tf.get_variable(name='input', shape=content_img.shape, dtype=tf.float32, initializer=tf.random_normal_initializer(mean=np.mean(content_img), stddev=np.std(content_img)))
net_input = tf.Variable(content_img / 255.0)
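    # A third illustrative option (hypothetical file name): start the
    # synthesis from an entirely different image instead of the content:
    # other_img = net['preprocess'](plt.imread('other.jpg'), dsize=(448, 448))[np.newaxis]
    # net_input = tf.Variable(other_img / 255.0)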
# Now we load the network again, but this time replacing our placeholder
# with the trainable tf.Variable
tf.import_graph_def(
net['graph_def'],
name='net',
input_map={'images:0': net_input})
# <a name="content-loss"></a>
# ## Content Loss
#
# In the lecture we saw that we'll simply find the l2 loss between
# our content layer features.
# In[ ]:
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
content_loss = tf.nn.l2_loss((g.get_tensor_by_name(content_layer) -
content_features) /
content_features.size)
# <a name="style-loss"></a>
# ## Style Loss
#
# Instead of straight l2 loss on the raw feature activations, we're
# going to calculate the gram matrix and find the loss between these.
# Intuitively, this is finding what is common across all convolution
# filters, and trying to enforce the commonality between the
# synthesis and style image's gram matrix.
# In[ ]:
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
style_loss = np.float32(0.0)
for style_layer_i, style_gram_i in zip(style_layers, style_features):
layer_i = g.get_tensor_by_name(style_layer_i)
layer_shape = layer_i.get_shape().as_list()
layer_size = layer_shape[1] * layer_shape[2] * layer_shape[3]
layer_flat = tf.reshape(layer_i, [-1, layer_shape[3]])
gram_matrix = tf.matmul(tf.transpose(layer_flat), layer_flat) / layer_size
style_loss = tf.add(style_loss, tf.nn.l2_loss((gram_matrix - style_gram_i) / np.float32(style_gram_i.size)))
# <a name="total-variation-loss"></a>
# ## Total Variation Loss
#
# And just like w/ guided hallucinations, we'll try to enforce some
# smoothness using a total variation loss.
# In[ ]:
def total_variation_loss(x):
    h, w = x.get_shape().as_list()[1], x.get_shape().as_list()[2]
dx = tf.square(x[:, :h-1, :w-1, :] - x[:, :h-1, 1:, :])
dy = tf.square(x[:, :h-1, :w-1, :] - x[:, 1:, :w-1, :])
return tf.reduce_sum(tf.pow(dx + dy, 1.25))
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
tv_loss = total_variation_loss(net_input)
# <a name="training"></a>
# ## Training
#
# We're almost ready to train! Let's just combine our three loss
# measures and stick it in an optimizer.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# In[ ]:
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
# Experiment w/ the weighting of these! They produce WILDLY different
# results.
loss = 5.0 * content_loss + 1.0 * style_loss + 0.001 * tv_loss
optimizer = tf.train.AdamOptimizer(0.05).minimize(loss)
# And now iterate! Feel free to play with the number of iterations or
# how often you save an image. If you use a different network than VGG,
# then you will not need to feed in the dropout parameters like I've
# done here.
# In[ ]:
"""
imgs = []
n_iterations = 200
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
sess.run(tf.initialize_all_variables())
# map input to noise (or other image)
og_img = net_input.eval()
for it_i in range(n_iterations):
print("stylenet it: ", it_i,"/",n_iterations, end=" ")
_, this_loss, synth = sess.run([optimizer, loss, net_input], feed_dict={
'net/dropout_1/random_uniform:0': np.ones(
g.get_tensor_by_name(
'net/dropout_1/random_uniform:0'
).get_shape().as_list()),
'net/dropout/random_uniform:0': np.ones(
g.get_tensor_by_name(
'net/dropout/random_uniform:0'
).get_shape().as_list())
})
print("loss: %f, min/max: %f - %f)" %
(this_loss, np.min(synth), np.max(synth)))
if it_i % 5 == 0:
m = vgg16.deprocess(synth[0])
imgs.append(m)
plt.title("stylenet it: "+str(it_i))
plt.imshow(m)
#plt.show()
wait(1)
plt.imsave(fname='s4_stylenet_last_'+TID+'.png', arr=m)
gif.build_gif(imgs, saveto='s4_stylenet_'+TID+'.gif', interval=gifdly)
"""
print("END.")
# eop
|
dariox2/CADL
|
session-4/s4b01.py
|
Python
|
apache-2.0
| 52,997
|
[
"Gaussian",
"NEURON"
] |
91b684810c046dfc9ee1ea922ce1febe80d339ab9d2fce8af0bb9f0f67d8ae3e
|
#!/usr/bin/env python
#
# Connect4
#
# Just did this for the fun. Have fun like i did. :)
# I did this in Geany, a small and fast IDE. http://www.geany.org/
#
# Copyright 2009 Diogo Nuno dos Santos Silva <promzao@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
connect4_version = "0.51"
last_update = "10 Oct/2009"
import random
import curses
from sys import exit
def InitCurses():
'''Curses related stuff'''
global screen
screen = curses.initscr()
curses.noecho()
curses.start_color()
screen.keypad(1)
curses.init_pair(1, curses.COLOR_BLUE, curses.COLOR_BLACK) # Create the colors
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_RED)
curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_RED, curses.COLOR_BLACK)
# Screen information message position
info_x = 0
info_y = 1
def ScreenInfo(info_msg, info_color):
'''Prints an information message in the position given up there'''
last_pos = curses.getsyx()
screen.move(info_x,info_y)
screen.addstr("%s" % info_msg , curses.color_pair(info_color) | curses.A_BOLD)
screen.move(last_pos[0],last_pos[1])
def Quit():
'''Quiiiiiiiit!!!'''
curses.endwin()
quit()
class MoveCursor:
'''
An object to move the cursor with rules
Usage: MoveCursor(initial x position, initial y position, move left jump size, move right jump size, go up jump size, go down jump size, up limit size, down limit size, left limit size, right limit size)
'''
def __init__(self,initial_x,initial_y,left,right,up,down,x_up_max,x_down_max,y_left_max,y_right_max):
self.x = initial_x
self.y = initial_y
self.initial_x = initial_x
        self.initial_y = initial_y
self.move_left = left
self.move_right = right
self.move_up = up
self.move_down = down
self.x_up_max = x_up_max
self.y_left_max = y_left_max
self.x_down_max = x_down_max
self.y_right_max = y_right_max
def MoveLeft(self):
self.y = self.y-self.move_left
if self.y < self.y_left_max:
self.y = self.y_right_max
def MoveRight(self):
self.y = self.y+self.move_right
if self.y > self.y_right_max:
self.y = self.y_left_max
def MoveUp(self):
self.x = self.x-self.move_up
if self.x < self.x_up_max:
self.x = self.x_down_max
def MoveDown(self):
self.x = self.x+self.move_down
if self.x > self.x_down_max:
self.x = self.x_up_max
def MoveInitial(self):
self.x = self.initial_x
        self.y = self.initial_y
def MoveActual(self):
screen.move(self.x,self.y)
def Move(self,option):
if option == 'left':
self.MoveLeft()
elif option == 'right':
self.MoveRight()
elif option == 'up':
self.MoveUp()
elif option == 'down':
self.MoveDown()
elif option == 'initial':
self.MoveInitial()
elif option == 'actual':
self.MoveActual()
else:
Quit()
def get_x(self):
'''Return X position'''
return self.x
def get_y(self):
'''Return Y position'''
return self.y
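# Illustrative sketch (never called by the game): how MoveCursor's
# wrap-around rules behave for a cursor limited to rows 2..6, as in the menu
def _move_cursor_demo():
    demo = MoveCursor(2, 0, 0, 0, 1, 1, 2, 6, 0, 0)
    demo.Move('up') # x leaves the top limit (2-1 < 2), wraps to row 6
    assert demo.get_x() == 6
    demo.Move('down') # x leaves the bottom limit (6+1 > 6), wraps back to 2
    assert demo.get_x() == 2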
def About():
'''About Connect4'''
screen.clear()
screen.move(0,0)
screen.addstr(" Connect 4 \n\n", curses.color_pair(3))
screen.addstr(" Started in 2009/08/18, a hot day\n");
screen.addstr(" Version %s ( last update: %s )\n\n" % (connect4_version,last_update));
screen.addstr(" Made by Diogo Nuno\n Visit")
screen.addstr(" http://www.diogonuno.com\n\n ", curses.color_pair(1))
event = screen.getch()
def Help():
'''Help me dear Connect4'''
screen.clear()
screen.move(0,0)
screen.addstr(" Connect 4 \n\n", curses.color_pair(3))
screen.addstr(" Just get 4 in a row in horizontal or in diagonal and everything will be fine.\n")
screen.addstr(" Use \"Q\" in game to quit.\n\n ")
event = screen.getch()
class Board():
'''The game board'''
def __init__(self,y,x):
self.Board = [] # create the Board
self.Board_x = x # lines
self.Board_y = y # columns
for i in range(y): # create the Board columns
self.Board.append([])
self.Fill()
def Fill(self):
'''Fill the board with ghost Coins'''
for y in range(0,self.Board_y):
for x in range(0,self.Board_x):
self.Board[y].append(Coin('G')) # the ghost coin
def Print(self):
'''Method to print our game board'''
screen.addstr("\n\n"); # get a space for the information message
for x in reversed(range(0,self.Board_x)):
screen.addstr(" | ", curses.color_pair(4) | curses.A_BOLD)
for y in range(0,self.Board_y):
self.Board[y][x].printCoin()
screen.addstr(" | \n", curses.color_pair(4) | curses.A_BOLD)
def SomebodyWonPopcorn(self):
'''Method to check if somebody has won and give its deserved price'''
for x in range(0,self.Board_x-3): # check vertical
for y in range(0,self.Board_y):
if self.Board[y][x].getCoin() != 'G' and self.Board[y][x].getCoin() == self.Board[y][x+1].getCoin() and self.Board[y][x+1].getCoin() == self.Board[y][x+2].getCoin() and self.Board[y][x+2].getCoin() == self.Board[y][x+3].getCoin():
self.Board[y][x].changeColor() ; self.Board[y][x+1].changeColor() ; self.Board[y][x+2].changeColor() ; self.Board[y][x+3].changeColor()
return True
for x in range(0,self.Board_x): # check horizontal
for y in range(0,self.Board_y-3):
if self.Board[y][x].getCoin() != 'G' and self.Board[y][x].getCoin() == self.Board[y+1][x].getCoin() and self.Board[y+1][x].getCoin() == self.Board[y+2][x].getCoin() and self.Board[y+2][x].getCoin() == self.Board[y+3][x].getCoin():
self.Board[y][x].changeColor() ; self.Board[y+1][x].changeColor() ; self.Board[y+2][x].changeColor() ; self.Board[y+3][x].changeColor()
return True
for x in range(0,self.Board_x-3): # check diagonal
for y in range(0,self.Board_y-3):
if (self.Board[y][x].getCoin() == 'X' and self.Board[y+1][x+1].getCoin() == 'X' and self.Board[y+2][x+2].getCoin() == 'X' and self.Board[y+3][x+3].getCoin() == 'X') or (self.Board[y][x].getCoin() == 'O' and self.Board[y+1][x+1].getCoin() == 'O' and self.Board[y+2][x+2].getCoin() == 'O' and self.Board[y+3][x+3].getCoin() == 'O'):
self.Board[y][x].changeColor() ; self.Board[y+1][x+1].changeColor() ; self.Board[y+2][x+2].changeColor() ; self.Board[y+3][x+3].changeColor()
return True
for x in range(self.Board_x-3,self.Board_x): # check diagonal
for y in range(0,self.Board_y-3):
if (self.Board[y][x].getCoin() == 'X' and self.Board[y+1][x-1].getCoin() == 'X' and self.Board[y+2][x-2].getCoin() == 'X' and self.Board[y+3][x-3].getCoin() == 'X') or (self.Board[y][x].getCoin() == 'O' and self.Board[y+1][x-1].getCoin() == 'O' and self.Board[y+2][x-2].getCoin() == 'O' and self.Board[y+3][x-3].getCoin() == 'O'):
self.Board[y][x].changeColor() ; self.Board[y+1][x-1].changeColor() ; self.Board[y+2][x-2].changeColor() ; self.Board[y+3][x-3].changeColor()
return True
return False
def Play(self, Player, Column):
'''We get the player and the column, check if everything is all right and play'''
        if self.Board[Column][self.Board_x-1].getCoin() == 'G': # check if the top position is free; if so, drop the Coin, otherwise the column is full.
CoinID=0
del self.Board[Column][self.Board_x-1] # delete the clean position
if not self.Board[Column][0].getCoin() == 'G': # if its not the first play lets check for the last Coin position index
LastCoinID=0
for i in range(0,self.Board_x-1):
if self.Board[Column][LastCoinID].getCoin() == 'X' or self.Board[Column][LastCoinID].getCoin() == 'O':
LastCoinID+=1
CoinID=LastCoinID
if Player == 1:
self.Board[Column].insert(CoinID,Coin('X'))
else:
self.Board[Column].insert(CoinID,Coin('O'))
return True
else:
return False
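# Illustrative sketch (never called by the game): dropping a coin into an
# empty column places it at the bottom slot of that column
def _board_play_demo():
    board = Board(7,6)
    assert board.Play(1,3) # player 1 drops a coin into column 3
    assert board.Board[3][0].getCoin() == 'X'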
class Coin:
'''Object to play'''
def __init__(self, suit):
self.suit = suit
if suit == 'X': # give the fashion color to the coin
self.color = 1
elif suit == 'O':
self.color = 2
def getCoin(self):
'''Return the coin'''
return self.suit
def changeColor(self):
'''Change my fashion color to the winner color'''
self.color = 5
def printCoin(self):
'''Print me'''
if self.getCoin() == 'G': # if its not a ghost coin, show it :)
screen.addstr(" ")
        else: # X or O Coin, drawn in its fashion color
            screen.addstr(" %s " % self.getCoin(), curses.color_pair(self.color) | curses.A_BOLD)
class Player:
'''The player 1 or 2'''
def __init__(self, Opponent):
self.CurrentPlayer = 1
self.Opponent = Opponent
def ChangePlayer(self):
        '''Change player's turn'''
if self.CurrentPlayer == 1:
self.CurrentPlayer = 2
else:
self.CurrentPlayer = 1
def TheHand(self, Board, ChosenColumn):
'''Player's Hand'''
if (Board.Play(self.getPlayerTurn(),ChosenColumn)): # If he plays
if not Board.SomebodyWonPopcorn(): # checks if he won, if so he celebrates if not changes turn
self.ChangePlayer()
if self.Opponent == 'CPU':
                    if (Board.Play(self.getPlayerTurn(),random.randint(0,Board.Board_y-1))):
if Board.SomebodyWonPopcorn():
return
self.CurrentPlayer = 1
def getPlayerTurn(self):
'''Return the current Player'''
return self.CurrentPlayer
class Table:
'''Table where we play. The board is in the table and the players are sitting right next to it :)'''
def __init__(self, opponent):
self.player = Player(opponent)
self.Board = Board(7,6)
self.Cursor = MoveCursor(9,4,3,3,0,0,0,0,4,22) # give the rules to MoveCursor Object
self.ChosenColumn = 0 # my chosen column
self.Think() # The main
def ColumnColumnCursor(self, column):
'''A method to get the right Column to play from the Y position of the cursor'''
if self.Cursor.get_y() > 22:
self.ChosenColumn = 0
elif self.Cursor.get_y() < 4:
self.ChosenColumn = 6
else:
self.ChosenColumn = column
def Think(self):
'''Method where we read the keyboard keys and think in the game :P'. We think then we use the hands'''
while True:
screen.clear()
self.Board.Print()
if self.Board.SomebodyWonPopcorn(): # checks if he won :P
ScreenInfo("YOU WIIIIIIIIN!!! :)",self.player.getPlayerTurn())
screen.getch()
break
else:
ScreenInfo("Player's turn\n\n",self.player.getPlayerTurn())
self.Cursor.Move('actual')
event = screen.getch()
if event == ord("q"):
break
elif event == curses.KEY_LEFT:
self.Cursor.Move('left')
self.ColumnColumnCursor(self.ChosenColumn-1)
elif event == curses.KEY_RIGHT:
self.Cursor.Move('right')
self.ColumnColumnCursor(self.ChosenColumn+1)
elif event == 10:
self.player.TheHand(self.Board,self.ChosenColumn)
class Menu:
        '''Where everything begins, the Menu (main too)'''
def __init__(self):
self.Cursor = MoveCursor(2,0,0,0,1,1,2,6,0,0) # give the rules to MoveCursor Object
self.main()
def henshin_a_gogo_baby(self):
'''A name inspired in Viewtiful Joe game, lol. It checks the cursor position and HENSHIN A GOGO BABY'''
if self.Cursor.get_x() == 2:
gogo = Table('CPU')
        elif self.Cursor.get_x() == 3:
gogo = Table('Player')
elif self.Cursor.get_x() == 4:
Help()
elif self.Cursor.get_x() == 5:
About()
elif self.Cursor.get_x() == 6:
Quit()
def main(self):
'''The main :|'''
while True:
screen.clear()
screen.addstr(" Connect 4 \n\n", curses.color_pair(3))
screen.addstr(" Play against dumb CPU\n")
screen.addstr(" Play against Player\n")
screen.addstr(" Help\n")
screen.addstr(" About\n")
screen.addstr(" Quit\n")
self.Cursor.Move('actual')
event = screen.getch()
if event == ord("q"):
Quit()
elif event == curses.KEY_UP:
self.Cursor.Move('up')
elif event == curses.KEY_DOWN:
self.Cursor.Move('down')
elif event == 10:
self.henshin_a_gogo_baby()
if __name__ == '__main__':
try:
InitCurses()
run_for_your_life = Menu() # The menu
except:
Quit()
else:
print "Connect4 - ??"
|
prom/Python-Connect-4
|
connect4.py
|
Python
|
gpl-3.0
| 12,610
|
[
"VisIt"
] |
d265b26b43c4b5d4b19d2be5f4e6e36c8dfb9fa855d077b095868a58243c1026
|
# This file is part of cldoc. cldoc is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -*- coding: utf-8 -*-
from .clang import cindex
import tempfile
import functools
from .defdict import Defdict
from . import comment
from . import nodes
from . import includepaths
from . import documentmerger
from . import example
from . import utf8
from . import log
from .cmp import cmp
import os, sys, re, glob, platform
from ctypes.util import find_library
if platform.system() == 'Darwin':
libclangs = [
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/libclang.dylib',
'/Library/Developer/CommandLineTools/usr/lib/libclang.dylib'
]
found = False
for libclang in libclangs:
if os.path.exists(libclang):
cindex.Config.set_library_path(os.path.dirname(libclang))
found = True
break
if not found:
lname = find_library("clang")
if not lname is None:
cindex.Config.set_library_file(lname)
else:
versions = [None, '7.0', '6.0', '5.0', '4.0', '3.9', '3.8', '3.7', '3.6', '3.5', '3.4', '3.3', '3.2']
for v in versions:
name = 'clang'
if not v is None:
name += '-' + v
lname = find_library(name)
if not lname is None:
cindex.Config.set_library_file(lname)
break
testconf = cindex.Config()
try:
testconf.get_cindex_library()
except cindex.LibclangError as e:
sys.stderr.write("\nFatal: Failed to locate libclang library. cldoc depends on libclang for parsing sources, please make sure you have libclang installed.\n" + str(e) + "\n\n")
sys.exit(1)
class Tree(documentmerger.DocumentMerger):
def __init__(self, files, flags):
self.processed = {}
self.files, ok = self.expand_sources([os.path.realpath(f) for f in files])
if not ok:
sys.exit(1)
self.flags = includepaths.flags(flags)
# Sort files on sources, then headers
self.files.sort(key=functools.cmp_to_key(lambda a, b: cmp(self.is_header(a), self.is_header(b))))
self.processing = {}
self.kindmap = {}
# Things to skip
self.kindmap[cindex.CursorKind.USING_DIRECTIVE] = None
# Create a map from CursorKind to classes representing those cursor
# kinds.
for cls in nodes.Node.subclasses():
if hasattr(cls, 'kind'):
self.kindmap[cls.kind] = cls
self.root = nodes.Root()
self.all_nodes = []
self.cursor_to_node = Defdict()
self.usr_to_node = Defdict()
self.qid_to_node = Defdict()
# Map from category name to the nodes.Category for that category
self.category_to_node = Defdict()
# Map from filename to comment.CommentsDatabase
self.commentsdbs = Defdict()
self.qid_to_node[None] = self.root
self.usr_to_node[None] = self.root
def _lookup_node_from_cursor_despecialized(self, cursor):
template = cursor.specialized_cursor_template
if template is None:
parent = self.lookup_node_from_cursor(cursor.semantic_parent)
else:
return self.lookup_node_from_cursor(template)
if parent is None:
return None
for child in parent.children:
if child.name == cursor.spelling:
return child
return None
def lookup_node_from_cursor(self, cursor):
if cursor is None:
return None
# Try lookup by direct cursor reference
node = self.cursor_to_node[cursor]
if not node is None:
return node
node = self.usr_to_node[cursor.get_usr()]
if not node is None:
return node
return self._lookup_node_from_cursor_despecialized(cursor)
    def filter_source(self, path):
        return path.endswith(('.c', '.cpp', '.h', '.cc', '.hh', '.hpp'))
def expand_sources(self, sources, filter=None):
ret = []
ok = True
for source in sources:
if not filter is None and not filter(source):
continue
if os.path.isdir(source):
retdir, okdir = self.expand_sources([os.path.join(source, x) for x in os.listdir(source)], self.filter_source)
if not okdir:
ok = False
ret += retdir
elif not os.path.exists(source):
sys.stderr.write("The specified source `" + source + "` could not be found\n")
ok = False
else:
ret.append(source)
return (ret, ok)
    def is_header(self, filename):
        return filename.endswith(('.hh', '.hpp', '.h'))
def find_node_comment(self, node):
for location in node.comment_locations:
db = self.commentsdbs[location.file.name]
if db:
cm = db.lookup(location)
if cm:
return cm
return None
def process(self):
"""
process processes all the files with clang and extracts all relevant
nodes from the generated AST
"""
self.index = cindex.Index.create()
self.headers = {}
for f in self.files:
if f in self.processed:
continue
print('Processing {0}'.format(os.path.basename(f)))
            tu = self.index.parse(f, self.flags)
            if not tu:
                sys.stderr.write("Could not parse file %s...\n" % (f,))
                sys.exit(1)
            if len(tu.diagnostics) != 0:
                fatal = False
                for d in tu.diagnostics:
                    sys.stderr.write(d.format())
                    sys.stderr.write("\n")
                    if d.severity == cindex.Diagnostic.Fatal or \
                       d.severity == cindex.Diagnostic.Error:
                        fatal = True
                if fatal:
                    sys.stderr.write("\nCould not generate documentation due to parser errors\n")
                    sys.exit(1)
# Extract comments from files and included files that we are
# supposed to inspect
extractfiles = [f]
for inc in tu.get_includes():
filename = str(inc.include)
self.headers[filename] = True
if filename in self.processed or (not filename in self.files) or filename in extractfiles:
continue
extractfiles.append(filename)
for e in extractfiles:
db = comment.CommentsDatabase(e, tu)
self.add_categories(db.category_names)
self.commentsdbs[e] = db
self.visit(tu.cursor.get_children())
for f in self.processing:
self.processed[f] = True
self.processing = {}
# Construct hierarchy of nodes.
for node in self.all_nodes:
q = node.qid
if node.parent is None:
par = self.find_parent(node)
# Lookup categories for things in the root
if (par is None or par == self.root) and (not node.cursor is None):
location = node.cursor.extent.start
db = self.commentsdbs[location.file.name]
if db:
par = self.category_to_node[db.lookup_category(location)]
if par is None:
par = self.root
par.append(node)
# Resolve comment
cm = self.find_node_comment(node)
if cm:
node.merge_comment(cm)
# Keep track of classes to resolve bases and subclasses
classes = {}
# Map final qid to node
for node in self.all_nodes:
q = node.qid
self.qid_to_node[q] = node
if isinstance(node, nodes.Class):
classes[q] = node
# Resolve bases and subclasses
for qid in classes:
classes[qid].resolve_bases(classes)
def markup_code(self, index):
for node in self.all_nodes:
if node.comment is None:
continue
if not node.comment.doc:
continue
comps = node.comment.doc.components
for i in range(len(comps)):
component = comps[i]
if not isinstance(component, comment.Comment.Example):
continue
text = str(component)
                tmpfile = tempfile.NamedTemporaryFile(mode='w', delete=False)
tmpfile.write(text)
filename = tmpfile.name
tmpfile.close()
tu = index.parse(filename, self.flags, options=1)
tokens = tu.get_tokens(extent=tu.get_extent(filename, (0, os.stat(filename).st_size)))
os.unlink(filename)
hl = []
incstart = None
for token in tokens:
start = token.extent.start.offset
end = token.extent.end.offset
if token.kind == cindex.TokenKind.KEYWORD:
hl.append((start, end, 'keyword'))
continue
elif token.kind == cindex.TokenKind.COMMENT:
hl.append((start, end, 'comment'))
cursor = token.cursor
if cursor.kind == cindex.CursorKind.PREPROCESSING_DIRECTIVE:
hl.append((start, end, 'preprocessor'))
elif cursor.kind == cindex.CursorKind.INCLUSION_DIRECTIVE and incstart is None:
incstart = cursor
elif (not incstart is None) and \
token.kind == cindex.TokenKind.PUNCTUATION and \
token.spelling == '>':
hl.append((incstart.extent.start.offset, end, 'preprocessor'))
incstart = None
ex = example.Example()
lastpos = 0
for ih in range(len(hl)):
h = hl[ih]
ex.append(text[lastpos:h[0]])
ex.append(text[h[0]:h[1]], h[2])
lastpos = h[1]
ex.append(text[lastpos:])
comps[i] = ex
def match_ref(self, child, name):
if isinstance(name, utf8.string):
return name == child.name
else:
return name.match(child.name)
def find_ref(self, node, name, goup):
if node is None:
return []
ret = []
for child in node.resolve_nodes:
if self.match_ref(child, name):
ret.append(child)
if goup and len(ret) == 0:
return self.find_ref(node.parent, name, True)
else:
return ret
def cross_ref_node(self, node):
if not node.comment is None:
node.comment.resolve_refs(self.find_ref, node)
for child in node.children:
self.cross_ref_node(child)
def cross_ref(self):
self.cross_ref_node(self.root)
self.markup_code(self.index)
def decl_on_c_struct(self, node, tp):
n = self.cursor_to_node[tp.decl]
if isinstance(n, nodes.Struct) or \
isinstance(n, nodes.Typedef) or \
isinstance(n, nodes.Enum):
return n
return None
def c_function_is_constructor(self, node):
hints = ['new', 'init', 'alloc', 'create']
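        # For example (illustrative names): "new_buffer" and "buffer_new"
        # both match the "new" hint, while "buffer_grow" matches none.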
for hint in hints:
if node.name.startswith(hint + "_") or \
node.name.endswith("_" + hint):
return True
return False
def node_on_c_struct(self, node):
if isinstance(node, nodes.Method) or \
not isinstance(node, nodes.Function):
return None
decl = None
if self.c_function_is_constructor(node):
decl = self.decl_on_c_struct(node, node.return_type)
if not decl:
args = node.arguments
if len(args) > 0:
decl = self.decl_on_c_struct(node, args[0].type)
return decl
def find_parent(self, node):
cursor = node.cursor
# If node is a C function, then see if we should group it to a struct
parent = self.node_on_c_struct(node)
if parent:
return parent
while cursor:
cursor = cursor.semantic_parent
parent = self.cursor_to_node[cursor]
if parent:
return parent
return self.root
def register_node(self, node, parent=None):
self.all_nodes.append(node)
self.usr_to_node[node.cursor.get_usr()] = node
self.cursor_to_node[node.cursor] = node
# Typedefs in clang are not parents of typedefs, but we like it better
# that way, explicitly set the parent directly here
if parent and isinstance(parent, nodes.Typedef):
parent.append(node)
if parent and hasattr(parent, 'current_access'):
node.access = parent.current_access
def register_anon_typedef(self, node, parent):
node.typedef = parent
node.add_comment_location(parent.cursor.extent.start)
self.all_nodes.remove(parent)
# Map references to the typedef directly to the node
self.usr_to_node[parent.cursor.get_usr()] = node
self.cursor_to_node[parent.cursor] = node
def cursor_is_exposed(self, cursor):
# Only cursors which are in headers are exposed.
filename = str(cursor.location.file)
return filename in self.headers or self.is_header(filename)
def is_unique_anon_struct(self, node, parent):
if not node:
return False
if not isinstance(node, nodes.Struct):
return False
if not (node.is_anonymous or not node.name):
return False
return not isinstance(parent, nodes.Typedef)
def visit(self, citer, parent=None):
"""
visit iterates over the provided cursor iterator and creates nodes
from the AST cursors.
"""
if not citer:
return
while True:
try:
item = next(citer)
except StopIteration:
return
# Check the source of item
if not item.location.file:
self.visit(item.get_children())
continue
# Ignore files we already processed
if str(item.location.file) in self.processed:
continue
# Ignore files other than the ones we are scanning for
if not str(item.location.file) in self.files:
continue
# Ignore unexposed things
if item.kind == cindex.CursorKind.UNEXPOSED_DECL:
self.visit(item.get_children(), parent)
continue
self.processing[str(item.location.file)] = True
if item.kind in self.kindmap:
cls = self.kindmap[item.kind]
if not cls:
# Skip
continue
# see if we already have a node for this thing
node = self.usr_to_node[item.get_usr()]
if not node or self.is_unique_anon_struct(node, parent):
# Only register new nodes if they are exposed.
if self.cursor_is_exposed(item):
node = cls(item, None)
self.register_node(node, parent)
elif isinstance(parent, nodes.Typedef) and isinstance(node, nodes.Struct):
# Typedefs are handled a bit specially because what happens
# is that clang first exposes an unnamed struct/enum, and
# then exposes the typedef, with as a child again the
# cursor to the already defined struct/enum. This is a
# bit reversed as to how we normally process things.
self.register_anon_typedef(node, parent)
else:
self.cursor_to_node[item] = node
node.add_ref(item)
if node and node.process_children:
self.visit(item.get_children(), node)
else:
par = self.cursor_to_node[item.semantic_parent]
if not par:
par = parent
if par:
ret = par.visit(item, citer)
if not ret is None:
for node in ret:
self.register_node(node, par)
ignoretop = [cindex.CursorKind.TYPE_REF, cindex.CursorKind.PARM_DECL]
if (not par or ret is None) and not item.kind in ignoretop:
log.warning("Unhandled cursor: %s", item.kind)
# vi:ts=4:et
|
jessevdk/cldoc
|
cldoc/tree.py
|
Python
|
gpl-2.0
| 17,804
|
[
"VisIt"
] |
2f0d4d442a9c430afd868e4268515f90567bf29185490eb701956faa8bd0f459
|
# Orca
#
# Copyright 2004-2008 Sun Microsystems Inc.
# Copyright 2010 Joanmarie Diggs
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for pidgin."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.messages as messages
import orca.scripts.default as default
import orca.speech as speech
from .chat import Chat
from .script_utilities import Utilities
from .speech_generator import SpeechGenerator
########################################################################
# #
# The Pidgin script class. #
# #
########################################################################
class Script(default.Script):
def __init__(self, app):
"""Creates a new script for the given application.
Arguments:
- app: the application to create a script for.
"""
# So we can take an educated guess at identifying the buddy list.
#
self._buddyListAncestries = [[pyatspi.ROLE_TREE_TABLE,
pyatspi.ROLE_SCROLL_PANE,
pyatspi.ROLE_FILLER,
pyatspi.ROLE_PAGE_TAB,
pyatspi.ROLE_PAGE_TAB_LIST,
pyatspi.ROLE_FILLER,
pyatspi.ROLE_FRAME]]
default.Script.__init__(self, app)
def getChat(self):
"""Returns the 'chat' class for this script."""
return Chat(self, self._buddyListAncestries)
def getSpeechGenerator(self):
"""Returns the speech generator for this script. """
return SpeechGenerator(self)
def getUtilities(self):
"""Returns the utilites for this script."""
return Utilities(self)
def setupInputEventHandlers(self):
"""Defines InputEventHandler fields for this script that can be
called by the key and braille bindings. Here we need to add the
handlers for chat functionality.
"""
default.Script.setupInputEventHandlers(self)
self.inputEventHandlers.update(self.chat.inputEventHandlers)
def getAppKeyBindings(self):
"""Returns the application-specific keybindings for this script."""
return self.chat.keyBindings
def getAppPreferencesGUI(self):
"""Return a GtkGrid containing the application unique configuration
GUI items for the current application. The chat-related options get
created by the chat module."""
return self.chat.getAppPreferencesGUI()
def setAppPreferences(self, prefs):
"""Write out the application specific preferences lines and set the
new values. The chat-related options get written out by the chat
module.
Arguments:
- prefs: file handle for application preferences.
"""
self.chat.setAppPreferences(prefs)
def onChildrenChanged(self, event):
"""Called whenever a child object changes in some way.
Arguments:
- event: the text inserted Event
"""
# Check to see if a new chat room tab has been created and if it
# has, then announce its name. See bug #469098 for more details.
#
if event.type.startswith("object:children-changed:add"):
rolesList = [pyatspi.ROLE_PAGE_TAB_LIST,
pyatspi.ROLE_FILLER,
pyatspi.ROLE_FRAME]
if self.utilities.hasMatchingHierarchy(event.source, rolesList):
# As it's possible to get this component hierarchy in other
# places than the chat room (i.e. the Preferences dialog),
# we check to see if the name of the frame is the same as one
# of its children. If it is, then it's a chat room tab event.
# For a final check, we only announce the new chat tab if the
# last child has a name.
#
nameFound = False
frameName = event.source.parent.parent.name
for child in event.source:
if frameName and (frameName == child.name):
nameFound = True
if nameFound:
child = event.source[-1]
if child.name:
line = messages.CHAT_NEW_TAB % child.name
speech.speak(line)
def onNameChanged(self, event):
"""Called whenever a property on an object changes.
Arguments:
- event: the Event
"""
if self.chat.isInBuddyList(event.source):
return
else:
default.Script.onNameChanged(self, event)
def onTextDeleted(self, event):
"""Called whenever text is deleted from an object.
Arguments:
- event: the Event
"""
if self.chat.isInBuddyList(event.source):
return
else:
default.Script.onTextDeleted(self, event)
def onTextInserted(self, event):
"""Called whenever text is added to an object."""
if self.chat.presentInsertedText(event):
return
default.Script.onTextInserted(self, event)
def onValueChanged(self, event):
"""Called whenever an object's value changes. Currently, the
value changes for non-focused objects are ignored.
Arguments:
- event: the Event
"""
if self.chat.isInBuddyList(event.source):
return
else:
default.Script.onValueChanged(self, event)
def onWindowActivated(self, event):
"""Called whenever a toplevel window is activated."""
# Hack to "tickle" the accessible hierarchy. Otherwise, the
# events we need to present text added to the chatroom are
# missing.
#
allPageTabs = self.utilities.descendantsWithRole(
event.source, pyatspi.ROLE_PAGE_TAB)
default.Script.onWindowActivated(self, event)
def onExpandedChanged(self, event):
"""Callback for object:state-changed:expanded accessibility events."""
# Overridden here because the event.source is in a hidden column.
obj = event.source
if self.chat.isInBuddyList(obj):
obj = obj.parent[obj.getIndexInParent() + 1]
self.updateBraille(obj)
speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
return
default.Script.onExpandedChanged(self, event)
|
h4ck3rm1k3/orca-sonar
|
src/orca/scripts/apps/pidgin/script.py
|
Python
|
lgpl-2.1
| 7,523
|
[
"ORCA"
] |
fe415c32bbc403ee88e8883dee7bdb96c91f93599a78d17f5b600216a7cd2909
|
import argparse
import json
import os
import pandas as pd
import requests
def get_parser():
parser = argparse.ArgumentParser(description=__doc__)
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument('-i', "--infile", action='store',
help="""Path to .txt file containing accessions of experiments to process. The txt file must contain two columns with 1 header row, one labeled 'accession' and another labeled 'align_only'. It can optionally include 'custom_message' and 'custom_crop_length' columns.""")
parser.add_argument('-o', '--outputpath', action='store', default='',
help="""Optional path to output folder. Defaults to current path.""")
parser.add_argument('-g', '--gcpath', action='store', default='',
help="""Optional path where the input.json will be uploaded to the Google Cloud instance. Only affects the list of caper commands that is generated.""")
parser.add_argument('--wdl', action='store', default=False,
help="""Path to .wdl file.""")
parser.add_argument('-s', '--server', action='store', default='https://www.encodeproject.org',
help="""Optional specification of server using the full URL. Defaults to production server.""")
parser.add_argument('--use-s3-uris', action='store_true', default=False,
help="""Optional flag to use s3_uri links. Otherwise, defaults to using @@download links from the ENCODE portal.""")
input_group.add_argument("--accessions", action='store',
help="""List of accessions separated by commas.""")
parser.add_argument('--align-only', action='store', default=False,
help="""Pipeline will end after alignments step if True.""")
parser.add_argument('--custom-message', action='store',
help="""An additional custom string to be appended to the messages in the caper submit commands.""")
parser.add_argument('--caper-commands-file-message', action='store', default='',
help="""An additional custom string to be appended to the file name of the caper submit commands.""")
parser.add_argument('--custom-crop-length', action='store', default='',
help="""Custom value for the crop length.""")
parser.add_argument('--multiple-controls', action='store', default='',
help="""Pipeline will assume multiple controls should be used.""")
parser.add_argument('--force-se', action='store', default='',
help="""Pipeline will map as single-ended regardless of input fastqs.""")
parser.add_argument('--redacted', action='store', default='',
help="""Control experiment has redacted alignments.""")
return parser
def check_path_trailing_slash(path):
if path.endswith('/'):
return path.rstrip('/')
else:
return path
def build_experiment_report_query(experiment_list, server):
joined_list = '&accession='.join(experiment_list)
return server + '/report/?type=Experiment' + \
f'&accession={joined_list}' + \
'&field=@id' + \
'&field=accession' + \
'&field=assay_title' + \
'&field=control_type' + \
'&field=possible_controls' + \
'&field=replicates.antibody.targets' + \
'&field=files.s3_uri' + \
'&field=files.href' + \
'&field=replicates.library.biosample.organism.scientific_name' + \
'&limit=all' + \
'&format=json'
def build_file_report_query(experiment_list, server, file_format):
joined_list = '&dataset='.join(experiment_list)
if file_format == 'fastq':
format_parameter = '&file_format=fastq'
award_parameter = ''
output_type_parameter = '&output_type=reads'
elif file_format == 'bam':
format_parameter = '&file_format=bam'
award_parameter = '&award.rfa=ENCODE4'
output_type_parameter = '&output_type=alignments&output_type=redacted alignments'
return server + '/report/?type=File' + \
f'&dataset={joined_list}' + \
'&status=released' + \
'&status=in+progress' + \
award_parameter + \
'&assembly!=hg19' + \
'&assembly!=mm9' + \
format_parameter + \
output_type_parameter + \
'&field=@id' + \
'&field=dataset' + \
'&field=file_format' + \
'&field=biological_replicates' + \
'&field=paired_end' + \
'&field=paired_with' + \
'&field=run_type' + \
'&field=mapped_run_type' + \
'&field=read_length' + \
'&field=mapped_read_length' + \
'&field=status' + \
'&field=s3_uri' + \
'&field=href' + \
'&field=replicate.status' + \
'&limit=all' + \
'&format=json'
def parse_infile(infile):
    try:
        infile_df = pd.read_csv(infile, sep='\t')
        infile_df['align_only'] = infile_df['align_only'].astype('bool')
        # Optional flag columns are coerced only when present.
        for flag_column in ('multiple_controls', 'force_se'):
            if flag_column in infile_df:
                infile_df[flag_column] = infile_df[flag_column].astype('bool')
        return infile_df
    except FileNotFoundError as e:
        print(e)
        exit()
    except KeyError:
        print('Missing required align_only column in input file.')
        exit()
def strs2bool(strings):
out = []
for string in strings:
if string == "True":
out.append(True)
elif string == "False":
out.append(False)
return out
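# Example: strs2bool(['True', 'False']) returns [True, False]. Any other
# string is silently dropped, so the output can be shorter than the input.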
def get_data_from_portal(infile_df, server, keypair, link_prefix, link_src):
# Retrieve experiment report view json with necessary fields and store as DataFrame.
experiment_input_df = pd.DataFrame()
experiment_accessions = infile_df['accession'].tolist()
# Chunk the list to avoid sending queries longer than the character limit
chunked_experiment_accessions = [experiment_accessions[x:x+100] for x in range(0, len(experiment_accessions), 100)]
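    # e.g. 250 accessions are sent as three queries of 100, 100 and 50.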
for chunk in chunked_experiment_accessions:
experiment_report = requests.get(
build_experiment_report_query(chunk, server),
auth=keypair,
headers={'content-type': 'application/json'})
experiment_report_json = json.loads(experiment_report.text)
experiment_df_temp = pd.json_normalize(experiment_report_json['@graph'])
        experiment_input_df = pd.concat([experiment_input_df, experiment_df_temp], ignore_index=True, sort=True)
experiment_input_df.sort_values(by=['accession'], inplace=True)
# Fill in columns that may be missing
if 'control_type' not in experiment_input_df:
experiment_input_df['control_type'] = None
# Retrieve list of wildtype controls
wildtype_ctl_query_res = requests.get(
link_prefix+'/search/?type=Experiment&assay_title=Control+ChIP-seq&replicates.library.biosample.applied_modifications%21=%2A&limit=all',
auth=keypair,
headers={'content-type': 'application/json'})
wildtype_ctl_ids = [ctl['@id'] for ctl in json.loads(wildtype_ctl_query_res.text)['@graph']]
# Gather list of controls from the list of experiments to query for their files.
datasets_to_retrieve = experiment_input_df.get('@id').tolist()
for ctl in experiment_input_df.get('possible_controls'):
for item in ctl:
datasets_to_retrieve.append(item['@id'])
# Retrieve file report view json with necessary fields and store as DataFrame.
file_input_df = pd.DataFrame()
chunked_dataset_accessions = [datasets_to_retrieve[x:x+100] for x in range(0, len(datasets_to_retrieve), 100)]
for chunk in chunked_dataset_accessions:
for file_format in ['fastq', 'bam']:
file_report = requests.get(
build_file_report_query(chunk, server, file_format),
auth=keypair,
headers={'content-type': 'application/json'})
file_report_json = json.loads(file_report.text)
file_df_temp = pd.json_normalize(file_report_json['@graph'])
            file_input_df = pd.concat([file_input_df, file_df_temp], ignore_index=True, sort=True)
file_input_df.set_index(link_src, inplace=True)
file_df_required_fields = ['paired_end', 'paired_with', 'mapped_run_type']
for field in file_df_required_fields:
if field not in file_input_df:
file_input_df[field] = None
file_input_df['biorep_scalar'] = [x[0] for x in file_input_df['biological_replicates']]
return experiment_input_df, wildtype_ctl_ids, file_input_df
# Simple function to count the number of replicates per input.json
def count_reps(row):
x = 0
for value in row:
if None in value or value == []:
continue
else:
x = x+1
return x
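# Example: for a row of per-replicate R1 fastq lists such as
# [['rep1_R1.fastq.gz'], ['rep2_R1.fastq.gz'], [], ...] (hypothetical names),
# count_reps returns 2.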
def main():
keypair = (os.environ.get('DCC_API_KEY'), os.environ.get('DCC_SECRET_KEY'))
parser = get_parser()
args = parser.parse_args()
allowed_statuses = ['released', 'in progress']
output_path = check_path_trailing_slash(args.outputpath)
wdl_path = args.wdl
gc_path = args.gcpath
caper_commands_file_message = args.caper_commands_file_message
server = check_path_trailing_slash(args.server)
use_s3 = args.use_s3_uris
if use_s3:
link_prefix = ''
link_src = 's3_uri'
else:
link_prefix = server
link_src = 'href'
if args.infile:
infile_df = parse_infile(args.infile)
infile_df.sort_values(by=['accession'], inplace=True)
infile_df.drop_duplicates(subset=['accession'],inplace=True)
elif args.accessions:
accession_list = args.accessions.split(',')
align_only = strs2bool(args.align_only.split(','))
message = args.custom_message.split(',')
custom_crop_length = args.custom_crop_length.split(',')
multiple_controls = strs2bool(args.multiple_controls.split(','))
force_se = strs2bool(args.force_se.split(','))
redacted = strs2bool(args.redacted.split(','))
infile_df = pd.DataFrame({
'accession': accession_list,
'align_only': align_only,
'custom_message': message,
'crop_length': custom_crop_length,
'multiple_controls': multiple_controls,
'force_se': force_se,
'redacted': redacted
})
infile_df.sort_values(by=['accession'], inplace=True)
use_custom_crop_length_flag = False
if 'custom_crop_length' in infile_df:
use_custom_crop_length_flag = True
custom_crop_lengths = infile_df['custom_crop_length'].tolist()
else:
custom_crop_lengths = [None] * len(infile_df['accession'])
force_se_flag = False
if 'force_se' in infile_df:
force_se_flag = True
force_ses = infile_df['force_se'].tolist()
else:
        force_ses = [False] * len(infile_df['accession'])
if 'redacted' in infile_df:
redacted_flags = [x if x is True else None for x in infile_df['redacted'].tolist()]
else:
redacted_flags = [None] * len(infile_df['accession'])
if 'multiple_controls' in infile_df:
multiple_controls = infile_df['multiple_controls'].tolist()
else:
        multiple_controls = [False] * len(infile_df['accession'])
# Arrays to store lists of potential errors.
ERROR_no_fastqs = []
ERROR_missing_fastq_pairs = []
ERROR_control_error_detected = []
ERROR_not_matching_endedness = []
# Fetch data from the ENCODE portal
experiment_input_df, wildtype_ctl_ids, file_input_df = get_data_from_portal(infile_df, server, keypair, link_prefix, link_src)
# Create output_df to store all data for the final input.json files.
output_df = pd.DataFrame()
output_df['chip.title'] = infile_df['accession']
output_df['chip.align_only'] = infile_df['align_only']
if 'custom_message' in infile_df:
output_df['custom_message'] = infile_df['custom_message']
        output_df['custom_message'] = output_df['custom_message'].fillna('')
else:
output_df['custom_message'] = ''
output_df.set_index('chip.title', inplace=True, drop=False)
output_df['assay_title'] = experiment_input_df['assay_title'].to_list()
'''
Experiment sorting section
'''
# Assign blacklist(s) and genome reference file.
blacklist = []
blacklist2 = []
genome_tsv = []
chrom_sizes = []
ref_fa = []
bwa_index = []
for assay, replicates in zip(experiment_input_df.get('assay_title'), experiment_input_df.get('replicates')):
organism = set()
for rep in replicates:
organism.add(rep['library']['biosample']['organism']['scientific_name'])
if ''.join(organism) == 'Homo sapiens':
genome_tsv.append('https://storage.googleapis.com/encode-pipeline-genome-data/genome_tsv/v3/hg38.tsv')
chrom_sizes.append('https://www.encodeproject.org/files/GRCh38_EBV.chrom.sizes/@@download/GRCh38_EBV.chrom.sizes.tsv')
ref_fa.append('https://www.encodeproject.org/files/GRCh38_no_alt_analysis_set_GCA_000001405.15/@@download/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta.gz')
if assay in ['Mint-ChIP-seq', 'Control Mint-ChIP-seq']:
blacklist.append('https://www.encodeproject.org/files/ENCFF356LFX/@@download/ENCFF356LFX.bed.gz')
blacklist2.append('https://www.encodeproject.org/files/ENCFF023CZC/@@download/ENCFF023CZC.bed.gz')
bwa_index.append('https://www.encodeproject.org/files/ENCFF643CGH/@@download/ENCFF643CGH.tar.gz')
elif ''.join(organism) == 'Mus musculus':
genome_tsv.append('https://storage.googleapis.com/encode-pipeline-genome-data/genome_tsv/v3/mm10.tsv')
chrom_sizes.append('https://www.encodeproject.org/files/mm10_no_alt.chrom.sizes/@@download/mm10_no_alt.chrom.sizes.tsv')
ref_fa.append('https://www.encodeproject.org/files/mm10_no_alt_analysis_set_ENCODE/@@download/mm10_no_alt_analysis_set_ENCODE.fasta.gz')
if assay in ['Mint-ChIP-seq', 'Control Mint-ChIP-seq']:
blacklist.append(None)
blacklist2.append(None)
bwa_index.append(None)
output_df['chip.blacklist'] = blacklist
output_df['chip.blacklist2'] = blacklist2
output_df['chip.genome_tsv'] = genome_tsv
output_df['chip.chrsz'] = chrom_sizes
output_df['chip.ref_fa'] = ref_fa
output_df['chip.bwa_idx_tar'] = bwa_index
# Determine pipeline types and bwa related properties for Mint
pipeline_types = []
aligners = []
use_bwa_mem_for_pes = []
bwa_mem_read_len_limits = []
for assay, ctl_type in zip(experiment_input_df.get('assay_title'), experiment_input_df.get('control_type')):
if pd.notna(ctl_type) and assay == 'Control Mint-ChIP-seq':
pipeline_types.append('control')
aligners.append('bwa')
use_bwa_mem_for_pes.append(True)
bwa_mem_read_len_limits.append(0)
elif assay == 'Mint-ChIP-seq':
pipeline_types.append('histone')
aligners.append('bwa')
use_bwa_mem_for_pes.append(True)
bwa_mem_read_len_limits.append(0)
# Arrays which will be added to the master Dataframe for all experiments
crop_length = []
fastqs_by_rep_R1_master = {
1: [], 2: [],
3: [], 4: [],
5: [], 6: [],
7: [], 8: [],
9: [], 10: []
}
fastqs_by_rep_R2_master = {
1: [], 2: [],
3: [], 4: [],
5: [], 6: [],
7: [], 8: [],
9: [], 10: []
}
# Store experiment read lengths and run types for comparison against controls
experiment_min_read_lengths = []
experiment_run_types = []
for experiment_files, experiment_id, custom_crop_length, map_as_SE in zip(
experiment_input_df['files'],
experiment_input_df['accession'],
custom_crop_lengths,
force_ses
):
# Arrays for files within each experiment
fastqs_by_rep_R1 = {
1: [], 2: [],
3: [], 4: [],
5: [], 6: [],
7: [], 8: [],
9: [], 10: []
}
fastqs_by_rep_R2 = {
1: [], 2: [],
3: [], 4: [],
5: [], 6: [],
7: [], 8: [],
9: [], 10: []
}
experiment_read_lengths = []
run_types = set()
for file in experiment_files:
link = file[link_src]
if link.endswith('fastq.gz') \
and link in file_input_df.index \
and file_input_df.loc[link].at['status'] in allowed_statuses \
and file_input_df.loc[link].at['replicate.status'] in allowed_statuses:
if file_input_df.loc[link].at['paired_end'] == '1':
# Collect read length. Only consider read 1 for Mint
experiment_read_lengths.append(file_input_df.loc[link].at['read_length'])
pair = file_input_df.loc[link].at['paired_with']
for rep_num in fastqs_by_rep_R1:
if file_input_df.loc[link].at['biorep_scalar'] == rep_num:
fastqs_by_rep_R1[rep_num].append(link_prefix + link)
if not map_as_SE:
try:
fastqs_by_rep_R2[rep_num].append(link_prefix + file_input_df[file_input_df['@id'] == pair].index.values[0])
except IndexError:
print(f'ERROR: Metadata error (missing expected read 2 fastq) in {experiment_id}.')
ERROR_missing_fastq_pairs.append(experiment_id)
elif pd.isnull(file_input_df.loc[link].at['paired_end']):
for rep_num in fastqs_by_rep_R1:
if file_input_df.loc[link].at['biorep_scalar'] == rep_num:
fastqs_by_rep_R1[rep_num].append(link_prefix + link)
# Collect run_types
run_types.add(file_input_df.loc[link].at['run_type'])
        # Record an error if no fastqs are found for any replicate.
if all(val == [] for val in fastqs_by_rep_R1.values()):
print(f'ERROR: no fastqs were found for {experiment_id}.')
ERROR_no_fastqs.append(experiment_id)
# Fix ordering of reps to prevent non-consecutive numbering.
for k in list(range(1, 11)):
if fastqs_by_rep_R1[k] == []:
for i in list(range(k+1, 11)):
if fastqs_by_rep_R1[i] != []:
fastqs_by_rep_R1[k] = fastqs_by_rep_R1[i]
fastqs_by_rep_R2[k] = fastqs_by_rep_R2[i]
fastqs_by_rep_R1[i] = []
fastqs_by_rep_R2[i] = []
break
else:
continue
# Add the replicates to the master list.
for rep_num in fastqs_by_rep_R1_master:
fastqs_by_rep_R1_master[rep_num].append(fastqs_by_rep_R1[rep_num])
fastqs_by_rep_R2_master[rep_num].append(fastqs_by_rep_R2[rep_num])
if use_custom_crop_length_flag:
experiment_min_read_lengths.append(custom_crop_length)
else:
experiment_min_read_lengths.append(min(experiment_read_lengths))
        if 'single-ended' in run_types:
            experiment_run_types.append('single-ended')
        elif 'paired-ended' in run_types:
            experiment_run_types.append('paired-ended')
        else:
            # No usable run_type metadata; record a placeholder so the
            # per-experiment lists stay aligned (flagged as an error later).
            experiment_run_types.append(None)
'''
Control sorting section
'''
ctl_nodup_bams = []
final_run_types = []
for controls, experiment, pipeline_type, experiment_run_type, replicates, experiment_read_length, use_multiple_controls, map_as_SE in zip(
experiment_input_df['possible_controls'],
experiment_input_df['accession'],
pipeline_types,
experiment_run_types,
experiment_input_df['replicates'],
experiment_min_read_lengths,
multiple_controls,
force_ses
):
try:
if pipeline_type == 'control':
ctl_nodup_bams.append(None)
final_run_types.append(False if experiment_run_type == 'single-ended' or map_as_SE else True)
crop_length.append(experiment_read_length)
elif controls == []:
print(f'ERROR: No controls in possible_controls for experiment {experiment}.')
raise Warning
else:
if len(controls) > 1 and not use_multiple_controls:
                    # Only check TF ChIP if the antibody is eGFP; otherwise
                    # raise an error when more than one control is specified.
antibody = set()
for rep in replicates:
if 'antibody' in rep:
for target in rep['antibody']['targets']:
antibody.add(target)
else:
print(f'ERROR: Replicate in {experiment} is missing metadata about the antibody used.')
raise Warning
if ''.join(antibody) == '/targets/eGFP-avictoria/' and pipeline_type == 'tf':
for ctl in controls:
if ctl['@id'] in wildtype_ctl_ids:
controls = [ctl]
break
if len(controls) == 0:
print(f'ERROR: Could not locate wildtype control for {experiment}.')
raise Warning
else:
print(f'ERROR: Too many controls for experiment {experiment}.')
raise Warning
control_run_types = set()
control_read_lengths = list()
for control in controls:
# Identify run_types in the control(s)
control_run_types.update(file_input_df[
(file_input_df['dataset'] == control['@id']) &
(file_input_df['file_format'] == 'fastq')
].get('run_type'))
# Collect read_lengths in the control(s)
control_read_lengths.extend(file_input_df[
(file_input_df['dataset'] == control['@id']) &
(file_input_df['file_format'] == 'fastq') &
(file_input_df['paired_end'] == '1')
].get('read_length').tolist())
# Determine endedness based on the run types of the control(s) and experiment.
if 'single-ended' in control_run_types or experiment_run_type == 'single-ended' or map_as_SE:
final_run_type = 'single-ended'
final_run_types.append(False)
                elif 'paired-ended' in control_run_types and experiment_run_type == 'paired-ended':
final_run_type = 'paired-ended'
final_run_types.append(True)
else:
ERROR_not_matching_endedness.append(experiment)
print(f'ERROR: Could not determine correct endedness for experiment {experiment} and its control.')
raise Warning
# Select the minimum read length out of the files in the experiment
# and its control, and store the value.
combined_minimum_read_length = min([experiment_read_length] + control_read_lengths)
if use_custom_crop_length_flag:
crop_length.append(experiment_read_length)
else:
crop_length.append(combined_minimum_read_length)
# Gather control bams based on matching read_length
ctl_nodup_temp_collector = []
for control in controls:
matching_bam_found = False
for rep_num in list(range(1, 11)):
ctl_search = file_input_df[
(file_input_df['dataset'] == control['@id']) &
(file_input_df['biorep_scalar'] == rep_num) &
(file_input_df['file_format'] == 'bam') &
(file_input_df['mapped_run_type'] == final_run_type) &
(file_input_df['mapped_read_length'] <= combined_minimum_read_length + 2) &
(file_input_df['mapped_read_length'] >= combined_minimum_read_length - 2)
]
if not ctl_search.empty:
ctl_nodup_temp_collector.append(link_prefix + ctl_search.index.values[0])
matching_bam_found = True
# If the experiment has multiple controls that should be used,
# we expect each control to have at least one matching bam. Otherwise, treat it as an error.
if not matching_bam_found:
print(f'ERROR: no bams found in control of {experiment}.')
ERROR_control_error_detected.append(experiment)
if not ctl_nodup_temp_collector:
print(f'ERROR: no bams found for {experiment}.')
ctl_nodup_bams.append(None)
ERROR_control_error_detected.append(experiment)
elif None in ctl_nodup_temp_collector:
ctl_nodup_bams.append(None)
ERROR_control_error_detected.append(experiment)
else:
ctl_nodup_bams.append(ctl_nodup_temp_collector)
except Warning:
ERROR_control_error_detected.append(experiment)
ctl_nodup_bams.append(None)
final_run_types.append(None)
crop_length.append(None)
'''
Assign all remaining missing properties in the master dataframe.
'''
output_df['chip.paired_end'] = final_run_types
output_df['chip.crop_length'] = [int(x) if x is not None else '' for x in crop_length]
output_df['chip.ctl_nodup_bams'] = ctl_nodup_bams
output_df['chip.aligner'] = aligners
output_df['chip.use_bwa_mem_for_pe'] = use_bwa_mem_for_pes
output_df['chip.bwa_mem_read_len_limit'] = bwa_mem_read_len_limits
output_df['chip.pipeline_type'] = pipeline_types
output_df['chip.always_use_pooled_ctl'] = [True if x != 'control' else None for x in output_df['chip.pipeline_type']]
output_df['chip.redact_nodup_bam'] = redacted_flags
# Populate the lists of fastqs.
for val in list(range(1, 11)):
output_df[f'chip.fastqs_rep{val}_R1'] = fastqs_by_rep_R1_master[val]
output_df[f'chip.fastqs_rep{val}_R2'] = fastqs_by_rep_R2_master[val]
R1_cols = [col for col in output_df.columns if col.endswith('_R1')]
output_df['number_of_replicates'] = output_df[R1_cols].apply(lambda x: count_reps(x), axis=1)
# Build descriptions using the other parameters.
description_strings = []
for accession, crop_length, is_paired_end, pipeline_type, align_only, num_reps, assay in zip(
output_df['chip.title'],
output_df['chip.crop_length'],
output_df['chip.paired_end'],
output_df['chip.pipeline_type'],
output_df['chip.align_only'],
output_df['number_of_replicates'],
output_df['assay_title']
):
description_strings.append('{}_{}_no_crop_{}rep_{}_{}'.format(
accession,
('PE' if is_paired_end else 'SE'),
num_reps,
pipeline_type,
('alignonly' if align_only else 'peakcall')
))
output_df['chip.description'] = description_strings
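    # Example (hypothetical accession): the template above yields strings like
    # 'ENCSR000AAA_PE_no_crop_2rep_histone_peakcall'.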
# Clean up the pipeline_type data - flag cases where controls are not 'align_only', then submit all 'controls' as 'tf'
ERROR_controls_not_align_only = output_df[
(output_df['chip.pipeline_type'] == 'control') &
(output_df['chip.align_only'] == False)].get('chip.title').tolist()
for expt in ERROR_controls_not_align_only:
print(f'ERROR: {expt} is a control but was not align_only.')
# Remove any experiments with errors from the table.
output_df.drop(
ERROR_control_error_detected +
ERROR_no_fastqs +
ERROR_missing_fastq_pairs +
ERROR_not_matching_endedness +
ERROR_controls_not_align_only,
inplace=True)
# Output rows of dataframes as input json files.
output_dict = output_df.to_dict('index')
command_output = ''
# Order for parameters in the input.jsons
desired_key_order = [
'custom_message',
'assay_title',
'chip.title',
'chip.description',
'chip.pipeline_type',
'chip.align_only',
'chip.paired_end',
'chip.genome_tsv',
'chip.ref_fa',
'chip.bwa_idx_tar',
'chip.chrsz',
'chip.blacklist',
'chip.blacklist2',
'chip.ctl_nodup_bams',
'chip.redact_nodup_bam',
'chip.always_use_pooled_ctl',
'chip.aligner',
'chip.use_bwa_mem_for_pe',
'chip.bwa_mem_read_len_limit'
]
for val in list(range(1, 11)):
desired_key_order.extend([f'chip.fastqs_rep{val}_R1', f'chip.fastqs_rep{val}_R2'])
for experiment in output_dict:
output_dict[experiment] = {key: output_dict[experiment][key] for key in desired_key_order}
# Build strings of caper commands.
        # Leave the path prefix empty when no Google Cloud path is supplied.
        gc_prefix = gc_path + '/' if gc_path and not gc_path.endswith('/') else gc_path
        command_output = command_output + 'caper submit {} -i {}{} -s {}{}\nsleep 1\n'.format(
            wdl_path,
            gc_prefix,
            output_dict[experiment]['chip.description'] + '.json',
            output_dict[experiment]['chip.description'],
            ('_' + output_dict[experiment]['custom_message'] if output_dict[experiment]['custom_message'] != '' else ''))
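        # Each iteration appends lines of the form (hypothetical paths):
        #   caper submit chip.wdl -i gs://bucket/ENCSR000AAA_PE_no_crop_2rep_histone_peakcall.json -s ENCSR000AAA_PE_no_crop_2rep_histone_peakcall
        #   sleep 1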
# Remove empty properties and the custom message property.
# All "read 2" properties should be removed if the experiment will be run as single-ended.
if output_dict[experiment]['chip.paired_end'] is False:
for prop in [x for x in list(output_dict[experiment]) if x.endswith('_R2')]:
output_dict[experiment].pop(prop)
for prop in list(output_dict[experiment]):
            if output_dict[experiment][prop] in (None, [], '') or (isinstance(output_dict[experiment][prop], list) and None in output_dict[experiment][prop]):
output_dict[experiment].pop(prop)
output_dict[experiment].pop('custom_message')
output_dict[experiment].pop('assay_title')
file_name = f'{output_path}{"/" if output_path else ""}{output_dict[experiment]["chip.description"]}.json'
with open(file_name, 'w') as output_file:
output_file.write(json.dumps(output_dict[experiment], indent=4))
# Output .txt with caper commands.
if command_output != '':
with open(f'{output_path}{"/" if output_path else ""}caper_submit{"_" if caper_commands_file_message else ""}{caper_commands_file_message}.sh', 'w') as command_output_file:
command_output_file.write(command_output)
if __name__ == '__main__':
main()
|
ENCODE-DCC/pyencoded-tools
|
pipeline_input_scripts/generate_mint_chip_input_json.py
|
Python
|
mit
| 31,462
|
[
"BWA"
] |
e0f15c5a0ff4a0b6e2c38453b3836af9a8e7650cb8d7aaf3d60645366ec4f2e9
|
# Jackie's Map
# CMDR Jackie Silver, DISC
# Kay Johnston 2017 / 3303
# Thanks are due to everyone who's collected data for the various source lists.
# Special thanks to Lucienn and Anthor for beacon data and edsm landmarks.
version = '3t'
# Standard Python imports. The PIL modules are provided by the Pillow package (pip install pillow).
from tkinter import *
import PIL
from PIL import ImageTk, Image, ImageDraw, ImageFont
import math
# From Alot's excellent edts suite.
import pgnames
# Wants, needs, options:
############
# Sort out mousewheel zoom bindings on Linux. Should be <Button-4> and <Button-5> but ought to rework the whole event handler doodah.
# Maybe something to show the sphere of (presumed) Thargoid hyperspace interdictions. Do we even know its extent?
# Does the hyperdiction sphere intersect with the UA sphere?
# UPs supposed to be at "Ammonia planets in the Pleiades sector" and a number of convoys near Sol. I've added known examples, but can't verify yet.
# Add in proc-gen nebulae - at least the major ones like Colonia. Serious effort needed to run them all down though. Can I get this from edsm?
# Can we increase speed by drawing only those objects which are inside the canvas area?
# Add some kind of scale? And maybe a compass pointer towards the Core? Can lump this into Misc.
# Add rough indicator for hyperdiction sphere extent.
# Check Imperial Bubble extent and centrepoint. Perhaps add something to show Agri (terraformed ELW) and High Tech radii.
# Add category indicators for tourist POI.
# An "approximate density" function; would need to plug in the spiral approximation for the galaxy's shape and do a bunch of other stuff.
# Continue to update the various data files.
# Some better way of displaying the details when there are many POI in the same system. Scrolling display box?
# Need to read shipyard-present status for POIs and add an indicator for that (possibly, red circle outline?)
# Want to separate out drawing of asteroid bases, megaships, alien stuff?
# Player factions list should include the new Colonia factions.
# Need to add the 'Conda graveyard from Distant Stars.
# Other
# I should probably use setattr, and I could certainly lump all the different classes together into one uberclass.
# Could farm the cross-drawing bits out to separate method like the hats.
# Changelog
############
# 3i changes:
# Added scaling buttons, sector name check with Alot's edts, display of galmap style coordinates.
# Added display of body and latitude + longitude for POI.
# 3j changes:
# Added permit-locked HA stars to POI list and appropriate display. Added distance display. Added rare goods. Minor tweaks.
# Added some known UP locations. Corrected error with NGC 752 / IC 1848.
# 3k changes: (released version)
# Added pulsars. Updated tourist file. Changed mouseover to take account of scaling and only return objects which are drawn.
# Moved display of rare goods distance and tonnage into mouseover. Removed redundant indicator text and associated button.
# 3l changes:
# Added player factions. Updated tourist file, POI file.
# 3m changes:
# General tidying up, some UI changes. Updated data. Added toggle for Reorte-Riedquat line. Finished first pass of checking player factions.
# Changed player factions sheet to include a validity option, so details for some are loaded but not used.
# Reworked to show central nebulae and stars of ha sectors; the list of central stars is incomplete, and needs work.
# 3n changes:
# Moved RR line and UA sphere into a single Misc category. Added rough indicator of the Bubble's size into the same category.
# Added PRE logistics and Colonia stations to POIs. Added possible boundary lines for Guardian sites towards Regor. Added EAFOTS box to misc.
# Added distance indicators for tourist destinations. Improved drawing of sector fills.
# 3o changes: (released version)
# Adding non-sector HA star clusters gleaned from edsm (as the galmap search is a little strange);
# Changed handling of sectors from original list which don't exist as sectors. Introduced category for sectors which are nominally clusters but aren't.
# Added option to display the full list of individual stars known to edsm. Very interesting to see.
# 3p changes:
# Added search and highlight/filter functions. Added suppression corridor boundaries to misc toggle. Enabled PG sector name finding in search.
# Enabled filtering by sector including PG sectors. Added output .csv of filtered stars. Draws PG sector boundaries if searched.
# ...and disabled suppression corridor boundaries again.
# 3q / 3q2 changes:
# Improved name matching on HA sector filtering. Cleaned up line drawing stuff a bit. Added many Grauniad sites to POI list. Updated tourist sites.
# Updated edsm star list. Added new Colonia systems to POI list.
# 3r changes:
# Added asteroid bases, megaships into POI list.
# 3r2 changes:
# Added barnacles and many more POI. Removed EAFOTS box (obsolete post-Zurara), Guardians lines (toggling POI gives a good enough idea.)
# Removed Bright Star progress line. Left RR line in for nostalgia purposes. Added shipyard data for asteroid bases and landmarks (incomplete.)
# That still needs work, it just draws them in a slightly different colour; should be more obvious.
# 3r2,3r3,+ changes:
# Added Generation Ship bubble extent. Added various extra landmarks, updated tourist beacons &c.
# 3s,3s2,3s3 changes:
# Started integrating edsm landmarks. Added option to toggle drawing all POI or all POI that aren't landmarks/jumponium, to cut down clutter.
# Added a load of new Thargoid sites. Added more megaships.
# Updated list of Guardians ruins and adopted Canonn's numbering system.
# 3t changes:
# Added many new POI, megaships, tourist etc. etc.
# Added brain trees.
class App():
def __init__(self,master):
# Create a frame for the controls.
self.control_frame = Frame(master)
self.control_frame.pack()
# Defaults for offsets and scaling.
self.x_offset = 0
self.y_offset = 0
self.z_offset = 0
self.scaling = 2
# Search defaults.
self.search_x = 0
self.search_y = 0
self.search_performed = False
self.search_target = ''
self.highlight_target = ''
self.search_is_pg_sector = False
self.search_is_pg_x = 0
self.search_is_pg_y = 0
self.search_is_pg_z = 0 # Won't be needed, I suppose.
# Filtering default.
self.deferred = [] # Holds all stars that match the filter *and* match filtering by sector.
self.deferred_alpha = [] # Holds all stars that match the filter. (This is the first pass done, hence alpha. Go with it.)
# Create entry boxes for the controls.
self.x_co_box = Entry_Box(self.control_frame,'X:',str(self.x_offset),2,9)
self.y_co_box = Entry_Box(self.control_frame,'Y:',str(self.y_offset),2,9)
self.z_co_box = Entry_Box(self.control_frame,'Z:',str(self.z_offset),2,9)
self.scaling_box = Entry_Box(self.control_frame,'Scaling:',str(self.scaling),7,5)
# Bind the control entry boxes to the automatic update.
self.x_co_box.entry.bind('<Return>', self.auto_calculate)
self.y_co_box.entry.bind('<Return>', self.auto_calculate)
self.z_co_box.entry.bind('<Return>', self.auto_calculate)
self.scaling_box.entry.bind('<Return>', self.auto_calculate)
# Create a "save png" button.
self.save_button = Button(self.control_frame, text = 'Output', command = self.save, padx = 1)
self.save_button.pack(side = LEFT)
# Create buttons for moving z levels.
self.z_up_button = Button(self.control_frame, text = 'Z+', command = self.z_up, padx = 1)
self.z_up_button.pack(side = LEFT)
self.z_down_button = Button(self.control_frame, text = 'Z-', command = self.z_down, padx = 1)
self.z_down_button.pack(side = LEFT)
# Create buttons for changing scaling.
self.s_up_button = Button(self.control_frame, text = 'Zm Out', command = self.s_up, padx = 1)
self.s_up_button.pack(side = LEFT)
self.s_down_button = Button(self.control_frame, text = 'Zm In', command = self.s_down, padx = 1)
self.s_down_button.pack(side = LEFT)
# Create a frame to hold toggle buttons.
self.toggle_frame = Frame(master)
self.toggle_frame.pack()
# Create toggle buttons.
self.draw_crosses = IntVar()
self.draw_crosses.set(0)
self.toggle_crosses = Checkbutton(self.toggle_frame, text = 'Crosses', variable = self.draw_crosses, command = self.update_image)
self.toggle_crosses.pack(side = LEFT)
self.draw_fills = IntVar()
self.toggle_fills = Checkbutton(self.toggle_frame, text = 'Fills', variable = self.draw_fills, command = self.update_image)
self.toggle_fills.pack(side = LEFT)
self.draw_names = IntVar()
self.draw_names.set(1)
self.toggle_names = Checkbutton(self.toggle_frame, text = 'Names', variable = self.draw_names, command = self.update_image)
self.toggle_names.pack(side = LEFT)
self.draw_indicators = IntVar()
self.draw_indicators.set(1)
self.toggle_indicators = Checkbutton(self.toggle_frame, text = 'Indics', variable = self.draw_indicators, command = self.update_image)
self.toggle_indicators.pack(side = LEFT)
self.draw_poi = IntVar()
self.draw_poi.set(0)
self.toggle_poi = Checkbutton(self.toggle_frame, text = 'POI', variable = self.draw_poi, command = self.update_image)
self.toggle_poi.pack(side = LEFT)
self.draw_landmark = IntVar()
self.draw_landmark.set(1)
self.toggle_landmark = Checkbutton(self.toggle_frame, text = 'POI-L', variable = self.draw_landmark, command = self.update_image)
self.toggle_landmark.pack(side = LEFT)
self.draw_tourist = IntVar()
self.draw_tourist.set(0)
self.toggle_tourist = Checkbutton(self.toggle_frame, text = 'Tour', variable = self.draw_tourist, command = self.update_image)
self.toggle_tourist.pack(side = LEFT)
self.draw_rares = IntVar()
self.draw_rares.set(0)
self.toggle_rares = Checkbutton(self.toggle_frame, text = 'Rares', variable = self.draw_rares, command = self.update_image)
self.toggle_rares.pack(side = LEFT)
self.draw_pulsars = IntVar()
self.draw_pulsars.set(0)
self.toggle_pulsars = Checkbutton(self.toggle_frame, text = 'PSR', variable = self.draw_pulsars, command = self.update_image)
self.toggle_pulsars.pack(side = LEFT)
self.draw_player = IntVar()
self.draw_player.set(0)
self.toggle_players = Checkbutton(self.toggle_frame, text = 'Plyr', variable = self.draw_player, command = self.update_image)
self.toggle_players.pack(side = LEFT)
self.draw_misc = IntVar()
self.draw_misc.set(0)
self.toggle_misc = Checkbutton(self.toggle_frame, text = 'Misc', variable = self.draw_misc, command = self.update_image)
self.toggle_misc.pack(side = LEFT)
self.draw_findiv = IntVar()
self.draw_findiv.set(0)
self.toggle_findiv = Checkbutton(self.toggle_frame, text = 'F!', variable = self.draw_findiv, command = self.update_image)
self.toggle_findiv.pack(side = LEFT)
# Create a frame to hold search and highlight controls.
self.search_frame = Frame(master)
self.search_frame.pack()
# Create highlight - well, filter - and search boxes.
self.highlight_box = Entry_Box(self.search_frame,'Filter','',6,10)
self.filter_by_box = Entry_Box(self.search_frame,'by Sector','',8,10)
self.search_box = Entry_Box(self.search_frame,'Search','',6,10)
# Create a label to show the search result.
self.search_result = StringVar()
self.search_result.set('')
self.search_result_label = Label(self.search_frame, textvariable = self.search_result,width = 32)
self.search_result_label.pack()
# Bind highlight and search to update functions.
self.highlight_box.entry.bind('<Return>', self.auto_calculate)
self.filter_by_box.entry.bind('<Return>', self.auto_calculate)
self.search_box.entry.bind('<Return>', self.update_search_target)
# Create a frame to display data.
self.data_frame = Frame(master)
self.data_frame.pack()
# Create a label for mouse coordinates.
self.data_mouse = StringVar()
mousetext = 'X: --- ly, Y: --- ly, Z: --- ly.'
self.data_mouse.set(mousetext)
self.data_mouse_label = Label(self.data_frame, textvariable = self.data_mouse)
self.data_mouse_label.pack()
# Create a frame to display current sectors.
self.current_sector_frame = Frame(master)
self.current_sector_frame.pack()
# Create a label to display current sectors.
self.current_sectors = StringVar()
self.current_sectors.set('')
self.current_sectors_label = Label(self.current_sector_frame, textvariable = self.current_sectors, width = 82)
self.current_sectors_label.pack()
# Create a frame to display current tourist destinations. (Holds current POI as well to save UI space.)
self.current_tourist_frame = Frame(master)
self.current_tourist_frame.pack()
# Create a label to display current tourist destinations.
self.current_tourists = StringVar()
self.current_tourists.set('')
self.current_tourists_label = Label(self.current_tourist_frame, textvariable = self.current_tourists, width = 82)
self.current_tourists_label.pack()
# Create a frame to show the map.
self.map_frame = Frame(master)
self.map_frame.pack()
# Load in a font.
self.fnt = ImageFont.truetype('Quicksand-Regular.otf', FONTSIZE)
# Create a canvas to show the map image.
self.map_canvas = Canvas(self.map_frame, width = XDIM, height = YDIM)
self.map_canvas.pack()
self.map_canvas_mx = 0
self.map_canvas_my = 0
# Bind mouse actions to the canvas.
self.map_canvas.bind('<Motion>',self.motion)
self.map_canvas.bind('<Button-1>',self.click)
self.map_canvas.bind_all('<MouseWheel>',self.mousewheel_zoom)
# Once everything else is done, call a function to update the display.
self.update_image()
def motion(self,event):
self.map_canvas_mx, self.map_canvas_my = event.x, event.y
        # Arcane manoeuvres to convert mouse position to map position.
mx_min = self.x_offset - (XDIM / 2 * self.scaling)
my_max = self.y_offset + (YDIM / 2 * self.scaling)
mx_calc = mx_min + (self.map_canvas_mx * self.scaling)
my_calc = my_max - (self.map_canvas_my * self.scaling)
mx_calc = round(mx_calc,1)
my_calc = round(my_calc,1)
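        # Worked example (illustrative numbers): with scaling = 2 ly/px,
        # x_offset = 0 and a 1000 px wide canvas, the left edge sits at
        # -1000 ly and the centre pixel maps back to x = 0 ly.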
# Display the calculated position.
mousetext = 'X: ' + str(mx_calc) + ' ly, Y: ' + str(my_calc)
mousetext += ' ly, Z: ' + str(self.z_offset) + ' ly.'
mousetext += ' (Galmap: ' + str(mx_calc) + ', ' + str(self.z_offset) + ', ' + str(my_calc) + ' )'
# Calculate distance from Sol for the display.
d_from_sol = ((mx_calc ** 2) + (my_calc ** 2) + (self.z_offset ** 2)) ** 0.5
if d_from_sol < 1000:
d_text = str(int(d_from_sol)) + ' ly from Sol.'
else:
d_text = str(round(d_from_sol / 1000,1)) + ' Kylies from Sol.'
mousetext += ' ' + d_text
self.data_mouse.set(mousetext)
# Clear search box.
self.search_result.set('')
# Reworked section; find the single primary ha sector at the current position.
current = single_member_of(mx_calc, my_calc, self.z_offset)
# Use edts to get the sector name at the current position.
vector_alot = pgnames.vector3.Vector3(mx_calc, self.z_offset, my_calc)
# If the coordinates are too far out this can start to return odd values or fail, hence try-except.
try:
sector_alot = pgnames.get_sector_name(vector_alot) # as (x,z,y)
sector_alot = str(sector_alot).upper()
        except Exception:
sector_alot = ''
# We have a list of known ha sectors. If edts would give an ha sector, ignore it.
# Ideally I'd like one proc-gen name and all HA names, sorted in order.
if sector_alot not in known_ha_secs:
builttext = sector_alot
else:
builttext = ''
builttext += current
# Clunky.
for sector in ha_sec_list:
if sector.name == builttext:
if sector.a_nebula != '':
builttext += ' - Nebula: ' + sector.a_nebula
if sector.a_star != '':
builttext += ' - Search: ' + sector.a_star
self.current_sectors.set(builttext)
# Work out which tourist POI are at the current position. (2d only)
d_lr = self.draw_poi.get()
d_pr = self.draw_pulsars.get()
d_ra = self.draw_rares.get()
d_to = self.draw_tourist.get()
d_pf = self.draw_player.get()
d_fi = self.draw_findiv.get()
d_ld = self.draw_landmark.get()
ht = self.highlight_target
current = current_tourist(mx_calc, my_calc, self.scaling, d_lr, d_pr, d_ra, d_to, d_pf, d_ld, d_fi,ht,self.deferred)
# For goodness sake move this inside the class!
builttext = ''
for destination in current:
if destination != '':
builttext += destination
builttext += ', '
builttext = builttext.rstrip(', ')
self.current_tourists.set(builttext[:110])
def mousewheel_zoom(self,event):
# Check that this works under Linux (&Mac OS if possible)
# At the moment, this is zooming in or out by one level each time.
# Could change it to take account of the full delta given.
if event.delta > 0:
self.scaling = self.scaling / ZOOMSPEED
else:
self.scaling = self.scaling * ZOOMSPEED
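        # e.g. with ZOOMSPEED = 2 (the constant is defined elsewhere in the
        # file), one wheel notch halves or doubles the ly-per-pixel scale.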
# Update the scaling box to show the new value.
self.scaling_box.entry.delete(0,END)
self.scaling_box.entry.insert(0,self.scaling)
self.update_image()
# Move down z levels when the button is pressed.
def z_down(self):
self.z_offset -= Z_MOVE_RATE
self.z_co_box.entry.delete(0,END)
self.z_co_box.entry.insert(0,self.z_offset)
self.update_image()
# Move up z levels when the button is pressed.
def z_up(self):
self.z_offset += Z_MOVE_RATE
self.z_co_box.entry.delete(0,END)
self.z_co_box.entry.insert(0,self.z_offset)
self.update_image()
# Increase scaling factor (zoom out) when the button is pressed.
def s_up(self):
self.scaling *= S_MOVE_RATE
# Update the scaling box to show the new value.
self.scaling_box.entry.delete(0,END)
self.scaling_box.entry.insert(0,self.scaling)
self.update_image()
# Decrease scaling factor (zoom in) when the button is pressed.
def s_down(self):
self.scaling /= S_MOVE_RATE
# Update the scaling box to show the new value.
self.scaling_box.entry.delete(0,END)
self.scaling_box.entry.insert(0,self.scaling)
self.update_image()
def click(self,event):
self.map_canvas_mx, self.map_canvas_my = event.x, event.y
        # Arcane manoeuvres to convert mouse position to map position.
mx_min = self.x_offset - (XDIM / 2 * self.scaling)
my_max = self.y_offset + (YDIM / 2 * self.scaling)
mx_calc = mx_min + (self.map_canvas_mx * self.scaling)
my_calc = my_max - (self.map_canvas_my * self.scaling)
# In this case, we are moving to the new position.
# So I'm rounding to 1 dp in the interests of common sense.
mx_calc = round(mx_calc,1)
my_calc = round(my_calc,1)
self.x_offset = mx_calc
self.y_offset = my_calc
self.x_co_box.entry.delete(0,END)
self.x_co_box.entry.insert(0,mx_calc)
self.y_co_box.entry.delete(0,END)
self.y_co_box.entry.insert(0,my_calc)
mousetext = 'X: ' + str(mx_calc) + ' ly, Y: ' + str(my_calc)
mousetext += ' ly, Z: ' + str(self.z_offset) + ' ly.'
self.data_mouse.set(mousetext)
self.update_image()
def update_search_target(self,A):
self.search_is_pg_sector = False
self.search_is_pg_x = 0
self.search_is_pg_y = 0
self.search_is_pg_z = 0
self.search_target = str(self.search_box.entry.get())
stu = self.search_target.upper()
found_rough = False
found_exact = False
rx = 0
ry = 0
rz = 0
rn = ''
ex = 0
ey = 0
ez = 0
en = ''
cx = 0
cy = 0
cz = 0
# A list of lists - we will search through each of these in turn looking for a match.
search_lists = [[findiv_list,'Sys'],[pulsar_list,'Psr'],[tourist_list,'Trst'],[player_list,'Plyr'],[rares_list,'RG'],[poi_list,'POI'],[ha_sec_list,'Sct']]
for sl in search_lists:
# Search through this list.
for f in sl[0]:
if stu == f.name.upper():
ex = f.x
ey = f.y
ez = f.z
en = 'Found: ' + f.name + ' (' + sl[1] + ')'
found_exact = True
elif stu in f.name.upper():
rx = f.x
ry = f.y
rz = f.z
rn = 'Try: ' + f.name + ' (' + sl[1] + ')'
found_rough = True
# If we have an exact match, update the entry boxes.
        if found_exact:
self.x_co_box.entry.delete(0,END)
self.x_co_box.entry.insert(0,ex)
self.y_co_box.entry.delete(0,END)
self.y_co_box.entry.insert(0,ey)
self.z_co_box.entry.delete(0,END)
self.z_co_box.entry.insert(0,ez)
cx = ex
cy = ey
cz = ez
self.search_result.set(en)
self.search_x = cx
self.search_y = cy
self.search_performed = True
        elif found_rough:
self.x_co_box.entry.delete(0,END)
self.x_co_box.entry.insert(0,rx)
self.y_co_box.entry.delete(0,END)
self.y_co_box.entry.insert(0,ry)
self.z_co_box.entry.delete(0,END)
self.z_co_box.entry.insert(0,rz)
self.search_result.set(rn)
self.search_x = rx
self.search_y = ry
self.search_performed = True
cx = rx
cy = ry
cz = rz
else:
# Might want to move this to an earlier point, so that rough matches in other names don't take precedence.
try:
pg_sector = pgnames.get_sector(stu,False)
# Offsets as the pg sectors ain't centred on Sol.
wx = (pg_sector.x * 1280) - 65
wy = (pg_sector.z * 1280) - 1065
wz = (pg_sector.y * 1280) - 25
wx += 640
wy += 640
wz += 640
self.x_co_box.entry.delete(0,END)
self.x_co_box.entry.insert(0,wx)
self.y_co_box.entry.delete(0,END)
self.y_co_box.entry.insert(0,wy)
self.z_co_box.entry.delete(0,END)
self.z_co_box.entry.insert(0,wz)
self.search_result.set('Found: ' + pg_sector.name + ' (PG)')
self.search_x = wx
self.search_y = wy
self.search_performed = True
self.search_is_pg_sector = True
self.search_is_pg_x = wx - 640
self.search_is_pg_y = wy + 640
self.search_is_pg_z = wz - 640
cx = wx
cy = wy
cz = wz
            except Exception:
self.search_result.set('No match found.')
cx = round(cx,1)
cy = round(cy,1)
cz = round(cz,1)
# Clunky bit, as we need to update the position shown to reflect the new coordinates.
# Display the calculated position.
mousetext = 'X: ' + str(cx) + ' ly, Y: ' + str(cy)
mousetext += ' ly, Z: ' + str(cz) + ' ly.'
mousetext += ' (Galmap: ' + str(cx) + ', ' + str(cz) + ', ' + str(cy) + ' )'
# Calculate distance from Sol for the display.
d_from_sol = ((cx ** 2) + (cy ** 2) + (cz ** 2)) ** 0.5
if d_from_sol < 1000:
d_text = str(int(d_from_sol)) + ' ly from Sol.'
else:
d_text = str(round(d_from_sol / 1000,1)) + ' Kylies from Sol.'
mousetext += ' ' + d_text
self.data_mouse.set(mousetext)
self.auto_calculate(A)
def auto_calculate(self,A):
self.x_offset = float(self.x_co_box.entry.get())
self.y_offset = float(self.y_co_box.entry.get())
self.z_offset = float(self.z_co_box.entry.get())
self.scaling = float(self.scaling_box.entry.get())
self.search_target = str(self.search_box.entry.get()) # Redundant now?
self.highlight_target = str(self.highlight_box.entry.get())
self.filter_by_target = str(self.filter_by_box.entry.get())
# Check to see which stars fall within the highlight and filtering parameters.
dp = self.draw_pulsars.get() # Moved here for speed.
self.deferred_alpha = []
self.deferred = []
# First we refine the list to only those stars whose name fits the filter.
for f in findiv_list:
            # '*' matches everything, so check it before the substring branch.
            if self.highlight_target == '*':
                self.deferred_alpha.append(f)
            elif self.highlight_target != '':
                if self.highlight_target.upper() in f.name.upper():
                    self.deferred_alpha.append(f)
# Look to see if we have a proc-gen sector.
found_pg = False
wname = ''
try:
pg_sector = pgnames.get_sector(self.filter_by_target,False)
# Offsets as the pg sectors ain't centred on Sol.
wname = pg_sector.name
# Gets the south-west-down corner.
wx_swd = (pg_sector.x * 1280) - 65
wy_swd = (pg_sector.z * 1280) - 1065
wz_swd = (pg_sector.y * 1280) - 25
found_pg = True
# Get the north-east-up corner. Or possible NEU!
wx_neu = wx_swd + 1280
wy_neu = wy_swd + 1280
wz_neu = wz_swd + 1280
        except Exception:
found_pg = False
        if not found_pg:
# Now refine by sector. This only checks through HA sectors.
for d in self.deferred_alpha:
if self.filter_by_target != '':
d_is_in = single_member_of(d.x,d.y,d.z)
if self.filter_by_target.upper() in d_is_in.upper():
self.deferred.append(d)
else:
self.deferred.append(d)
else:
# This checks if we are in the boundaries of the given PG sector.
# Need to make sure that the stars are not in an HA sector instead.
for d in self.deferred_alpha:
if d.x >= wx_swd and d.x <= wx_neu:
if d.y >= wy_swd and d.y <= wy_neu:
if d.z >= wz_swd and d.z <= wz_neu:
# Use edts to get the sector name at the current position.
vector_alot = pgnames.vector3.Vector3(d.x, d.z, d.y)
# If the coordinates are too far out this can start to return odd values or fail, hence try-except.
try:
sector_alot = pgnames.get_sector_name(vector_alot) # as (x,z,y)
                            except Exception:
sector_alot = ''
if sector_alot.upper() == wname.upper():
self.deferred.append(d)
if self.highlight_target != '':
self.draw_findiv.set(1)
self.update_image()
def update_image(self):
# Create a new image in PIL.
self.pil_image = Image.new('RGBA',(XDIM,YDIM),'white')
self.draw = ImageDraw.Draw(self.pil_image)
# Use galmap image as background? - could do, but confusing tbh. Make a toggle?
# Want to add axis lines for x or y = 0
x_axis = self.x_offset / self.scaling
y_axis = self.y_offset / self.scaling
self.draw.line(((XDIM/2 - x_axis,0),(XDIM/2 - x_axis,YDIM)), fill = 'gray', width = 1)
self.draw.line(((0,YDIM/2 + y_axis),(XDIM,YDIM/2 + y_axis)), fill = 'gray', width = 1)
# Want to draw the UA sphere. (UA shell.)
if self.draw_misc.get() == 1:
cp_x = -78.6 - self.x_offset
cp_y = -340.5 - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
# Need to get "r on this z level"; draw inner boundary at 130 ly (?) - needs rechecking
r_z = radius_on_plane(-149.6,130,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (255,0,255,255))
# Need to get "r on this z level"; draw outer boundary at 150 ly (?) - needs rechecking
r_z = radius_on_plane(-149,150,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (255,0,255,255))
# Want to draw the Bubble extent.
if self.draw_misc.get() == 1:
cp_x = 0 - self.x_offset
cp_y = 0 - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
r_z = radius_on_plane(0,200,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (0,0,255,255))
# And let's add one for Generation Ships.
cp_x = 0 - self.x_offset
cp_y = 0 - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
r_z = radius_on_plane(0,175,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (0,255,0,255))
# And let's add one around Achenar, see if that works...
cp_x = 67.5 - self.x_offset
cp_y = 24.8 - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
r_z = radius_on_plane(-119.5,100,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (0,0,255,255))
# And a little one for Colonia. If I need many more of these, should do them with a list.
cp_x = -9530.5 - self.x_offset
cp_y = 19808.1 - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
r_z = radius_on_plane(-910.3,40,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (0,0,255,255))
# Draw the Reorte-Riedquat line.
if self.draw_misc.get() == 1:
# Riedquat (68.84375, 69.75, 48.75)
# Reorte (75.75, 75.15625, 48.75)
# Get the midpoint between Reorte and Riedquat.
midpoint_x = 72.296875
midpoint_y = 72.453125
# Get the slope of the line between Reorte and Riedquat.
x_diff = 75.75 - 68.84375
y_diff = 75.15625 - 69.75
line_start_x = midpoint_x - (RR_LENGTH * x_diff)
line_start_y = midpoint_y - (RR_LENGTH * y_diff)
line_end_x = midpoint_x + (RR_LENGTH * x_diff)
line_end_y = midpoint_y + (RR_LENGTH * y_diff)
line_start_x -= self.x_offset
line_start_y -= self.y_offset
line_end_x -= self.x_offset
line_end_y -= self.y_offset
ri_x = line_start_x
ri_y = line_start_y
re_x = line_end_x
re_y = line_end_y
adj_ls_x = XDIM/2 + (line_start_x / self.scaling)
adj_ls_y = YDIM/2 - (line_start_y / self.scaling)
adj_le_x = XDIM/2 + (line_end_x / self.scaling)
adj_le_y = YDIM/2 - (line_end_y / self.scaling)
self.draw.line(((adj_ls_x,adj_ls_y),(adj_le_x,adj_le_y)), fill = (0,0,255,255))
## # Draw (possible!) Guardians lines to Regor.
## if self.draw_misc.get() == 1:
## # Regor north about (1100,-30,-150), Regor south about (1100,-150,-150)
## self.doline(290,-7.9,1100,-30,(255,0,255,255))
## self.doline(290,-62.2,1100,-236,(255,0,255,255))
## # Draw current progress of Bright Star survey project.
## if self.draw_misc.get() == 1:
## self.doline(0,0,-8000,10000,(255,0,0,255))
# Draw Suppression corridor boundaries. (~x,y +/- 1100 ly Sol relative) Possibly add "Neutron field" rough extent markers?
# Disabled for the moment - need a better grasp on the shape.
## if self.draw_misc.get() == 1:
## x_axis_l = -380 - self.x_offset # This narrow boundary is roughly the distance from Sadge you need to go to see stellar remnants.
## x_axis_r = 410 - self.x_offset
#### y_axis_l = -1100 + self.y_offset
#### y_axis_r = 1100 + self.y_offset
##
## adj_x_l = XDIM/2 + (x_axis_l / self.scaling)
## adj_x_r = XDIM/2 + (x_axis_r / self.scaling)
#### adj_y_l = YDIM/2 + (y_axis_l / self.scaling)
#### adj_y_r = YDIM/2 + (y_axis_r / self.scaling)
##
## self.draw.line(((adj_x_l,0),(adj_x_l,YDIM)), fill = 'gray', width = 1)
## self.draw.line(((adj_x_r,0),(adj_x_r,YDIM)), fill = 'gray', width = 1)
##
#### self.draw.line(((0,adj_y_l),(XDIM,adj_y_l)), fill = 'gray', width = 1)
#### self.draw.line(((0,adj_y_r),(XDIM,adj_y_r)), fill = 'gray', width = 1)
## # Draw EAFOTS box.
## if self.draw_misc.get() == 1:
## # Southwest (-6466,-6186), northeast (-5186,-4906)
## ne_x = -5186 - self.x_offset
## ne_y = -4906 - self.y_offset
##
## sl = 1280
##
## sw_x = ne_x - sl
## sw_y = ne_y - sl
##
## adj_ne_x = XDIM/2 + (ne_x / self.scaling)
## adj_ne_y = YDIM/2 - (ne_y / self.scaling)
##
## adj_sw_x = XDIM/2 + (sw_x / self.scaling)
## adj_sw_y = YDIM/2 - (sw_y / self.scaling)
##
## box = ((adj_ne_x,adj_ne_y), (adj_sw_x,adj_sw_y))
## self.draw.rectangle(box, outline = (255,0,255,255))
# Draw Orion box.
if self.draw_misc.get() == 1:
# Southwest (463,-1430), northeast (823,-1070)
ne_x = 823 - self.x_offset
ne_y = -1070 - self.y_offset
sl = 360
sw_x = ne_x - sl
sw_y = ne_y - sl
adj_ne_x = XDIM/2 + (ne_x / self.scaling)
adj_ne_y = YDIM/2 - (ne_y / self.scaling)
adj_sw_x = XDIM/2 + (sw_x / self.scaling)
adj_sw_y = YDIM/2 - (sw_y / self.scaling)
box = ((adj_ne_x,adj_ne_y), (adj_sw_x,adj_sw_y))
self.draw.rectangle(box, outline = (255,0,255,255))
# Iterates through drawing known pulsars.
if self.draw_pulsars.get() == 1:
for psr in pulsar_list:
cp_x = psr.x - self.x_offset
cp_y = psr.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
nametext = psr.name
if psr.status == 'Invisible':
psr_colour = (200,100,100,255)
elif psr.status == 'Permit-locked':
psr_colour = (255,0,0,255)
else:
psr_colour = (10,140,190,255)
star_colour = (160,160,160,255)
if abs(self.z_offset - psr.z) < PSR_Z_RANGE:
self.draw.ellipse(((adj_x-PSRSIZE,adj_y-PSRSIZE),(adj_x+PSRSIZE,adj_y+PSRSIZE)), fill = psr_colour)
self.draw.line(((adj_x - 2,adj_y - 2),(adj_x + 2,adj_y + 2)), fill = star_colour, width = 1)
self.draw.line(((adj_x - 2,adj_y + 2),(adj_x + 2,adj_y - 2)), fill = star_colour, width = 1)
self.draw.line(((adj_x,adj_y - 3),(adj_x,adj_y + 3)), fill = star_colour, width = 1)
self.draw.line(((adj_x - 3,adj_y),(adj_x + 3,adj_y)), fill = star_colour, width = 1)
if self.draw_names.get() == 1: # Could control this with a separate button.
self.draw.text((adj_x + FONTSIZE/2,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = psr_colour)
else:
self.draw.ellipse(((adj_x-PSRSIZE,adj_y-PSRSIZE),(adj_x+PSRSIZE,adj_y+PSRSIZE)), fill = psr_colour)
self.draw.line(((adj_x - 2,adj_y - 2),(adj_x + 2,adj_y + 2)), fill = star_colour, width = 1)
self.draw.line(((adj_x - 2,adj_y + 2),(adj_x + 2,adj_y - 2)), fill = star_colour, width = 1)
self.draw.line(((adj_x,adj_y - 3),(adj_x,adj_y + 3)), fill = star_colour, width = 1)
self.draw.line(((adj_x - 3,adj_y),(adj_x + 3,adj_y)), fill = star_colour, width = 1)
self.draw_hat(psr.z,adj_x,adj_y,psr_colour)
# Reworked bit for drawing filtered stars.
dp = self.draw_pulsars.get()
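        # Note: stars named 'PSR ...' are only crossed in below when the
        # pulsar toggle (dp) is off, presumably to avoid double-plotting
        # systems the pulsar layer already draws.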
if self.draw_findiv.get() == 1:
if self.highlight_target != '':
for d in self.deferred:
cp_x = d.x - self.x_offset
cp_y = d.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
fc = (0,200,0,255)
if 'PSR' in d.name:
if dp == 0:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = fc)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = fc)
else:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = fc)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = fc)
# If no filter is set, draw all stars from the full individual list.
else:
for f in findiv_list:
cp_x = f.x - self.x_offset
cp_y = f.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
fc = (180,180,0,255)
if 'PSR' in f.name:
if dp == 0:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = fc)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = fc)
else:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = fc)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = fc)
# Iterates through drawing known POI.
if self.draw_poi.get() == 1:
for landmark in poi_list:
cp_x = landmark.x - self.x_offset
cp_y = landmark.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
nametext = landmark.name
draw_this = True # Kludge
if landmark.poi_type == 'Powerplay':
lm_colour = (150,20,230,255)
elif landmark.poi_type == 'Landmark':
if landmark.shipyard == 'Shipyard.':
lm_colour = (100,50,220,255)
else:
lm_colour = (50,50,220,255)
if self.draw_landmark.get() == 0:
draw_this = False
elif landmark.poi_type == 'Alien' or landmark.poi_type == 'Fungal':
lm_colour = (190,20,180,255)
elif landmark.poi_type == 'Flora':
lm_colour = (150,90,140,255)
elif landmark.poi_type == 'Permit':
lm_colour = (255,0,0,255)
elif landmark.poi_type == 'Jumponium':
lm_colour = (0,255,0,255)
if self.draw_landmark.get() == 0:
draw_this = False
elif landmark.poi_type == 'Asteroid Base':
if landmark.shipyard == 'Shipyard.':
lm_colour = (200,50,0,255)
else:
lm_colour = (150,50,0,255)
elif landmark.poi_type == 'Megaship':
lm_colour = (0,200,0,255)
elif landmark.poi_type == 'Station':
lm_colour = (150,100,0,255)
else:
lm_colour = (45,180,225,255)
                if draw_this:
if abs(self.z_offset - landmark.z) < POI_Z_RANGE:
self.draw.ellipse(((adj_x-POISIZE,adj_y-POISIZE),(adj_x+POISIZE,adj_y+POISIZE)), fill = lm_colour)
if self.draw_names.get() == 1: # Could control this with a separate button.
self.draw.text((adj_x + FONTSIZE/2,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = lm_colour)
else:
self.draw.ellipse(((adj_x-POISIZE,adj_y-POISIZE),(adj_x+POISIZE,adj_y+POISIZE)), fill = lm_colour)
self.draw_hat(landmark.z,adj_x,adj_y,lm_colour)
## # Iterates through drawing known edsm landmarks.
## if self.draw_edsm.get() == 1:
## for landmark in edsm_list:
## cp_x = landmark.x - self.x_offset
## cp_y = landmark.y - self.y_offset
##
## adj_x = XDIM/2 + (cp_x / self.scaling)
## adj_y = YDIM/2 - (cp_y / self.scaling)
##
## nametext = landmark.name
##
## lm_colour = (45,180,225,255)
##
## if abs(self.z_offset - landmark.z) < POI_Z_RANGE:
## self.draw.ellipse(((adj_x-POISIZE,adj_y-POISIZE),(adj_x+POISIZE,adj_y+POISIZE)), fill = lm_colour)
##
## if self.draw_names.get() == 1: # Could control this with a separate button.
## self.draw.text((adj_x + FONTSIZE/2,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = lm_colour)
##
## else:
## self.draw.ellipse(((adj_x-POISIZE,adj_y-POISIZE),(adj_x+POISIZE,adj_y+POISIZE)), fill = lm_colour)
##
## self.draw_hat(landmark.z,adj_x,adj_y,lm_colour)
# Iterates through drawing known player factions.
if self.draw_player.get() == 1:
for pf in player_list:
if pf.valid == 'Yes':
cp_x = pf.x - self.x_offset
cp_y = pf.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
nametext = str(pf.name)
t_colour = (130,160,40,255)
if abs(self.z_offset - pf.z) < PF_Z_RANGE:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = t_colour)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = t_colour)
if self.draw_names.get() == 1:
self.draw.text((adj_x + FONTSIZE/4,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = t_colour)
else:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = t_colour)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = t_colour)
self.draw_hat(pf.z,adj_x,adj_y,t_colour)
# Iterates through drawing known tourist locations.
if self.draw_tourist.get() == 1:
for destination in tourist_list:
cp_x = destination.x - self.x_offset
cp_y = destination.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
nametext = str(destination.number)
if nametext == '0':
nametext = '?'
t_colour = (10,110,10,255)
if abs(self.z_offset - destination.z) < TOURIST_Z_RANGE:
self.draw.ellipse(((adj_x-TOURISTSIZE,adj_y-TOURISTSIZE),(adj_x+TOURISTSIZE,adj_y+TOURISTSIZE)), fill = t_colour)
if self.draw_names.get() == 1: # This is slow.
self.draw.text((adj_x + FONTSIZE/4,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = t_colour)
else:
self.draw.ellipse(((adj_x-TOURISTSIZE,adj_y-TOURISTSIZE),(adj_x+TOURISTSIZE,adj_y+TOURISTSIZE)), fill = t_colour)
self.draw_hat(destination.z,adj_x,adj_y,t_colour)
# Iterates through drawing known rare goods.
if self.draw_rares.get() == 1:
for rare in rares_list:
cp_x = rare.x - self.x_offset
cp_y = rare.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
nametext = str(rare.name)
if rare.distance < RARE_MAX_DISTANCE:
t_colour = (240,90,30,255)
else:
t_colour = (100,100,100,255)
if abs(self.z_offset - rare.z) < RARE_Z_RANGE:
self.draw.ellipse(((adj_x-RARESIZE,adj_y-RARESIZE),(adj_x+RARESIZE,adj_y+RARESIZE)), fill = t_colour)
if self.draw_names.get() == 1: # This is slow.
self.draw.text((adj_x + FONTSIZE/4,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = t_colour)
else:
self.draw.ellipse(((adj_x-RARESIZE,adj_y-RARESIZE),(adj_x+RARESIZE,adj_y+RARESIZE)), fill = t_colour)
self.draw_hat(rare.z,adj_x,adj_y,t_colour)
# Iterate through drawing sector fills first.
if self.draw_fills.get() == 1:
for sector in ha_sec_list:
if sector.state == 'Open':
fc = (0,0,0,255)
elif sector.state == 'Permit-locked.':
fc = (255,0,0,255)
else:
fc = (130,80,60,255) # Don't really need this but whatever.
cp_x = sector.x - self.x_offset
cp_y = sector.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
# Need to get "r on this z level"
r_z = radius_on_plane(sector.z,sector.r,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
if sector.state != 'Not found':
if sector.state == 'Open':
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), fill = (255,255,255,255))
else:
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), fill = fc)
# Iterates through drawing known sectors.
for sector in ha_sec_list:
if sector.state == 'Open':
fc = (0,0,0,255)
elif sector.state == 'Permit-locked.':
fc = (255,0,0,255)
else:
fc = (130,80,60,255) # Don't really need this but whatever.
cp_x = sector.x - self.x_offset
cp_y = sector.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
# Need to get "r on this z level"
r_z = radius_on_plane(sector.z,sector.r,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
if sector.state != 'Not found':
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = fc)
# Placeholder indicators for object types.
# Not drawn are LM (Landmark) and OS (Open Cluster that is sparse or non-existent on the map)
if self.draw_indicators.get() == 1:
if sector.sec_type == 'NB': # Ordinary emission nebula.
self.draw.ellipse(((adj_x-NEBSIZE,adj_y-NEBSIZE),(adj_x+NEBSIZE,adj_y+NEBSIZE)), fill = (230,170,50,255))
elif sector.sec_type == 'NX': # Ordinary emission nebula known to host barnacles.
self.draw.ellipse(((adj_x-NEBSIZE,adj_y-NEBSIZE),(adj_x+NEBSIZE,adj_y+NEBSIZE)), fill = (230,170,50,255), outline = (190,20,180,255))
elif sector.sec_type == 'PN': # Planetary nebula.
self.draw.ellipse(((adj_x-NEBSIZE,adj_y-NEBSIZE),(adj_x+NEBSIZE,adj_y+NEBSIZE)), fill = (70,240,240,255))
self.draw.line(((adj_x - 2,adj_y - 2),(adj_x + 2,adj_y + 2)), fill = (30,180,190,255), width = 1)
self.draw.line(((adj_x - 2,adj_y + 2),(adj_x + 2,adj_y - 2)), fill = (30,180,190,255), width = 1)
self.draw.line(((adj_x,adj_y - 3),(adj_x,adj_y + 3)), fill = (30,180,190,255), width = 1)
self.draw.line(((adj_x - 3,adj_y),(adj_x + 3,adj_y)), fill = (30,180,190,255), width = 1)
elif sector.sec_type == 'DN': # Dark nebula.
self.draw.ellipse(((adj_x-NEBSIZE,adj_y-NEBSIZE),(adj_x+NEBSIZE,adj_y+NEBSIZE)), fill = (35,30,0,255))
elif sector.sec_type == 'OC': # Open Cluster of stars.
self.draw.line(((adj_x - 2,adj_y - 2),(adj_x + 2,adj_y + 2)), fill = 'black', width = 1)
self.draw.line(((adj_x - 2,adj_y + 2),(adj_x + 2,adj_y - 2)), fill = 'black', width = 1)
self.draw.line(((adj_x,adj_y - 3),(adj_x,adj_y + 3)), fill = 'black', width = 1)
self.draw.line(((adj_x - 3,adj_y),(adj_x + 3,adj_y)), fill = 'black', width = 1)
# Draw an indicator if we have a 'sector' which contains only a number of named stars.
r_solo = radius_on_plane(sector.z,SOLO_ASSUMED_RADIUS,self.z_offset)
if r_solo > 0:
adj_r = r_solo / self.scaling
if self.draw_indicators.get() == 1:
# Should I use a different indicator here, to avoid confusion with OC sectors? Or not?
if sector.sec_type == 'ST':
self.draw.line(((adj_x - 2,adj_y - 2),(adj_x + 2,adj_y + 2)), fill = 'black', width = 1)
self.draw.line(((adj_x - 2,adj_y + 2),(adj_x + 2,adj_y - 2)), fill = 'black', width = 1)
self.draw.line(((adj_x,adj_y - 3),(adj_x,adj_y + 3)), fill = 'black', width = 1)
self.draw.line(((adj_x - 3,adj_y),(adj_x + 3,adj_y)), fill = 'black', width = 1)
for sector in ha_sec_list:
if sector.state == 'Open':
fc = (0,0,0,255)
elif sector.state == 'Permit-locked.':
fc = (255,0,0,255)
else:
fc = (130,80,60,255)
cp_x = sector.x - self.x_offset
cp_y = sector.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
# Only draw text for sectors which are present on this z level.
r_z = radius_on_plane(sector.z,sector.r,self.z_offset)
nametext = sector.name
if r_z > 0:
if self.draw_names.get() == 1:
self.draw.text((adj_x + FONTSIZE/2,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = fc)
if self.draw_crosses.get() == 1 and sector.state == 'Not found':
self.draw.line(((adj_x - CROSSSIZE,adj_y - CROSSSIZE),(adj_x + CROSSSIZE,adj_y + CROSSSIZE)), fill = fc, width = CROSSWIDTH)
self.draw.line(((adj_x - CROSSSIZE,adj_y + CROSSSIZE),(adj_x + CROSSSIZE,adj_y - CROSSSIZE)), fill = fc, width = CROSSWIDTH)
self.draw_hat(sector.z,adj_x,adj_y,fc)
else:
if self.draw_crosses.get() == 1:
self.draw.line(((adj_x - CROSSSIZE,adj_y - CROSSSIZE),(adj_x + CROSSSIZE,adj_y + CROSSSIZE)), fill = fc, width = CROSSWIDTH)
self.draw.line(((adj_x - CROSSSIZE,adj_y + CROSSSIZE),(adj_x + CROSSSIZE,adj_y - CROSSSIZE)), fill = fc, width = CROSSWIDTH)
if sector.sec_type != 'ST':
self.draw_hat(sector.z,adj_x,adj_y,fc)
# Draw text if we have a 'sector' which contains only a number of named stars.
r_solo = radius_on_plane(sector.z,SOLO_ASSUMED_RADIUS,self.z_offset)
if r_solo > 0:
if self.draw_names.get() == 1:
if sector.sec_type == 'ST':
self.draw.text((adj_x + FONTSIZE/2,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = fc)
else:
if self.draw_crosses.get() == 1:
if sector.sec_type == 'ST':
self.draw.line(((adj_x - CROSSSIZE,adj_y - CROSSSIZE),(adj_x + CROSSSIZE,adj_y + CROSSSIZE)), fill = fc, width = CROSSWIDTH)
self.draw.line(((adj_x - CROSSSIZE,adj_y + CROSSSIZE),(adj_x + CROSSSIZE,adj_y - CROSSSIZE)), fill = fc, width = CROSSWIDTH)
self.draw_hat(sector.z,adj_x,adj_y,fc)
# Draw a marker at the latest search location.
if self.search_performed == True:
s_x = self.search_x - self.x_offset
s_y = self.search_y - self.y_offset
adj_x = XDIM/2 + (s_x / self.scaling)
adj_y = YDIM/2 - (s_y / self.scaling)
s_col = (150,0,0,255)
self.draw.ellipse(((adj_x-SEARCH_SIZE_I,adj_y-SEARCH_SIZE_I),(adj_x+SEARCH_SIZE_I,adj_y+SEARCH_SIZE_I)), outline = s_col)
self.draw.ellipse(((adj_x-SEARCH_SIZE_O,adj_y-SEARCH_SIZE_O),(adj_x+SEARCH_SIZE_O,adj_y+SEARCH_SIZE_O)), outline = s_col)
self.draw.line(((adj_x,adj_y-SEARCH_SIZE_I),(adj_x,adj_y-SEARCH_SIZE_I-S_S_EXT)),fill = s_col,width = 2)
self.draw.line(((adj_x,adj_y+SEARCH_SIZE_I),(adj_x,adj_y+SEARCH_SIZE_I+S_S_EXT)),fill = s_col,width = 2)
self.draw.line(((adj_x-SEARCH_SIZE_I,adj_y),(adj_x-SEARCH_SIZE_I-S_S_EXT,adj_y)),fill = s_col,width = 2)
self.draw.line(((adj_x+SEARCH_SIZE_I,adj_y),(adj_x+SEARCH_SIZE_I+S_S_EXT,adj_y)),fill = s_col,width = 2)
# If we have a pg sector, draw a box showing its outlines.
if self.search_is_pg_sector == True:
nw_x = self.search_is_pg_x - self.x_offset
nw_y = self.search_is_pg_y - self.y_offset
sl = 1280 # Presumably the 1280 ly side length of a procedurally-generated sector.
se_x = nw_x + sl
se_y = nw_y - sl
adj_nw_x = XDIM/2 + (nw_x / self.scaling)
adj_nw_y = YDIM/2 - (nw_y / self.scaling)
adj_se_x = XDIM/2 + (se_x / self.scaling)
adj_se_y = YDIM/2 - (se_y / self.scaling)
box = ((adj_nw_x,adj_nw_y), (adj_se_x,adj_se_y))
self.draw.rectangle(box, outline = (150,0,0,255))
# Convert the image to one that tkinter can use, and draw it to the canvas.
self.working_image = ImageTk.PhotoImage(self.pil_image)
self.image_on_canvas = self.map_canvas.create_image(0, 0, anchor = NW, image = self.working_image)
# Draws a line from one pair of coordinates to another (adjusting for offsets.)
def doline(self,x1,y1,x2,y2,colour):
s_x = x1 - self.x_offset
s_y = y1 - self.y_offset
e_x = x2 - self.x_offset
e_y = y2 - self.y_offset
adj_s_x = XDIM/2 + (s_x / self.scaling)
adj_s_y = YDIM/2 - (s_y / self.scaling)
adj_e_x = XDIM/2 + (e_x / self.scaling)
adj_e_y = YDIM/2 - (e_y / self.scaling)
self.draw.line(((adj_s_x,adj_s_y),(adj_e_x,adj_e_y)), fill = colour)
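# For reference, the transform used here and throughout draw_map is, in
# effect (illustrative only, not executed anywhere):
# canvas_x = XDIM/2 + (world_x - x_offset) / scaling
# canvas_y = YDIM/2 - (world_y - y_offset) / scaling
# i.e. pan by the current offsets, scale, then recentre on the canvas,
# with y negated because canvas y grows downwards.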
# Draws a small chevron ('hat') above or below a marker to show whether the
# object sits above or below the currently displayed z plane.
def draw_hat(self,working_z,adj_x,adj_y,fc):
if working_z > self.z_offset:
self.draw.line(((adj_x - CROSSSIZE,adj_y - (2 * CROSSSIZE)),(adj_x,adj_y - (3 * CROSSSIZE))), fill = fc, width = CROSSWIDTH)
self.draw.line(((adj_x,adj_y - (3 * CROSSSIZE)),(adj_x + CROSSSIZE,adj_y - (2 * CROSSSIZE))), fill = fc, width = CROSSWIDTH)
else:
self.draw.line(((adj_x - CROSSSIZE,adj_y + (2 * CROSSSIZE)),(adj_x,adj_y + (3 * CROSSSIZE))), fill = fc, width = CROSSWIDTH)
self.draw.line(((adj_x,adj_y + (3 * CROSSSIZE)),(adj_x + CROSSSIZE,adj_y + (2 * CROSSSIZE))), fill = fc, width = CROSSWIDTH)
def save(self):
# Save a .png of the current canvas.
filename = 'output.png'
self.pil_image.save(filename)
# Save a .csv file with stars in the current filter list.
filename = 'output.csv'
with open(filename, 'w') as opened:
opened.write('System,X,Y,Z,GalmapX,GalmapY,GalmapZ\n')
# Filtered views export self.deferred; otherwise export the full
# alphabetical list. The Galmap columns reuse x, z, y in that order
# (axis convention swap).
rows = self.deferred if self.filter_by_target != '' else self.deferred_alpha
for f in rows:
opened.write(f.name + ',')
opened.write(str(f.x) + ',')
opened.write(str(f.y) + ',')
opened.write(str(f.z) + ',')
opened.write(str(f.x) + ',')
opened.write(str(f.z) + ',')
opened.write(str(f.y))
opened.write('\n')
# Entry boxes with an attached label.
class Entry_Box():
def __init__(self,master,nametext,default,w1,w2):
# Create a frame for this entry box.
self.frame = Frame(master, padx = 6)
self.frame.pack(side = LEFT)
# Create a label.
self.label = Label(self.frame,text = nametext,width = w1)
self.label.pack(side = LEFT)
# Create an entry box.
self.entry = Entry(self.frame, width = w2)
self.entry.pack(side = LEFT)
self.entry.insert(0,default)
# Class to hold details for the hand-authored sectors.
class ha_sec():
def __init__(self,name,x,y,z,r,state,sec_type,priority,a_nebula,a_star):
self.name = name
self.x = x
self.y = y
self.z = z
self.r = r
self.state = state
self.sec_type = sec_type
self.priority = priority
self.a_nebula = a_nebula
self.a_star = a_star
# Class to hold details for POI.
class poi():
def __init__(self,name,x,y,z,poi_type,star_system,body,lat,lon,notes,shipyard):
self.name = name
self.x = x
self.y = y
self.z = z
self.poi_type = poi_type
self.star_system = star_system
self.body = body
self.lat = lat
self.lon = lon
self.notes = notes
self.shipyard = shipyard
# setattr is for wimps and the competent.
# Class to hold details for tourist locations.
class tourist():
def __init__(self,number,name,system,x,y,z,description,body,location,distance):
self.number = number
self.name = name
self.system = system
self.x = x
self.y = y
self.z = z
self.description = description
self.body = body # Body the POI is near or on.
self.location = location # Whether the POI is in orbit or on the surface.
self.distance = distance # Distance from jump-in point.
class rare():
def __init__(self,system,station,name,quantity,x,y,z,distance):
self.system = system
self.station = station
self.name = name
self.quantity = quantity
self.x = x
self.y = y
self.z = z
self.distance = distance
class pulsar():
def __init__(self,system,x,y,z,status):
self.name = system
self.x = x
self.y = y
self.z = z
self.status = status
self.distance = ((x ** 2) + (y ** 2) + (z ** 2)) ** 0.5 # Is this needed for anything?
class player_faction():
def __init__(self,name,superpower,government,system,x,y,z,state,valid):
self.name = name
self.superpower = superpower
self.government = government
self.system = system
self.x = x
self.y = y
self.z = z
self.state = state
self.valid = valid
class findiv():
def __init__(self,name,x,y,z,distance):
self.name = name
self.x = x
self.y = y
self.z = z
self.distance = distance
def read_sectors_file(filename):
ha_sec_list = []
with open(filename,'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
name = str(values[0])
x = float(values[1])
y = float(values[2])
z = float(values[3])
r = float(values[4])
state = str(values[5]) # Reads whether the sector is open, locked, or one of the erroneous sectors from the original dataset.
sec_type = str(values[6]) # Reads the type of sector - if it's an open cluster or nebula or what have you.
priority = int(values[8])
a_nebula = str(values[10])
a_star = str(values[11])
new_ha_sec = ha_sec(name,x,y,z,r,state,sec_type,priority,a_nebula,a_star)
ha_sec_list.append(new_ha_sec)
except (IndexError, ValueError):
pass # Header row or malformed line; skip it.
return ha_sec_list
def read_poi_file(filename):
poi_list = []
with open(filename,'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
name = str(values[0])
x = float(values[1])
y = float(values[2])
z = float(values[3])
poi_type = str(values[4])
star_system = str(values[5])
body = str(values[6])
try:
lat = float(values[7])
lon = float(values[8])
except (IndexError, ValueError):
lat = 0
lon = 0
notes = str(values[9])
shipyard = str(values[10])
new_poi = poi(name,x,y,z,poi_type,star_system,body,lat,lon,notes,shipyard)
poi_list.append(new_poi)
except (IndexError, ValueError):
pass # Header row or malformed line; skip it.
return poi_list
##def read_edsm_file(filename):
## edsm_list = []
## with open(filename,'r') as opened:
## readtext = opened.read()
##
## lines = readtext.split('\n')
##
## for line in lines:
## values = line.split(',')
## try:
## name = str(values[2])
## x = float(values[4])
## y = float(values[5])
## z = float(values[6])
## poi_type = str(values[1])
## star_system = str(values[3])
## body = ''
## lat = 0
## lon = 0
## notes = str(values[7])
## shipyard = ''
##
## new_poi = poi(name,x,y,z,poi_type,star_system,body,lat,lon,notes,shipyard)
## edsm_list.append(new_poi)
##
## except:
## alice = 'do nowt'
##
## return edsm_list
def read_tourist_file(filename):
tourist_list = []
with open(filename,'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
number = int(values[0])
name = str(values[1])
system = str(values[2])
x = float(values[3])
y = float(values[4])
z = float(values[5])
description = str(values[6])
body = str(values[8])
location = str(values[9])
distance = str(values[10])
new_tourist = tourist(number,name,system,x,y,z,description,body,location,distance)
tourist_list.append(new_tourist)
except (IndexError, ValueError):
pass # Header row or malformed line; skip it.
return tourist_list
def read_rares_file(filename):
rares_list = []
with open(filename,'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
system = str(values[0])
station = str(values[1])
name = str(values[2])
quantity = str(values[3])
x = float(values[4])
y = float(values[5])
z = float(values[6])
distance = int(values[7])
new_rare = rare(system,station,name,quantity,x,y,z,distance)
rares_list.append(new_rare)
except (IndexError, ValueError):
pass # Header row or malformed line; skip it.
return rares_list
def read_pulsars_file(filename):
pulsar_list = []
with open(filename, 'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
system = str(values[0])
x = float(values[1])
y = float(values[2])
z = float(values[3])
status = str(values[4])
new_pulsar = pulsar(system,x,y,z,status)
pulsar_list.append(new_pulsar)
except (IndexError, ValueError):
pass # Header row or malformed line; skip it.
return pulsar_list
def read_players_file(filename):
player_list = []
with open(filename, 'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
name = str(values[0])
superpower = str(values[1])
government = str(values[2])
system = str(values[3])
x = float(values[4])
y = float(values[5])
z = float(values[6])
state = str(values[7])
valid = str(values[10])
new_player_faction = player_faction(name,superpower,government,system,x,y,z,state,valid)
player_list.append(new_player_faction)
except (IndexError, ValueError):
pass # Header row or malformed line; skip it.
return player_list
def read_findiv_file(filename):
findiv_list = []
with open(filename, 'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
name = str(values[0])
x = float(values[1])
y = float(values[2])
z = float(values[3])
distance = float(values[4])
new_findiv = findiv(name,x,y,z,distance)
findiv_list.append(new_findiv)
except (IndexError, ValueError):
pass # Header row or malformed line; skip it.
return findiv_list
# Find the radius of a given sector on a given z plane.
def radius_on_plane(z,r,z_target):
d = z - z_target
right = (r ** 2) - (d ** 2)
r_target = right ** 0.5
# If the plane misses the sphere, 'right' is negative and the root comes
# back complex (under Python 3), failing this check and returning zero.
if isinstance(r_target, (int, float)):
r_return = round(r_target,1)
else:
r_return = 0
return r_return
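# Quick sanity check of the slice geometry (illustrative, not executed):
# a sector centred at z = 0 with radius 100, sliced at z = 60, shows a
# circle of radius sqrt(100**2 - 60**2) = 80.0, while a slice at z = 150
# misses it entirely:
# radius_on_plane(0, 100, 60) -> 80.0
# radius_on_plane(0, 100, 150) -> 0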
# Find which sectors are present at a given position.
def current_member_of(x,y,z):
current = []
for sector in ha_sec_list:
sx = sector.x
sy = sector.y
sz = sector.z
sr = sector.r
if (((sx-x) ** 2) + ((sy-y) ** 2) + ((sz-z) ** 2)) < (sr ** 2):
current.append(sector.name)
# Reverse the list, to give the highest priority in real terms (lowest number) first.
current.reverse()
return current
# Find the single primary sector present at a given position.
def single_member_of(x,y,z):
current = []
for sector in ha_sec_list:
sx = sector.x
sy = sector.y
sz = sector.z
sr = sector.r
if (((sx-x) ** 2) + ((sy-y) ** 2) + ((sz-z) ** 2)) < (sr ** 2):
current.append(sector.name)
# Reverse the list, to give the highest priority in real terms (lowest number) first.
current.reverse()
try:
result = current[0]
except:
result = ''
return result
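# Example of the sphere-membership test above (hypothetical sector):
# a sector at (0, 0, 0) with radius 50 contains the point (10, 10, 10),
# since 10**2 + 10**2 + 10**2 = 300 < 50**2 = 2500.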
# Find which tourist destinations and POI are present at a given position. (2d only) Should maybe move this inside the main App class?
def current_tourist(x,y,scaling,d_lm,d_pr,d_ra,d_to,d_pf,d_ed,d_fi,highlight_target,deferred):
current = []
# Might as well catch POI here as well.
if d_lm == 1:
for landmark in poi_list:
lx = landmark.x
ly = landmark.y
lr = POIACC * scaling
if (((lx-x) ** 2) + ((ly-y) ** 2)) < (lr ** 2):
append_this = True
# This is a bit clunky. Depending on the amount of information available on the POI, draw its system, body and lat/lon.
if landmark.star_system != '':
if landmark.lon != 0 and landmark.lat != 0:
landmark_text = landmark.name + ' (' + landmark.star_system + ' ' + landmark.body + ' at ' + str(landmark.lat) + ',' + str(landmark.lon) + ')'
else:
landmark_text = landmark.name + ' (' + landmark.star_system + ' ' + landmark.body + ')'
else:
landmark_text = landmark.name
if landmark.poi_type == 'Landmark' or landmark.poi_type == 'Jumponium':
if mainapp.draw_landmark.get() == 0:
append_this = False
if append_this == True:
current.append(landmark_text)
## # Might as well catch edsm landmarks here as well.
## if d_ed == 1:
## for landmark in edsm_list:
## lx = landmark.x
## ly = landmark.y
## lr = POIACC * scaling
## if (((lx-x) ** 2) + ((ly-y) ** 2)) < (lr ** 2):
## # This is a bit clunky. Depending on the amount of information available on the POI, draw its system, body and lat/lon.
## if landmark.star_system != '':
## if landmark.lon != 0 and landmark.lat != 0:
## landmark_text = landmark.name + ' (' + landmark.star_system + ' ' + landmark.body + ' at ' + str(landmark.lat) + ',' + str(landmark.lon) + ')'
## else:
## landmark_text = landmark.name + ' (' + landmark.star_system + ' ' + landmark.body + ')'
## else:
## landmark_text = landmark.name
## current.append(landmark_text)
# And pulsars.
if d_pr == 1:
for psr in pulsar_list:
px = psr.x
py = psr.y
pr = PSRACC * scaling
if (((px-x) ** 2) + ((py-y) ** 2)) < (pr ** 2):
if psr.name != '':
psr_text = psr.name
current.append(psr_text)
# And player factions.
if d_pf == 1:
for pf in player_list:
pfx = pf.x
pfy = pf.y
pfr = PFACC * scaling
if (((pfx-x) ** 2) + ((pfy-y) ** 2) < (pfr ** 2)):
if pf.name != '' and pf.valid == 'Yes':
pf_text = pf.name
pf_text += ' (' + pf.system + ')'
current.append(pf_text)
# And might as well catch rare goods here.
if d_ra == 1:
for rare in rares_list:
rx = rare.x
ry = rare.y
rr = RAREACC * scaling
if (((rx-x) ** 2) + ((ry-y) ** 2)) < (rr ** 2):
if rare.name != '':
rare_text = rare.name + ' (' + rare.system + ','
available = ' ' + rare.quantity
rare_text += available
distance = ' @ ' + str(rare.distance) + ' ls)'
rare_text += distance
current.append(rare_text)
# Now go through the tourist destinations. Should maybe add the bodies to this list.
if d_to == 1:
for destination in tourist_list:
dx = destination.x
dy = destination.y
dr = TOURISTACC * scaling
if (((dx-x) ** 2) + ((dy-y) ** 2)) < (dr ** 2):
tourist_text = destination.name
if destination.number != 0:
tourist_text += ' (#' + str(destination.number) + ', ' + destination.system
else:
tourist_text += ' (#???, ' + destination.system
if destination.body != '':
tourist_text += ' ' + destination.body + ' ' + destination.location
if destination.distance != '':
tourist_text += ', ' + destination.distance + ' ls'
tourist_text += ')'
current.append(tourist_text)
# Let's try adding from the full list of individual stars; this could be slow.
# Need to change this to pull only from the filtered lists.
if d_fi == 1:
fr = FINDIVACC * scaling
if highlight_target == '':
for f in findiv_list:
if (((f.x - x) ** 2) + ((f.y - y) ** 2)) < (fr ** 2):
findiv_text = f.name
current.append(findiv_text)
else:
for f in deferred:
if highlight_target == '*':
if (((f.x - x) ** 2) + ((f.y - y) ** 2)) < (fr ** 2):
findiv_text = f.name
current.append(findiv_text)
elif highlight_target.upper() in f.name.upper():
if (((f.x - x) ** 2) + ((f.y - y) ** 2)) < (fr ** 2):
findiv_text = f.name
current.append(findiv_text)
return current
# Finds the nearest tourist POI that hasn't got a number yet. Just for gathering data.
def find_nearest_unchecked(t_list,x,y,z):
bestfit = ''
previous = ''
bestdistance = 1000000
previousbest = 1000000
for possible in t_list:
newdistance = ((x-possible.x)**2) + ((y-possible.y)**2) + ((z-possible.z)**2)
newdistance = newdistance ** 0.5
if newdistance < bestdistance:
if possible.number == 0:
previous = bestfit
previousbest = bestdistance
bestdistance = newdistance
bestfit = possible.name
return bestfit, bestdistance, previous, previousbest
# Global variables for controlling the display.
XDIM = 580
YDIM = 580
FONTSIZE = 10
CROSSSIZE = 2 # Size of cross markers.
CROSSWIDTH = 1 # Width of line for crosses - doesn't look very good if set higher than 1, though.
NEBSIZE = 3 # Size of nebulae.
POISIZE = 2 # Size of POI markers.
POI_Z_RANGE = 52 # Z range in which a POI marker will be drawn without a hat.
PSRSIZE = 1 # Size of pulsar markers.
PSR_Z_RANGE = 52 # Z range in which a Pulsar marker will be drawn without a hat. Could make this much larger than the others?
TOURISTSIZE = 1 # Size of Tourist markers.
TOURIST_Z_RANGE = 52 # Z range in which a Tourist marker will be drawn without a hat.
RARESIZE = 1 # Size of Rare Goods markers
RARE_Z_RANGE = 52 # Z range in which a Rare Goods marker will be drawn without a hat.
ZOOMSPEED = 2
RARE_MAX_DISTANCE = 55000 # Maximum distance that a rare good will be considered as practical.
PF_Z_RANGE = 52
RR_LENGTH = 2000 # Length of RR line to draw.
SOLO_ASSUMED_RADIUS = 110 # Effective radius of a "sector" which contains only individual named stars.
SEARCH_SIZE_I = 5 # Radius of inner search circle icon.
SEARCH_SIZE_O = 8 # Radius of outer search circle icon.
S_S_EXT = 5 # Length of search circle lines.
# Global variables for controlling the base accuracy of the mouseover searches. Can maybe do away with these now the scaling works properly.
PSRACC = 6
POIACC = 6
RAREACC = 6
TOURISTACC = 6
PFACC = 6
FINDIVACC = 6
# Variables that control the z +/- and scaling when the buttons are pressed.
Z_MOVE_RATE = 100 # Z axis change. Could change this to a "z-slice-size" and adjust the various XXX_Z_RANGE appropriately to half the slice size.
S_MOVE_RATE = 2 # Scaling change.
# Read sectors file.
filename = 'seclist_ra.csv'
ha_sec_list = read_sectors_file(filename)
ha_sec_list.sort(key = lambda sector:sector.priority, reverse = True)
# Compile a list of known ha sector names.
known_ha_secs = []
for sector in ha_sec_list:
known_ha_secs.append(sector.name)
# Read poi file.
filename = 'poilist.csv'
poi_list = read_poi_file(filename)
### Read edsm locations file.
##filename = 'json edd landmarks.csv'
##edsm_list = read_edsm_file(filename)
# Read tourist file.
filename = 'tourist_3.csv'
tourist_list = read_tourist_file(filename)
# Read rare goods file.
filename = 'rares.csv'
rares_list = read_rares_file(filename)
# Read pulsars file.
filename = 'pulsars.csv'
pulsar_list = read_pulsars_file(filename)
# Read player factions file.
filename = 'pfac.csv'
player_list = read_players_file(filename)
# Read full individual stars file.
filename = 'findiv.csv'
findiv_list = read_findiv_file(filename)
# Main loop.
root = Tk()
root.title('Jackie\'s Map (v.' + version + ')')
mainapp = App(root)
root.mainloop()
|
KayJohnston/jackies-map
|
jmap3t.py
|
Python
|
bsd-3-clause
| 82,087
|
[
"Galaxy"
] |
641301f00a4499da0ef483fe2534796297708d50df8d8aad1222993f04f2f785
|
"""
Caffe network visualization: draw the NetParameter protobuffer.
.. note::
This requires pydot>=1.0.2, which is not included in requirements.txt since
it requires graphviz and other prerequisites outside the scope of
Caffe.
"""
from apollocaffe.proto import caffe_pb2
import pydot
# Internal layer and blob styles.
LAYER_STYLE_DEFAULT = {'shape': 'record',
'fillcolor': '#6495ED',
'style': 'filled'}
NEURON_LAYER_STYLE = {'shape': 'record',
'fillcolor': '#90EE90',
'style': 'filled'}
BLOB_STYLE = {'shape': 'octagon',
'fillcolor': '#E0E0E0',
'style': 'filled'}
def get_pooling_types_dict():
"""Get dictionary mapping pooling type number to type name
"""
desc = caffe_pb2.PoolingParameter.PoolMethod.DESCRIPTOR
d = {}
for k, v in desc.values_by_name.items():
d[v.number] = k
return d
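# With the stock Caffe proto definitions this typically evaluates to
# {0: 'MAX', 1: 'AVE', 2: 'STOCHASTIC'}; the exact contents depend on the
# PoolMethod enum compiled into apollocaffe.proto.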
def get_edge_label(layer):
"""Define edge label based on layer type.
"""
if layer.type == 'Data':
edge_label = 'Batch ' + str(layer.data_param.batch_size)
elif layer.type == 'Convolution':
edge_label = str(layer.convolution_param.num_output)
elif layer.type == 'InnerProduct':
edge_label = str(layer.inner_product_param.num_output)
else:
edge_label = '""'
return edge_label
def get_layer_label(layer, rankdir):
"""Define node label based on layer type.
Parameters
----------
layer : caffe_pb2.LayerParameter
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
Returns
-------
string :
A label for the current layer
"""
if rankdir in ('TB', 'BT'):
# If graph orientation is vertical, horizontal space is free and
# vertical space is not; separate words with spaces
separator = ' '
else:
# If graph orientation is horizontal, vertical space is free and
# horizontal space is not; separate words with newlines
separator = r'\n'
if layer.type == 'Convolution':
# Outer double quotes needed or else colon characters don't parse
# properly
node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
layer.type,
separator,
layer.convolution_param.kernel_size,
separator,
layer.convolution_param.stride,
separator,
layer.convolution_param.pad)
elif layer.type == 'Pooling':
pooling_types_dict = get_pooling_types_dict()
node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
pooling_types_dict[layer.pooling_param.pool],
layer.type,
separator,
layer.pooling_param.kernel_size,
separator,
layer.pooling_param.stride,
separator,
layer.pooling_param.pad)
else:
node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type)
return node_label
def choose_color_by_layertype(layertype):
"""Define colors for nodes based on the layer type.
"""
color = '#6495ED' # Default
if layertype == 'Convolution':
color = '#FF5050'
elif layertype == 'Pooling':
color = '#FF9900'
elif layertype == 'InnerProduct':
color = '#CC33FF'
return color
def get_pydot_graph(caffe_net, rankdir, label_edges=True):
"""Create a data structure which represents the `caffe_net`.
Parameters
----------
caffe_net : object
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
label_edges : boolean, optional
Label the edges (default is True).
Returns
-------
pydot graph object
"""
pydot_graph = pydot.Dot(caffe_net.name,
graph_type='digraph',
rankdir=rankdir)
pydot_nodes = {}
pydot_edges = []
for layer in caffe_net.layer:
node_label = get_layer_label(layer, rankdir)
node_name = "%s_%s" % (layer.name, layer.type)
if (len(layer.bottom) == 1 and len(layer.top) == 1 and
layer.bottom[0] == layer.top[0]):
# We have an in-place neuron layer.
pydot_nodes[node_name] = pydot.Node(node_label,
**NEURON_LAYER_STYLE)
else:
# Copy the default style so the shared module-level dict is not mutated.
layer_style = dict(LAYER_STYLE_DEFAULT)
layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
for bottom_blob in layer.bottom:
pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob,
**BLOB_STYLE)
edge_label = '""'
pydot_edges.append({'src': bottom_blob + '_blob',
'dst': node_name,
'label': edge_label})
for top_blob in layer.top:
# Style top blobs like bottom blobs for a consistent look.
pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob), **BLOB_STYLE)
if label_edges:
edge_label = get_edge_label(layer)
else:
edge_label = '""'
pydot_edges.append({'src': node_name,
'dst': top_blob + '_blob',
'label': edge_label})
# Now, add the nodes and edges to the graph.
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edge in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edge['src']],
pydot_nodes[edge['dst']],
label=edge['label']))
return pydot_graph
def draw_net(caffe_net, rankdir, ext='png'):
"""Draws a caffe net and returns the image string encoded using the given
extension.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
ext : string, optional
The image extension (the default is 'png').
Returns
-------
string :
The image data of the graph, encoded using the given extension.
"""
return get_pydot_graph(caffe_net, rankdir).create(format=ext)
def draw_net_to_file(caffe_net, filename, rankdir='LR'):
"""Draws a caffe net, and saves it to file using the format given as the
file extension. Use '.raw' to output raw text that you can manually feed
to graphviz to draw graphs.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
filename : string
The path to a file where the networks visualization will be stored.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
"""
ext = filename[filename.rfind('.')+1:]
with open(filename, 'wb') as fid:
fid.write(draw_net(caffe_net, rankdir, ext))
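# Minimal usage sketch (assumes a local 'net.prototxt'; file names are
# illustrative):
# from google.protobuf import text_format
# net = caffe_pb2.NetParameter()
# with open('net.prototxt') as f:
# text_format.Merge(f.read(), net)
# draw_net_to_file(net, 'net.png', rankdir='LR')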
|
pcmoritz/Strada.jl
|
deps/src/caffe/python/apollocaffe/utils/draw.py
|
Python
|
bsd-2-clause
| 7,124
|
[
"NEURON"
] |
64d42afb15a701cc2733582c14a75d66b8520ff1cdc70434af89159f3012ec0e
|
#!/usr/bin/env python
"""
@author: Benjamin Chretien
"""
import math
import numpy as np
from mayavi import mlab
j = complex(0,1)
min_x = -10.
max_x = 10.
min_y = -8.
max_y = 8.
root0 = 1.
lamda = 0.01/abs(max_x)
step_size = 0.1
def f_evolution_element(x, y):
root_real = 2.
roots = np.zeros((3,3))
if y < 0:
dP = np.poly([root0, root_real + y * j, root_real - y * j])
elif y > 0:
dP = np.poly([root0, root_real+y, root_real-y])
else:
dP = np.poly([root0, root_real, -root_real])
P = lamda*np.polyint(dP)
cplx_roots = np.roots(dP)
roots[:,0] = [_.real for _ in cplx_roots if _.real < max_x and _.real > min_x]
roots[:,0] = np.sort(roots[:,0])
z = np.polyval(P, x)
for i in xrange(roots.shape[0]):
roots[i,1] = y
roots[i,2] = np.polyval(P, roots[i,0])
return z,roots
def f_evolution(x, y):
z = np.zeros((x.size, y.size))
root_real = 2.
roots = np.zeros((3,y.size,3))
for k in xrange(y.size):
if y[k] < 0:
dP = np.poly([root0, root_real + y[k] * j, root_real - y[k] * j])
elif y[k] > 0:
dP = np.poly([root0, root_real + y[k], root_real-y[k]])
else:
dP = np.poly([root0, root_real, -root_real])
P = lamda*np.polyint(dP)
cplx_roots = np.roots(dP)
roots[:,k,0] = [_.real for _ in cplx_roots if _.real < max_x and _.real > min_x]
roots[:,k,0] = np.sort(roots[:,k,0])
for i in xrange(x.size):
z[i,k] = np.polyval(P, x[i])
for i in xrange(roots.shape[0]):
roots[i,k,1] = y[k]
roots[i,k,2] = np.polyval(P, roots[i,k,0])
return z,roots
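# For reference: np.poly builds polynomial coefficients from roots and
# np.polyint integrates them, e.g. np.poly([1, 2]) -> [1., -3., 2.]
# (i.e. x**2 - 3*x + 2), whose indefinite integral is
# x**3/3 - 1.5*x**2 + 2*x.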
# Grid
X = np.arange(min_x, max_x + step_size, step_size)
Y = np.arange(min_y, max_y + step_size, step_size)
# Compute data
Z_evol,roots_evol = f_evolution(X,Y)
fig = mlab.figure('Complex roots', bgcolor=(0, 0, 0), size=(800, 600))
# Clamp colors to get a better gradient near the minimum
vmin_1 = np.min(Z_evol[:,0:10])
vmax_1 = vmin_1 + 0.02*(np.max(Z_evol[:,0:10]) - vmin_1)
# Create the surface
s_poly = mlab.surf(X[:],Y[:],Z_evol[:,:], colormap='jet',
representation='surface',
vmin = vmin_1, vmax = vmax_1,
figure=fig)
# Real root
x = roots_evol[0,0:math.floor(len(Y)/2)+1,0].flatten(0)
y = roots_evol[0,0:math.floor(len(Y)/2)+1,1].flatten(0)
z = roots_evol[0,0:math.floor(len(Y)/2)+1,2].flatten(0)
trajectory1 = mlab.plot3d(x[:], y[:], z[:],
color=(1,0,0), tube_radius=None)
# Real part of conjugate root
x = roots_evol[2,0:math.floor(len(Y)/2)+1,0].flatten(0)
y = roots_evol[2,0:math.floor(len(Y)/2)+1,1].flatten(0)
z = roots_evol[2,0:math.floor(len(Y)/2)+1,2].flatten(0)
trajectory2 = mlab.plot3d(x[:], y[:], z[:],
color=(1,1,0), tube_radius=None)
# Real root
x = roots_evol[2,math.floor(len(Y)/2):-1,0].flatten(0)
y = roots_evol[2,math.floor(len(Y)/2):-1,1].flatten(0)
z = roots_evol[2,math.floor(len(Y)/2):-1,2].flatten(0)
trajectory3 = mlab.plot3d(x[:], y[:], z[:],
color=(1,1,0), tube_radius=None)
# Real root
x = roots_evol[0,math.floor(len(Y)/2):-1,0].flatten(0)
y = roots_evol[0,math.floor(len(Y)/2):-1,1].flatten(0)
z = roots_evol[0,math.floor(len(Y)/2):-1,2].flatten(0)
trajectory4 = mlab.plot3d(x[:], y[:], z[:],
color=(1,0,0), tube_radius=None)
# Real root
x = roots_evol[1,math.floor(len(Y)/2):-1,0].flatten(0)
y = roots_evol[1,math.floor(len(Y)/2):-1,1].flatten(0)
z = roots_evol[1,math.floor(len(Y)/2):-1,2].flatten(0)
trajectory5 = mlab.plot3d(x[:], y[:], z[:],
color=(1,1,1), tube_radius=None)
# Separation y = 0
x = X
y = [0 for _ in xrange(len(x))]
z = Z_evol[:,len(Y)/2]
trajectory6 = mlab.plot3d(x[:-2], y[:-2], z[:-2],
color=(1,1,1), tube_radius=None,
opacity=0.5)
# Create the axes
mlab.axes(s_poly, color=(.7, .7, .7),
xlabel='x', ylabel='y < 0: Imag(conj_root)\ny > 0: +/- real root', zlabel='P(x)')
# Activate antialiasing
#fig.scene.render_window.aa_frames = 8
# Show the result
mlab.show()
|
bchretien/Python-sandbox
|
src/poly_surface_extrema.py
|
Python
|
bsd-2-clause
| 4,053
|
[
"Mayavi"
] |
029db274f15e54bf79cb99f3b34b9730c5b83b7eddb56c6a9a151f79ec2f567a
|
# Copyright (C) 2009 LibreSoft
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Carlos Garcia Campos <carlosgc@libresoft.es>
if __name__ == '__main__':
import sys
sys.path.insert(0, "../")
from pycvsanaly2.AsyncQueue import AsyncQueue, TimeOut
import repositoryhandler.backends as rh
import threading
class JobPool(object):
POOL_SIZE = 5
def __init__(self, repo, repo_uri, jobs_done=True, poolsize=POOL_SIZE,
queuesize=None):
self.jobs_done = jobs_done
self.queue = AsyncQueue(queuesize or 0)
if self.jobs_done:
self.done = AsyncQueue()
for i in range(poolsize):
rep = repo.copy()
thread = threading.Thread(target=self._job_thread,
args=(rep, repo_uri))
thread.setDaemon(True)
thread.start()
def _job_thread(self, repo, repo_uri):
while True:
job = self.queue.get()
job.run(repo, repo_uri)
self.queue.done()
if self.jobs_done:
self.done.put(job)
def push(self, job):
self.queue.put(job)
# Default timeout is 5 minutes
def get_next_done(self, timeout=(5 * 60)):
if not self.jobs_done:
return None
try:
job = self.done.get(timeout)
self.done.done()
return job
except TimeOut:
return None
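# Consumer-side sketch (illustrative; 'handle' stands in for caller logic):
# job = pool.get_next_done()
# while job is not None:
# handle(job)
# job = pool.get_next_done()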
def get_next_done_unlocked(self):
if not self.jobs_done:
return None
if self.done.empty_unlocked():
return None
return self.done.get_unlocked()
def join(self):
self.queue.join()
class Job(object):
def __init__(self):
self.failed = False
def run(self, repo, repo_uri):
raise NotImplementedError
if __name__ == '__main__':
class JobLastRev(Job):
def __init__(self, module):
self.module = module
def run(self, repo, repo_uri):
uri = repo_uri + self.module
print "%s -> %s" % (uri, repo.get_last_revision(uri))
repo_uri = 'https://svn.forge.morfeo-project.org/svn/libresoft-tools/'
modules = ['cvsanaly', 'octopus', 'cmetrics', 'repositoryhandler',
'retrieval_system', 'bicho', 'pandaRest']
repo = rh.create_repository('svn', repo_uri)
repo_uri = 'https://svn.forge.morfeo-project.org/svn/libresoft-tools/'
pool = JobPool(repo, repo_uri, False)
for module in modules:
job = JobLastRev(module)
pool.push(job)
pool.join()
|
xybai/MininGit
|
pycvsanaly2/extensions/Jobs.py
|
Python
|
gpl-2.0
| 3,250
|
[
"Octopus"
] |
583d7dfa7ee77f8516847c99a9801b5ba8f6f373d656555bb8ec793567c004ce
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Discogs album search support to the autotagger. Requires the
discogs-client library.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import beets.ui
from beets import logging
from beets import config
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin
from beets.util import confit
from discogs_client import Release, Client
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
import beets
import re
import time
import json
import socket
import httplib
import os
# Silence spurious INFO log lines generated by urllib3.
urllib3_logger = logging.getLogger('requests.packages.urllib3')
urllib3_logger.setLevel(logging.CRITICAL)
USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__)
# Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (ConnectionError, socket.error, httplib.HTTPException,
ValueError, # JSON decoding raises a ValueError.
DiscogsAPIError)
class DiscogsPlugin(BeetsPlugin):
def __init__(self):
super(DiscogsPlugin, self).__init__()
self.config.add({
'apikey': 'rAzVUQYRaoFjeBjyWuWZ',
'apisecret': 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy',
'tokenfile': 'discogs_token.json',
'source_weight': 0.5,
})
self.config['apikey'].redact = True
self.config['apisecret'].redact = True
self.discogs_client = None
self.register_listener('import_begin', self.setup)
def setup(self, session=None):
"""Create the `discogs_client` field. Authenticate if necessary.
"""
c_key = self.config['apikey'].get(unicode)
c_secret = self.config['apisecret'].get(unicode)
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except IOError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata['token']
secret = tokendata['secret']
self.discogs_client = Client(USER_AGENT, c_key, c_secret,
token, secret)
def reset_auth(self):
"""Delete toke file & redo the auth steps.
"""
os.remove(self._tokenfile())
self.setup()
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confit.Filename(in_app_dir=True))
def authenticate(self, c_key, c_secret):
# Get the link for the OAuth page.
auth_client = Client(USER_AGENT, c_key, c_secret)
try:
_, _, url = auth_client.get_authorize_url()
except CONNECTION_ERRORS as e:
self._log.debug('connection error: {0}', e)
raise beets.ui.UserError('communication with Discogs failed')
beets.ui.print_("To authenticate with Discogs, visit:")
beets.ui.print_(url)
# Ask for the code and validate it.
code = beets.ui.input_("Enter the code:")
try:
token, secret = auth_client.get_access_token(code)
except DiscogsAPIError:
raise beets.ui.UserError('Discogs authorization failed')
except CONNECTION_ERRORS as e:
self._log.debug(u'connection error: {0}', e)
raise beets.ui.UserError('Discogs token request failed')
# Save the token for later use.
self._log.debug('Discogs token {0}, secret {1}', token, secret)
with open(self._tokenfile(), 'w') as f:
json.dump({'token': token, 'secret': secret}, f)
return token, secret
def album_distance(self, items, album_info, mapping):
"""Returns the album distance.
"""
dist = Distance()
if album_info.data_source == 'Discogs':
dist.add('source', self.config['source_weight'].as_number())
return dist
def candidates(self, items, artist, album, va_likely):
"""Returns a list of AlbumInfo objects for discogs search results
matching an album and artist (if not various).
"""
if not self.discogs_client:
return
if va_likely:
query = album
else:
query = '%s %s' % (artist, album)
try:
return self.get_albums(query)
except DiscogsAPIError as e:
self._log.debug(u'API Error: {0} (query: {1})', e, query)
if e.status_code == 401:
self.reset_auth()
return self.candidates(items, artist, album, va_likely)
else:
return []
except CONNECTION_ERRORS:
self._log.debug('Connection error in album search', exc_info=True)
return []
def album_for_id(self, album_id):
"""Fetches an album by its Discogs ID and returns an AlbumInfo object
or None if the album is not found.
"""
if not self.discogs_client:
return
self._log.debug(u'Searching for release {0}', album_id)
# Discogs-IDs are simple integers. We only look for those at the end
# of an input string so as to avoid confusion with other metadata plugins.
# An optional bracket can follow the integer, as this is how discogs
# displays the release ID on its webpage.
match = re.search(r'(^|\[*r|discogs\.com/.+/release/)(\d+)($|\])',
album_id)
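# Illustrative inputs that all resolve to the same (made-up) ID 123:
# '123', 'r123', '[r123]' and 'http://www.discogs.com/Artist/release/123'.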
if not match:
return None
result = Release(self.discogs_client, {'id': int(match.group(2))})
# Try to obtain title to verify that we indeed have a valid Release
try:
getattr(result, 'title')
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug(u'API Error: {0} (query: {1})', e, result._uri)
if e.status_code == 401:
self.reset_auth()
return self.album_for_id(album_id)
return None
except CONNECTION_ERRORS:
self._log.debug('Connection error in album lookup', exc_info=True)
return None
return self.get_album_info(result)
def get_albums(self, query):
"""Returns a list of AlbumInfo objects for a discogs search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
# TEMPORARY: Encode as ASCII to work around a bug:
# https://github.com/beetbox/beets/issues/1051
# When the library is fixed, we should encode as UTF-8.
query = re.sub(r'(?u)\W+', ' ', query).encode('ascii', "replace")
# Strip medium information from query. Things like "CD1" and "disk 1"
# can also negate an otherwise positive result.
query = re.sub(r'(?i)\b(CD|disc)\s*\d+', '', query)
try:
releases = self.discogs_client.search(query,
type='release').page(1)
except CONNECTION_ERRORS:
self._log.debug("Communication error while searching for {0!r}",
query, exc_info=True)
return []
return [self.get_album_info(release) for release in releases[:5]]
def get_album_info(self, result):
"""Returns an AlbumInfo object for a discogs Release object.
"""
artist, artist_id = self.get_artist([a.data for a in result.artists])
album = re.sub(r' +', ' ', result.title)
album_id = result.data['id']
# Use `.data` to access the tracklist directly instead of the
# convenient `.tracklist` property, which will strip out useful artist
# information and leave us with skeleton `Artist` objects that will
# each make an API call just to get the same data back.
tracks = self.get_tracks(result.data['tracklist'])
albumtype = ', '.join(
result.data['formats'][0].get('descriptions', [])) or None
va = result.data['artists'][0]['name'].lower() == 'various'
if va:
artist = config['va_name'].get(unicode)
year = result.data['year']
label = result.data['labels'][0]['name']
mediums = len(set(t.medium for t in tracks))
catalogno = result.data['labels'][0]['catno']
if catalogno == 'none':
catalogno = None
country = result.data.get('country')
media = result.data['formats'][0]['name']
data_url = result.data['uri']
return AlbumInfo(album, album_id, artist, artist_id, tracks, asin=None,
albumtype=albumtype, va=va, year=year, month=None,
day=None, label=label, mediums=mediums,
artist_sort=None, releasegroup_id=None,
catalognum=catalogno, script=None, language=None,
country=country, albumstatus=None, media=media,
albumdisambig=None, artist_credit=None,
original_year=None, original_month=None,
original_day=None, data_source='Discogs',
data_url=data_url)
def get_artist(self, artists):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of discogs album or track artists.
"""
artist_id = None
bits = []
for i, artist in enumerate(artists):
if not artist_id:
artist_id = artist['id']
name = artist['name']
# Strip disambiguation number.
name = re.sub(r' \(\d+\)$', '', name)
# Move articles to the front.
name = re.sub(r'(?i)^(.*?), (a|an|the)$', r'\2 \1', name)
bits.append(name)
if artist['join'] and i < len(artists) - 1:
bits.append(artist['join'])
artist = ' '.join(bits).replace(' ,', ',') or None
return artist, artist_id
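# Worked example with made-up data: [{'name': 'A', 'join': '&', 'id': 1},
# {'name': 'B', 'join': '', 'id': 2}] yields ('A & B', 1); a disambiguated
# name such as 'Mirror (2)' is stripped to 'Mirror', and 'Beatles, The'
# becomes 'The Beatles'.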
def get_tracks(self, tracklist):
"""Returns a list of TrackInfo objects for a discogs tracklist.
"""
tracks = []
index_tracks = {}
index = 0
for track in tracklist:
# Only real tracks have `position`. Otherwise, it's an index track.
if track['position']:
index += 1
tracks.append(self.get_track_info(track, index))
else:
index_tracks[index + 1] = track['title']
# Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order.
medium = None
medium_count, index_count = 0, 0
for track in tracks:
# Handle special case where a different medium does not indicate a
# new disc, when there is no medium_index and the ordinal of medium
# is not sequential. For example, I, II, III, IV, V. Assume these
# are the track index, not the medium.
medium_is_index = track.medium and not track.medium_index and (
len(track.medium) != 1 or
ord(track.medium) - 64 != medium_count + 1
)
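# E.g. positions 'I', 'II', 'III' carry no trailing digits, so each would
# otherwise be read as a new medium; because they are not sequential
# single letters (A, B, C, ...), they are treated as track indices instead.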
if not medium_is_index and medium != track.medium:
# Increment medium_count and reset index_count when medium
# changes.
medium = track.medium
medium_count += 1
index_count = 0
index_count += 1
track.medium, track.medium_index = medium_count, index_count
# Get `disctitle` from Discogs index tracks. Assume that an index track
# before the first track of each medium is a disc title.
for track in tracks:
if track.medium_index == 1:
if track.index in index_tracks:
disctitle = index_tracks[track.index]
else:
disctitle = None
track.disctitle = disctitle
return tracks
def get_track_info(self, track, index):
"""Returns a TrackInfo object for a discogs track.
"""
title = track['title']
track_id = None
medium, medium_index = self.get_track_index(track['position'])
artist, artist_id = self.get_artist(track.get('artists', []))
length = self.get_track_length(track['duration'])
return TrackInfo(title, track_id, artist, artist_id, length, index,
medium, medium_index, artist_sort=None,
disctitle=None, artist_credit=None)
def get_track_index(self, position):
"""Returns the medium and medium index for a discogs track position.
"""
# medium_index is a number at the end of position. medium is everything
# else. E.g. (A)(1), (Side A, Track )(1), (A)(), ()(1), etc.
match = re.match(r'^(.*?)(\d*)$', position.upper())
if match:
medium, index = match.groups()
else:
self._log.debug(u'Invalid position: {0}', position)
medium = index = None
return medium or None, index or None
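# E.g. 'A1' -> ('A', '1'), '12' -> (None, '12') and '' -> (None, None).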
def get_track_length(self, duration):
"""Returns the track length in seconds for a discogs duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
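# E.g. get_track_length('3:45') -> 225, while malformed durations such as
# '' or '1:23:45' fall through to None.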
|
parapente/beets
|
beetsplug/discogs.py
|
Python
|
mit
| 14,490
|
[
"VisIt"
] |
97ec70e67d22f333ba194407b3bed530931234483850d6f29f3b20ba53c39e26
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import sys
import gettext
from gettext import gettext as _
gettext.textdomain('remindor-common')
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtUiTools import *
import logging
logger = logging.getLogger('remindor_qt')
from remindor_qt.AboutDialog import AboutDialog
from remindor_qt.PreferencesDialog import PreferencesDialog
from remindor_qt.QuickDialog import QuickDialog
from remindor_qt.ReminderDialog import ReminderDialog
from remindor_qt.SimpleDialog import SimpleDialog
from remindor_qt.scheduler_qt import SchedulerQt
from remindor_qt import helpers
from remindor_common.constants import *
from remindor_common.helpers import ManageWindowInfo
from remindor_common.threads import BlogReader
from remindor_common import database as db
class RemindorQtWindow(QMainWindow): #TODO: add font awesome as fallback icons
setup_schedule = True
ok_to_close = False
def __init__(self, dbus_service = None, parent = None):
super(RemindorQtWindow,self).__init__(parent)
self.dbus_service = dbus_service
helpers.setup_ui(self, "RemindorQtWindow.ui", True)
self.resize(700, 300)
self.action_add = self.findChild(QAction, "action_add")
self.action_quick_add = self.findChild(QAction, "action_quick_add")
self.action_edit = self.findChild(QAction, "action_edit")
self.action_postpone = self.findChild(QAction, "action_postpone")
self.action_delete = self.findChild(QAction, "action_delete")
self.action_preferences = self.findChild(QAction, "action_preferences")
self.action_news = self.findChild(QAction, "action_news")
self.action_help = self.findChild(QAction, "action_help")
self.action_close = self.findChild(QAction, "action_close")
self.action_quit = self.findChild(QAction, "action_quit")
self.action_refresh = self.findChild(QAction, "action_refresh")
self.action_clear_icon = self.findChild(QAction, "action_clear_icon")
self.action_bugs = self.findChild(QAction, "action_bugs")
self.action_request = self.findChild(QAction, "action_request")
self.action_translate = self.findChild(QAction, "action_translate")
self.action_donate = self.findChild(QAction, "action_donate")
self.action_ask = self.findChild(QAction, "action_ask")
self.action_website = self.findChild(QAction, "action_website")
self.action_about = self.findChild(QAction, "action_about")
self.action_stop = self.findChild(QAction, "action_stop")
self.translate()
self.info = ManageWindowInfo(helpers.database_file())
self.active_icon = QIcon(helpers.get_data_file("media", "remindor-qt-active.svg"))
self.app_icon = QIcon(helpers.get_data_file("media", "remindor-qt.svg"))
self.attention_icon = QIcon.fromTheme("remindor-qt-attention", QIcon(helpers.get_data_file("media", "remindor-qt-attention.svg")))
self.tray_icons = [QIcon.fromTheme("remindor-qt-active", self.active_icon),
self.active_icon,
QIcon(helpers.get_data_file("media", "remindor-qt-active_dark.svg")),
QIcon.fromTheme("remindor-qt", self.app_icon)]
self.reminder_tree = self.findChild(QTreeWidget, "reminder_tree")
self.reminder_tree.setColumnWidth(0, 200)
edit = QAction(QIcon.fromTheme("gtk-edit", QIcon(":/icons/edit.png")), _("Edit"), self)
edit.triggered.connect(self.on_action_edit_triggered)
self.reminder_tree.addAction(edit)
postpone = QAction(QIcon.fromTheme("go-jump", QIcon(":/icons/postpone.png")), _("Postpone"), self)
postpone.triggered.connect(self.on_action_postpone_triggered)
self.reminder_tree.addAction(postpone)
delete = QAction(QIcon.fromTheme("edit-delete", QIcon(":/icons/delete.png")), _("Delete"), self)
delete.triggered.connect(self.on_action_delete_triggered)
self.reminder_tree.addAction(delete)
self.news_action = self.findChild(QAction, "action_news")
self.tray_menu = QMenu()
self.tray_menu.addAction(QIcon.fromTheme("add", QIcon(":/icons/add.png")), _("Add"), self, SLOT("on_action_add_triggered()"))
self.tray_menu.addAction(QIcon.fromTheme("media-playback-start", QIcon(":/icons/wand.png")), _("Simple Add"), self, SLOT("on_action_simple_add_triggered()"))
self.tray_menu.addAction(QIcon.fromTheme("media-skip-forward", QIcon(":/icons/quick.png")), _("Quick Add"), self, SLOT("on_action_quick_add_triggered()"))
self.tray_menu.addAction(QIcon.fromTheme("media-playback-stop", QIcon(":/icons/delete.png")), _("Stop Sound"), self, SLOT("on_action_stop_triggered()"))
self.tray_menu.addAction(QIcon.fromTheme("stock_properties", QIcon(":/icons/manage.png")), _("Manage"), self, SLOT("show()"))
self.tray_menu.addAction(QIcon.fromTheme("exit", QIcon(":/icons/quit.png")), _("Quit"), self, SLOT("on_action_quit_triggered()")) #TODO: change this when reimplementing x-close button
self.tray_icon = QSystemTrayIcon(self.tray_icons[self.info.indicator_icon], self)
self.tray_icon.setContextMenu(self.tray_menu)
self.tray_icon.show()
self.tray_icon.activated.connect(self.tray_activated)
self.scheduler = SchedulerQt(self.tray_icon, self.attention_icon, self.update_slot, helpers.database_file())
if not self.dbus_service == None:
self.scheduler.add_dbus_service(self.dbus_service)
self.update()
self.updater = QTimer(self)
self.updater.setInterval(1000 * 60 * 60 * 6) #update everything every 1/4 day
self.updater.timeout.connect(self.update_schedule)
b = BlogReader(rssfeed, helpers.database_file())
b.start()
for reminder in self.info.missed_reminders:
self.scheduler.run_alarm(reminder)
def translate(self):
self.setWindowTitle("Manage Reminders")
self.action_add.setText(_("Add"))
self.action_quick_add.setText(_("Quick Add"))
self.action_edit.setText(_("Edit"))
self.action_postpone.setText(_("Postpone"))
self.action_delete.setText(_("Delete"))
self.action_preferences.setText(_("Preferences"))
self.action_news.setText(_("News"))
self.action_help.setText(_("Help"))
self.action_close.setText(_("Close"))
self.action_quit.setText(_("Quit"))
self.action_refresh.setText(_("Refresh"))
self.action_clear_icon.setText(_("Clear Icon"))
self.action_bugs.setText(_("Submit Bugs"))
self.action_request.setText(_("Request Feature"))
self.action_translate.setText(_("Help Translate"))
self.action_donate.setText(_("Donate"))
self.action_ask.setText(_("Ask a Question"))
self.action_website.setText(_("Website"))
self.action_about.setText(_("About"))
self.action_stop.setText(_("Stop Sound"))
@Slot()
def tray_activated(self, reason):
self.tray_icon.setIcon(self.tray_icons[self.info.indicator_icon])
if reason == QSystemTrayIcon.Trigger:
self.show()
elif reason == QSystemTrayIcon.MiddleClick:
self.on_action_add_triggered()
@Slot()
def closeEvent(self, event):
if self.ok_to_close:
    event.accept()
    sys.exit(0)
else:
event.ignore()
self.hide()
@Slot()
def on_action_add_triggered(self):
dialog = ReminderDialog(self)
dialog.added.connect(self.add_to_schedule)
dialog.exec_()
@Slot()
def on_action_quick_add_triggered(self):
dialog = QuickDialog(self)
dialog.added.connect(self.add_to_schedule)
dialog.exec_()
@Slot()
def on_action_simple_add_triggered(self):
dialog = SimpleDialog(self)
dialog.added.connect(self.add_to_schedule)
dialog.exec_()
@Slot()
def on_action_edit_triggered(self):
(selected, is_parent) = self.get_selected()
if not is_parent:
dialog = ReminderDialog(self)
dialog.edit(selected)
dialog.added.connect(self.add_to_schedule)
dialog.exec_()
@Slot()
def on_action_postpone_triggered(self):
(selected, is_parent) = self.get_selected()
if not is_parent:
if self.info.postpone(selected):
message = _("Sorry, you cannot postpone a repeating time.")
QMessageBox.information(self, _("Postpone"), message, QMessageBox.Ok)
self.update()
@Slot()
def on_action_delete_triggered(self):
(selected, is_parent) = self.get_selected()
if not is_parent:
self.info.delete(selected)
self.update()
@Slot()
def on_action_preferences_triggered(self):
dialog = PreferencesDialog(self)
dialog.update.connect(self.update)
dialog.exec_()
@Slot()
def on_action_news_triggered(self):
self.news_action.setText(_("News"))
helpers.show_uri(blogsite)
@Slot()
def on_action_help_triggered(self):
helpers.show_html_help("index")
@Slot()
def on_action_close_triggered(self):
self.hide()
@Slot()
def on_action_quit_triggered(self):
self.ok_to_close = True
self.close()
@Slot()
def on_action_refresh_triggered(self):
self.update_schedule()
@Slot()
def on_action_clear_icon_triggered(self):
self.tray_icon.setIcon(self.tray_icons[self.info.indicator_icon])
if self.dbus_service is not None:
logger.debug("emmiting dbus active signal")
self.dbus_service.emitActive()
@Slot()
def on_action_bugs_triggered(self):
helpers.show_uri(bugsite_qt)
@Slot()
def on_action_request_triggered(self):
helpers.show_uri(featuresite_qt)
@Slot()
def on_action_translate_triggered(self):
helpers.show_uri(translatesite)
@Slot()
def on_action_donate_triggered(self):
helpers.show_uri(donatesite)
@Slot()
def on_action_ask_triggered(self):
helpers.show_uri(questionsite_qt)
@Slot()
def on_action_website_triggered(self):
helpers.show_uri(website_qt)
@Slot()
def on_action_about_triggered(self):
dialog = AboutDialog(self)
dialog.show()
@Slot()
def on_action_stop_triggered(self):
logger.debug("stopping sound")
self.scheduler.stop_sound()
self.on_action_clear_icon_triggered()
@Slot()
def add_to_schedule(self, id):
self.scheduler.add_reminder(id)
self.update()
@Slot()
def update(self, update_icon = True):
logger.debug("update")
if self.setup_schedule:
self.info.update(self.scheduler)
self.setup_schedule = False
else:
self.info.update(None)
if self.info.show_news == 1 and self.info.new_news == 1:
self.news_action.setText(_("New News"))
else:
self.news_action.setText(_("News"))
if self.info.hide_indicator:
if self.tray_icon.isVisible():
self.tray_icon.hide()
else:
if not self.tray_icon.isVisible():
self.tray_icon.show()
if update_icon:
self.tray_icon.setIcon(self.tray_icons[self.info.indicator_icon])
logger.debug("update: setting up headers")
self.reminder_tree.clear()
self.today = QTreeWidgetItem(self.reminder_tree, [_("Today's Reminders"), "", "", "", ""])
today_brush = QBrush(Qt.SolidPattern)
today_brush.setColor(QColor(self.info.today_color[0], self.info.today_color[1], self.info.today_color[2]))
for i in range(4):
self.today.setBackground(i, today_brush)
self.reminder_tree.addTopLevelItem(self.today)
self.future = QTreeWidgetItem(self.reminder_tree, [_("Future Reminders"), "", "", "", ""])
future_brush = QBrush(Qt.SolidPattern)
future_brush.setColor(QColor(self.info.future_color[0], self.info.future_color[1], self.info.future_color[2]))
for i in range(4):
self.future.setBackground(i, future_brush)
self.reminder_tree.addTopLevelItem(self.future)
self.past = QTreeWidgetItem(self.reminder_tree, [_("Past Reminders"), "", "", "", ""])
past_brush = QBrush(Qt.SolidPattern)
past_brush.setColor(QColor(self.info.past_color[0], self.info.past_color[1], self.info.past_color[2]))
for i in range(4):
self.past.setBackground(i, past_brush)
self.reminder_tree.addTopLevelItem(self.past)
for reminder in self.info.reminder_list:
parent = self.past
if reminder.parent == self.info.today:
parent = self.today
elif reminder.parent == self.info.future:
parent = self.future
temp = QTreeWidgetItem(parent, reminder.qt())
for i in range(4):
temp.setToolTip(i, reminder.tooltip)
self.reminder_tree.expandAll()
logger.debug("update: done setting up tree")
return True
@Slot()
def update_slot(self):
self.update(False)
return True
@Slot()
def update_schedule(self):
self.setup_schedule = True
self.scheduler.clear_schedule()
logger.debug("updating the whole schedule")
self.update()
return True
def get_selected(self):
selected_items = self.reminder_tree.selectedItems()
selected = selected_items[0]
is_parent = False
text = selected.text(0)
if text == self.today.text(0) or text == self.future.text(0) or text == self.past.text(0):
if selected.text(4) == "": #id is "" only on the 3 parents
is_parent = True
if is_parent:
return -1, is_parent
else:
return int(selected.text(4)), is_parent
def dbus_receiver(self, command):
logger.debug("received " + command + " signal from dbus")
if command == "update":
self.update_schedule()
elif command == "stop":
logger.debug("dbus: stopping sound...")
self.on_action_stop_triggered()
elif command == "manage":
self.show()
elif command == "close":
self.ok_to_close = True
self.close()
elif command == "attention" or command == "active":
pass #don't do anything, we probably sent this signal
else:
logger.debug("unrecognized dbus command: " + command)
|
bhdouglass/remindor-qt
|
remindor_qt/RemindorQtWindow.py
|
Python
|
gpl-3.0
| 15,474
|
[
"Brian"
] |
f4f31d3a7e5033924261d60ad63f7748bd3aad3bb72a06789806e6a60dbf2107
|
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
This test compares the NEON GRU layer against a numpy reference GRU
implementation and compares the NEON GRU bprop deltas to the gradients
estimated by finite differences.
The numpy reference GRU contains static methods for forward pass
and backward pass.
It runs a SINGLE layer of GRU and compares numerical values.
The following are guaranteed to be the same in both GRUs:
- initial h values (all zeros)
- initial W, b (ones or random values)
- input data (random data matrix)
- input error (random data matrix)
 - the data shape inside GRU_ref is (seq_len, input_size, batch_size).
Need transpose
 - the data shape inside GRU (neon) is (feature, seq_len * batch_size)
"""
import itertools as itt
import numpy as np
from neon import NervanaObject, logger as neon_logger
from neon.initializers.initializer import Constant, Gaussian
from neon.layers import GRU
from neon.transforms import Logistic, Tanh
from neon.layers.container import DeltasTree
from gru_ref import GRU as RefGRU
from utils import allclose_with_out
def pytest_generate_tests(metafunc):
bsz_rng = [1]
if 'refgruargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
seq_rng = [2, 3, 4]
inp_rng = [3, 5, 10]
out_rng = [3, 5, 10]
else:
seq_rng = [3]
inp_rng = [5]
out_rng = [10]
fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)
metafunc.parametrize('refgruargs', fargs)
if 'gradgruargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
seq_rng = [2, 3]
inp_rng = [5, 10]
out_rng = [3, 5, 10]
else:
seq_rng = [3]
inp_rng = [5]
out_rng = [10]
fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)
metafunc.parametrize('gradgruargs', fargs)
def test_ref_compare_ones(backend_default, refgruargs):
# run comparison with reference code
# for all ones init
seq_len, input_size, hidden_size, batch_size = refgruargs
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
check_gru(seq_len, input_size, hidden_size,
batch_size, Constant(val=1.0), [1.0, 0.0])
def test_ref_compare_rand(backend_default, refgruargs):
# run comparison with reference code
# for random init
seq_len, input_size, hidden_size, batch_size = refgruargs
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
check_gru(seq_len, input_size, hidden_size, batch_size,
Gaussian())
def test_ref_compare_rand_init_state(backend_default, refgruargs):
seq_len, input_size, hidden_size, batch_size = refgruargs
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
check_gru(seq_len, input_size, hidden_size, batch_size,
Gaussian(), add_init_state=True)
# compare neon GRU to reference GRU implementation
def check_gru(seq_len, input_size, hidden_size,
batch_size, init_func, inp_moms=[0.0, 1.0], add_init_state=False):
# init_func is the initializer for the model params
# inp_moms is the [ mean, std dev] of the random input
input_shape = (input_size, seq_len * batch_size)
output_shape = (hidden_size, seq_len * batch_size)
slice_shape = (hidden_size, batch_size)
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
# neon GRU
gru = GRU(hidden_size,
init_func,
activation=Tanh(),
gate_activation=Logistic())
# generate random input tensor
inp = np.random.rand(*input_shape) * inp_moms[1] + inp_moms[0]
inp_dev = gru.be.array(inp)
# generate random deltas tensor
deltas = np.random.randn(*output_shape)
# run neon fprop
gru.configure((input_size, seq_len))
gru.prev_layer = True
gru.allocate()
test_buffer = DeltasTree()
gru.allocate_deltas(test_buffer)
test_buffer.allocate_buffers()
gru.set_deltas(test_buffer)
if add_init_state:
init_state = np.random.rand(*slice_shape)*inp_moms[1] + inp_moms[0]
init_state_dev = gru.be.array(init_state)
gru.fprop(inp_dev, init_state=init_state_dev)
else:
gru.fprop(inp_dev)
# reference numpy GRU
gru_ref = RefGRU(input_size, hidden_size)
WGRU = gru_ref.weights
# make ref weights and biases the same with neon model
r_range = list(range(hidden_size))
z_range = list(range(hidden_size, hidden_size * 2))
c_range = list(range(hidden_size * 2, hidden_size * 3))
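# neon stacks the reset (r), update (z) and candidate (c) gate parameters
# row-wise, so each range above selects the rows belonging to one gate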
WGRU[gru_ref.weights_ind_br][:] = gru.b.get()[r_range]
WGRU[gru_ref.weights_ind_bz][:] = gru.b.get()[z_range]
WGRU[gru_ref.weights_ind_bc][:] = gru.b.get()[c_range]
WGRU[gru_ref.weights_ind_Wxr][:] = gru.W_input.get()[r_range]
WGRU[gru_ref.weights_ind_Wxz][:] = gru.W_input.get()[z_range]
WGRU[gru_ref.weights_ind_Wxc][:] = gru.W_input.get()[c_range]
WGRU[gru_ref.weights_ind_Rhr][:] = gru.W_recur.get()[r_range]
WGRU[gru_ref.weights_ind_Rhz][:] = gru.W_recur.get()[z_range]
WGRU[gru_ref.weights_ind_Rhc][:] = gru.W_recur.get()[c_range]
# transpose input X and do fprop
# the reference code expects these shapes:
# input_shape: (seq_len, input_size, batch_size)
# output_shape: (seq_len, hidden_size, batch_size)
inp_ref = inp.copy().T.reshape(
seq_len, batch_size, input_size).swapaxes(1, 2)
deltas_ref = deltas.copy().T.reshape(
seq_len, batch_size, hidden_size).swapaxes(1, 2)
if add_init_state:
init_state_ref = init_state.copy()
(dWGRU_ref, h_ref_list, dh_ref_list,
dr_ref_list, dz_ref_list, dc_ref_list) = gru_ref.lossFun(inp_ref,
deltas_ref,
init_state_ref)
else:
(dWGRU_ref, h_ref_list, dh_ref_list,
dr_ref_list, dz_ref_list, dc_ref_list) = gru_ref.lossFun(inp_ref,
deltas_ref)
neon_logger.display('====Verifying hidden states====')
assert allclose_with_out(gru.outputs.get(),
h_ref_list,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('fprop is verified')
# now test the bprop
neon_logger.display('Making sure neon GRU matches numpy GRU in bprop')
gru.bprop(gru.be.array(deltas))
# grab the delta W from gradient buffer
dWinput_neon = gru.dW_input.get()
dWrecur_neon = gru.dW_recur.get()
db_neon = gru.db.get()
dWxr_neon = dWinput_neon[r_range]
dWxz_neon = dWinput_neon[z_range]
dWxc_neon = dWinput_neon[c_range]
dWrr_neon = dWrecur_neon[r_range]
dWrz_neon = dWrecur_neon[z_range]
dWrc_neon = dWrecur_neon[c_range]
dbr_neon = db_neon[r_range]
dbz_neon = db_neon[z_range]
dbc_neon = db_neon[c_range]
drzc_neon = gru.rzhcan_delta_buffer.get()
dr_neon = drzc_neon[r_range]
dz_neon = drzc_neon[z_range]
dc_neon = drzc_neon[c_range]
dWxr_ref = dWGRU_ref[gru_ref.dW_ind_Wxr]
dWxz_ref = dWGRU_ref[gru_ref.dW_ind_Wxz]
dWxc_ref = dWGRU_ref[gru_ref.dW_ind_Wxc]
dWrr_ref = dWGRU_ref[gru_ref.dW_ind_Rhr]
dWrz_ref = dWGRU_ref[gru_ref.dW_ind_Rhz]
dWrc_ref = dWGRU_ref[gru_ref.dW_ind_Rhc]
dbr_ref = dWGRU_ref[gru_ref.dW_ind_br]
dbz_ref = dWGRU_ref[gru_ref.dW_ind_bz]
dbc_ref = dWGRU_ref[gru_ref.dW_ind_bc]
# neon_logger.display '====Verifying hidden deltas ===='
neon_logger.display('====Verifying r deltas ====')
assert allclose_with_out(dr_neon,
dr_ref_list,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('====Verifying z deltas ====')
assert allclose_with_out(dz_neon,
dz_ref_list,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('====Verifying hcan deltas ====')
assert allclose_with_out(dc_neon,
dc_ref_list,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('====Verifying update on W_input====')
neon_logger.display('dWxr')
assert allclose_with_out(dWxr_neon,
dWxr_ref,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('dWxz')
assert allclose_with_out(dWxz_neon,
dWxz_ref,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('dWxc')
assert allclose_with_out(dWxc_neon,
dWxc_ref,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('====Verifying update on W_recur====')
neon_logger.display('dWrr')
assert allclose_with_out(dWrr_neon,
dWrr_ref,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('dWrz')
assert allclose_with_out(dWrz_neon,
dWrz_ref,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('dWrc')
assert allclose_with_out(dWrc_neon,
dWrc_ref,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('====Verifying update on bias====')
neon_logger.display('dbr')
assert allclose_with_out(dbr_neon,
dbr_ref,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('dbz')
assert allclose_with_out(dbz_neon,
dbz_ref,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('dbc')
assert allclose_with_out(dbc_neon,
dbc_ref,
rtol=0.0,
atol=1.0e-5)
neon_logger.display('bprop is verified')
return
def reset_gru(gru):
# in order to run fprop multiple times
# for the gradient check tests the
# gru internal variables need to be
# cleared
gru.x = None
gru.xs = None # just in case
gru.outputs = None
return
def test_gradient_neon_gru(backend_default, gradgruargs):
seq_len, input_size, hidden_size, batch_size = gradgruargs
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
gradient_check(seq_len, input_size, hidden_size, batch_size)
def test_gradient_neon_gru_init_state(backend_default, gradgruargs):
seq_len, input_size, hidden_size, batch_size = gradgruargs
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
gradient_check(seq_len, input_size, hidden_size, batch_size, True)
def gradient_check(seq_len, input_size, hidden_size, batch_size,
add_init_state=False, threshold=1.0e-3):
# 'threshold' is the maximum allowed absolute difference
# between the gradient estimate and the
# bprop deltas (default is 1.0e-3)
# for a given set of layer parameters calculate
# the gradients and compare to the derivatives
# obtained with the bprop function. repeat this
# for a range of perturbations and use the
# perturbation size with the best results.
# This is necessary for 32 bit computations
min_max_err = -1.0  # minimum max error
best_pert = 0.0  # perturbation magnitude that achieved it
neon_logger.display('Perturb mag, max grad diff')
for pert_exp in range(-5, 0):
# the scaling and the input are generated outside gradient_calc
# because of an issue with the random number generator when they
# are generated inside that function
input_shape = (input_size, seq_len * batch_size)
output_shape = (hidden_size, seq_len * batch_size)
rand_scale = np.random.random(output_shape) * 2.0 - 1.0
inp = np.random.randn(*input_shape)
pert_mag = 10.0**pert_exp
(grad_est, deltas) = gradient_calc(seq_len,
input_size,
hidden_size,
batch_size,
add_init_state=add_init_state,
epsilon=pert_mag,
rand_scale=rand_scale,
inp_bl=inp)
dd = np.max(np.abs(grad_est - deltas))
neon_logger.display('%e, %e' % (pert_mag, dd))
if min_max_err < 0.0 or dd < min_max_err:
min_max_err = dd
# reset the seed so models are same in each run
# allclose_with_out(grad_est,deltas, rtol=0.0, atol=0.0)
NervanaObject.be.rng_reset()
# check that best value of worst case error is less than threshold
neon_logger.display('Worst case error %e with perturbation %e' % (min_max_err, best_pert))
neon_logger.display('Threshold %e' % (threshold))
assert min_max_err < threshold
def gradient_calc(seq_len, input_size, hidden_size, batch_size, add_init_state=False,
epsilon=None, rand_scale=None, inp_bl=None):
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
input_shape = (input_size, seq_len * batch_size)
# generate input if one is not given
if inp_bl is None:
inp_bl = np.random.randn(*input_shape)
# neon gru instance
gru = GRU(hidden_size, init=Gaussian(), activation=Tanh(), gate_activation=Logistic())
inpa = gru.be.array(np.copy(inp_bl))
# run fprop on the baseline input
gru.configure((input_size, seq_len))
gru.prev_layer = True
gru.allocate()
test_buffer = DeltasTree()
gru.allocate_deltas(test_buffer)
test_buffer.allocate_buffers()
gru.set_deltas(test_buffer)
if add_init_state is True:
slice_shape = (hidden_size, batch_size)
ini_s = np.random.randn(*slice_shape)
ini_s_dev = gru.be.array(ini_s.copy())
out_bl = gru.fprop(inpa, ini_s_dev).get()
else:
out_bl = gru.fprop(inpa).get()
# random scaling/hash to generate fake loss
if rand_scale is None:
rand_scale = np.random.random(out_bl.shape) * 2.0 - 1.0
# loss function would be:
# loss_bl = np.sum(rand_scale * out_bl)
# run back prop with rand_scale as the errors
# use copy to avoid any interactions
deltas_neon = gru.bprop(gru.be.array(np.copy(rand_scale))).get()
# add a perturbation to each input element
grads_est = np.zeros(inpa.shape)
inp_pert = inp_bl.copy()
for pert_ind in range(inpa.size):
save_val = inp_pert.flat[pert_ind]
inp_pert.flat[pert_ind] = save_val + epsilon
reset_gru(gru)
gru.allocate()
if add_init_state is True:
ini_s_dev = gru.be.array(ini_s.copy())
out_pos = gru.fprop(gru.be.array(inp_pert), ini_s_dev).get()
else:
out_pos = gru.fprop(gru.be.array(inp_pert)).get()
inp_pert.flat[pert_ind] = save_val - epsilon
reset_gru(gru)
gru.allocate()
if add_init_state is True:
ini_s_dev = gru.be.array(ini_s.copy())
out_neg = gru.fprop(gru.be.array(inp_pert), ini_s_dev).get()
else:
out_neg = gru.fprop(gru.be.array(inp_pert)).get()
# calculate the loss with perturbations
loss_pos = np.sum(rand_scale * out_pos)
loss_neg = np.sum(rand_scale * out_neg)
# compute the gradient estimate
grad = 0.5 / float(epsilon) * (loss_pos - loss_neg)
grads_est.flat[pert_ind] = grad
# reset the perturbed input element
inp_pert.flat[pert_ind] = save_val
del gru
return (grads_est, deltas_neon)
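# Editorial sketch (not part of the original test): the central-difference
# estimate used in gradient_calc, shown on a scalar function with a known
# derivative (f(x) = x**2, so f'(3) = 6).
def _central_difference_sketch(epsilon=1e-5):
    f = lambda x: x * x
    x = 3.0
    grad_est = (f(x + epsilon) - f(x - epsilon)) / (2.0 * epsilon)
    assert abs(grad_est - 2.0 * x) < 1e-6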
|
NervanaSystems/neon
|
tests/test_gru.py
|
Python
|
apache-2.0
| 16,736
|
[
"Gaussian"
] |
3dcd41d7b0856386c2e5353eda3fef0fdcca0753b5b8e5cd455129dab5112a15
|
#!/usr/bin/python
# File created on 27 Jan 2012.
from __future__ import division
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
import os, re
from os import makedirs, sys, remove
from sys import path
from glob import glob
from optparse import OptionParser
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters, fprintf, eprintf, exit_process
from libs.python_modules.utils.sysutil import pathDelim
except:
print(""" Could not load some user defined module functions""")
print(""" Make sure your typed \"source MetaPathwaysrc\" """)
print(""" """)
sys.exit(3)
PATHDELIM = pathDelim()
usage= "Usage :\n" + " " + sys.argv[0] + """ -s sample_name -f folder_path """
parser=None
def createParser():
global parser
parser = OptionParser(usage)
parser.add_option("-s", "--sample_name", dest="sample_name",
help='the sample name [REQUIRED]')
parser.add_option("-f", "--folder_path", dest="folder_path",
help='the folder path [REQUIRED]')
def valid_arguments(opts, args):
state = True
if opts.sample_name == None :
state = False
if opts.folder_path == None :
state = False
return state
class FastaRecord(object):
def __init__(self, name, sequence):
self.name = name
self.sequence = sequence
def read_fasta_records(input_file):
records = []
sequence=""
name=""
while 1:
line = input_file.readline()
if line == "":
if sequence!="" and name!="":
records.append(FastaRecord(name, sequence))
return records
if line=='\n':
continue
line = line.rstrip()
if line.startswith(">") :
if sequence!="" and name!="":
records.append(FastaRecord(name, sequence))
name = line.rstrip()
sequence =""
else:
sequence = sequence + line.rstrip()
return records
SIZE = 1000
def get_number_of_BLAST_LAST_hits(file_name):
commentPATTERN = re.compile(r'^#')
count = 0
try:
inputfilename = open(file_name, 'r')
except:
#exit_process("ERROR: Cannot find the file name : %s\n" %( file_name) );
return None
line = inputfilename.readline()
while line:
if commentPATTERN.search(line):
line = inputfilename.readline()
continue
fields= [x.strip() for x in line.split('\t') ]
if len(fields) < 12:
line = inputfilename.readline()
continue
count += 1
line = inputfilename.readline()
inputfilename.close()
return count
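# Editorial sketch with invented values: the 12-column tab-separated
# BLAST/LAST format that get_number_of_BLAST_LAST_hits counts; comment
# lines ('#') and lines with fewer than 12 fields are skipped.
def _blast_tab_sketch():
    line = "query1\tsubject1\t98.5\t100\t1\t0\t1\t100\t5\t104\t1e-50\t190"
    fields = [x.strip() for x in line.split('\t')]
    assert len(fields) == 12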
def get_number_of_rRNA_hits(file_name):
commentPATTERN = re.compile(r'similarity')
count = 0
try:
inputfilename = open(file_name, 'r')
except:
return count
line = inputfilename.readline()
while line:
if commentPATTERN.search(line):
line = inputfilename.readline()
continue
fields= [x.strip() for x in line.split('\t') ]
if len(fields) < 7:
line = inputfilename.readline()
continue
count += 1
line = inputfilename.readline()
inputfilename.close()
return count
def get_number_of_tRNA_hits(file_name):
dataPATTERN = re.compile(r'number of predicted tRNA=(.*)')
count = 0
try:
inputfilename = open(file_name, 'r')
except:
return count
line = inputfilename.readline()
while line:
if not dataPATTERN.search(line):
line = inputfilename.readline()
continue
result = dataPATTERN.search(line)
if result:
if len(result.groups())==1:
count = result.group(1)
return count
line = inputfilename.readline()
inputfilename.close()
return count
# get the rRNA_hits
def get_rRNA_hits(sample_name, folder_path):
results = []
regPattern = re.compile(r'.rRNA.stats.txt')
input_dir = folder_path + PATHDELIM + 'results' + PATHDELIM + 'rRNA'
files = [ re.sub(r'.*\/','',f) for f in glob(input_dir + PATHDELIM + sample_name + '*') if regPattern.search(f) ]
regPattern = re.compile(r'[.](.*)[.]rRNA.stats.txt')
for file in files:
result = regPattern.search(file)
if result:
database = result.group(1)
file_name = input_dir + PATHDELIM + sample_name + '.' + result.group(1) + '.rRNA.stats.txt'
count = get_number_of_rRNA_hits(file_name)
results.append( ( 'Number of rRNA hits in ' + database, count ) )
if results==[]:
return None
return results
# get the rRNA_hits
def get_tRNA_hits(sample_name, folder_path):
results = []
regPattern = re.compile(r'.tRNA.stats.txt')
input_dir = folder_path + PATHDELIM + 'results' + PATHDELIM + 'tRNA'
file_name = input_dir + PATHDELIM + sample_name + '.tRNA.stats.txt'
count = get_number_of_tRNA_hits(file_name)
results.append( ('Number of tRNA hits in ', count ) )
if results==[]:
return None
return results
def get_number_of_uncommented_lines(file_name):
commentPATTERN = re.compile(r'^#')
count = 0
try:
inputfilename = open(file_name, 'r')
except:
return count
line = inputfilename.readline()
while line:
if commentPATTERN.search(line):
line = inputfilename.readline()
continue
fields= [x.strip() for x in line.split('\t') ]
count += 1
line = inputfilename.readline()
inputfilename.close()
return count
#counts the number of taxonomic and annotated ORFs
def get_functional_taxonomic_hits(sample_name, folder_path):
results = []
# for the LAST algorithm
regPattern = re.compile(r'.annot.gff$', re.IGNORECASE)
input_dir = folder_path + PATHDELIM + 'results' + PATHDELIM + 'annotation_table'
file_name = input_dir + PATHDELIM + 'functional_and_taxonomic_table.txt'
eprintf("\nCounting number of functionally and taxonomically ORFs ...")
count = get_number_of_uncommented_lines(file_name)
eprintf("done\n")
results.append( ('Total number of functionally and taxonomically annotated ORFs', count ) )
if results==[]:
return None
return results
#counts the number of ORFs in the table ORF_annotation_table
def get_ORF_annotations_hits(sample_name, folder_path):
results = []
# for the LAST algorithm
regPattern = re.compile(r'.annot.gff$', re.IGNORECASE)
input_dir = folder_path + PATHDELIM + 'results' + PATHDELIM + 'annotation_table'
file_name = input_dir + PATHDELIM + 'ORF_annotation_table.txt'
eprintf("\nCounting number of ORFs for mapping to functional classification ...")
count = get_number_of_uncommented_lines(file_name)
eprintf("done\n")
results.append( ('Total orfs count for functional classification', count ) )
if results==[]:
return None
return results
#counts the number of annotatations generated
def get_annotation_hits(sample_name, folder_path):
results = []
# for the LAST algorithm
regPattern = re.compile(r'.annot.gff$', re.IGNORECASE)
input_dir = folder_path + PATHDELIM + 'genbank'
files = [ re.sub(r'.*\/','',f) for f in glob(input_dir + PATHDELIM + sample_name + '*') if regPattern.search(f) ]
regPattern = re.compile(r'(.*)[.]annot.gff$', re.IGNORECASE)
for file in files:
result = regPattern.search(file)
if result:
file_name = input_dir + PATHDELIM + sample_name + '.annot.gff'
eprintf("\nCounting number of annotations...")
count = get_number_of_uncommented_lines(file_name)
eprintf("done\n")
results.append( ('Total number of valid annotations', count ) )
if results==[]:
return None
return results
# counts the number of parsed BLAST or LAST hits
def get_BLAST_LAST_parsed_hits(sample_name, folder_path):
results = []
# for the LAST algorithm
regPattern = re.compile(r'.LASTout.parsed.txt$', re.IGNORECASE)
input_dir = folder_path + PATHDELIM + 'blast_results'
files = [ re.sub(r'.*\/','',f) for f in glob(input_dir + PATHDELIM + sample_name + '*') if regPattern.search(f) ]
regPattern = re.compile(r'[.](.*)[.]LASTout.parsed.txt$', re.IGNORECASE)
for file in files:
result = regPattern.search(file)
if result:
database = result.group(1)
file_name = input_dir + PATHDELIM + sample_name + '.' + result.group(1) + '.LASTout.parsed.txt'
eprintf("\nParse LAST hits for : %s...", database)
count = get_number_of_uncommented_lines(file_name)
results.append(('Total number of selected hits in ' + database + ' with LAST ', count ) )
# now for the BLAST algorithm
regPattern = re.compile(r'.BLASTout.parsed.txt')
input_dir = folder_path + PATHDELIM + 'blast_results'
files = [ re.sub(r'.*\/','',f) for f in glob(input_dir + PATHDELIM + sample_name + '*') if regPattern.search(f) ]
regPattern = re.compile(r'[.](.*)[.]BLASTout')
for file in files:
result = regPattern.search(file)
if result:
database = result.group(1)
file_name = input_dir + PATHDELIM + sample_name + '.' + result.group(1) + '.BLASTout.parsed.txt'
eprintf("\nParse BLAST hits for : %s...", database)
count = get_number_of_uncommented_lines(file_name)
results.append(('Total number of selected hits in ' + database + ' with BLAST ', count ) )
if results==[]:
return None
return results
# counts the number of BLAST or LAST hits
def get_BLAST_LAST_hits(sample_name, folder_path):
results = []
# for the LAST algorithm
regPattern = re.compile(r'.LASTout$')
input_dir = folder_path + PATHDELIM + 'blast_results'
files = [ re.sub(r'.*\/','',f) for f in glob(input_dir + PATHDELIM + sample_name + '*') if regPattern.search(f) ]
regPattern = re.compile(r'[.](.*)[.]LASTout$')
for file in files:
result = regPattern.search(file)
if result:
database = result.group(1)
file_name = input_dir + PATHDELIM + sample_name + '.' + result.group(1) + '.LASTout'
eprintf("\nProcess LAST hits for : %s...", database)
count = get_number_of_BLAST_LAST_hits(file_name)
eprintf("done")
results.append(('Total number of hits in ' + database + ' with LAST ', count ) )
# now for the BLAST algorithm
regPattern = re.compile(r'.BLASTout')
input_dir = folder_path + PATHDELIM + 'blast_results'
files = [ re.sub(r'.*\/','',f) for f in glob(input_dir + PATHDELIM + sample_name + '*') if regPattern.search(f) ]
regPattern = re.compile(r'[.](.*)[.]BLASTout')
for file in files:
result = regPattern.search(file)
if result:
database = result.group(1)
file_name = input_dir + PATHDELIM + sample_name + '.' + result.group(1) + '.BLASTout'
eprintf("\nProcess BLAST hits for : %s...", database)
count = get_number_of_BLAST_LAST_hits(file_name)
results.append( ( 'Total number of hits in ' + database + ' with BLAST ', count ) )
if results==[]:
return None
return results
def get_stats_from_stats_file(sample_name, folder_path, type):
sequencesPATTERN = re.compile(r'\t([^\t]* of sequences[^\t]*)\t([^\t]*)\t([^\t]*)$')
input_file_name = folder_path + PATHDELIM + 'run_statistics' + PATHDELIM + sample_name + '.' + type + '.stats'
results = []
if type=='nuc':
tag = ' (nucleotide) '
else:
tag = ' (amino) '
try:
inputfilename = open(input_file_name, 'r')
except:
return None  # signal failure, matching the empty-result path below
lines = inputfilename.readlines()
inputfilename.close()
for line in lines:
line = re.sub(r':', '', line)
result = sequencesPATTERN.search(line.strip())
if result:
try:
num2 = '%d' %(float(result.group(2)) )
except:
num2 = 0
try:
num3 = '%d' %(float(result.group(3)) )
except:
num3 = 0
results.append( (result.group(1) + tag + 'BEFORE filtering ', num2 ) )
results.append( (result.group(1) + tag + 'AFTER filtering ', num3 ) )
if results==[]:
return None
return results
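# Editorial sketch with an invented line: the tab-separated stats format the
# regex above matches (a label containing " of sequences", then the counts
# before and after filtering).
def _stats_line_sketch():
    pat = re.compile(r'\t([^\t]* of sequences[^\t]*)\t([^\t]*)\t([^\t]*)$')
    match = pat.search("INPUT\tNumber of sequences\t1200\t1100")
    assert match.group(2) == "1200" and match.group(3) == "1100"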
def main(argv, errorlogger = None):
global parser
(opts, args) = parser.parse_args(argv)
if not valid_arguments(opts, args):
print(usage)
sys.exit(0)
sample_name = opts.sample_name
folder_path = opts.folder_path
results = []
try:
STEP_NAME = "GATHER_STATS"
# read the nucleotide sequences
status = get_stats_from_stats_file(sample_name, folder_path, 'nuc')
if status!=None:
results += status
else:
errorlogger.write("%s\tERROR\tCannot read nuc stats file\t%s" %(STEP_NAME, folder_path + PATHDELIM + sample_name))
exit_process()
# read the amino acid sequences
status = get_stats_from_stats_file(sample_name, folder_path, 'amino')
if status!=None:
results += status
else:
errorlogger.write("%s\tERROR\tCannot read amino stats file\t%s" %(STEP_NAME, folder_path + PATHDELIM + sample_name))
exit_process()
# read the blast/last hits
status = get_BLAST_LAST_hits(sample_name, folder_path)
if status!=None:
results += status
else:
errorlogger.write("%s\tERROR\tReading BLAST HITS\t%s" %(STEP_NAME, folder_path + PATHDELIM + sample_name))
exit_process()
# read the selected parsed blast/last hits
status = get_BLAST_LAST_parsed_hits(sample_name, folder_path)
if status!=None:
results += status
else:
errorlogger.write("%s\tERROR\tReading parsed BLAST HITS\t%s" %(STEP_NAME, folder_path + PATHDELIM + sample_name))
exit_process()
# read the annotated gff hits
status = get_annotation_hits(sample_name, folder_path)
if status!=None:
results += status
# read the annotated gff hits
status = get_functional_taxonomic_hits(sample_name, folder_path)
if status!=None:
results += status
# read the number of ORFs that are used for mapping to functional categories
status = get_ORF_annotations_hits(sample_name, folder_path)
if status!=None:
results += status
# get the rRNA hits
status = get_rRNA_hits(sample_name, folder_path)
if status!=None:
results += status
# get the tRNA hits
status = get_tRNA_hits(sample_name, folder_path)
if status!=None:
results += status
stats_file_name = folder_path + PATHDELIM + 'run_statistics' + PATHDELIM + sample_name + '.run.stats.txt'
try:
statsfilename = open(stats_file_name, 'w')
except:
print("ERRROR : Cannot open stats file format " + stats_file_name)
sys.exit(0)
for pair in results:
fprintf(statsfilename, '%s\t%s\n', pair[0], pair[1])
statsfilename.close()
except:
exit_process()
def MetaPathways_gather_run_stats(argv, errorlogger= None):
createParser()
errorlogger.write("#STEP\tGATHER_STATS\n");
main(argv, errorlogger = errorlogger)
return (0,'')
# the main function of metapaths
if __name__ == "__main__":
createParser()
main(sys.argv[1:])
|
kishori82/MetaPathways_Python.3.0
|
libs/python_modules/pipeline/MetaPathways_gather_run_stats.py
|
Python
|
mit
| 15,903
|
[
"BLAST"
] |
922a11fd6726b872bd552ac83cacbff4b52390a7b82202a60edbe5b4d0df28eb
|
##############################################################################
##############################################################################
# Example 1.1
# Gaussian process regression for ice varve data
#
# Copyright (c) 2016 Johan Dahlin [ johan.dahlin (at) liu.se ]
# Distributed under the MIT license.
#
##############################################################################
##############################################################################
import numpy as np
import pylab as pb
import GPy
import pandas
d = np.loadtxt("data/icevarve.txt")
# Set up the input grid, the observations and the prior mean
x = np.arange(0,634,1)
X = x[:,None];
Y = d[:,None];
mu = np.zeros(634);
# Fit the GP
k1 = GPy.kern.Bias(1) + GPy.kern.Matern32(1, lengthscale=1);
m1 = GPy.models.GPRegression(X,Y,k1);
m1.sum.bias.variance = 767.366976247
m1.sum.Mat32.lengthscale = 25
m1.sum.Mat32.variance = 226.641568122
m1.Gaussian_noise.variance = 173.380045038
#m1.optimize('bfgs',max_iters=200)
#m1.optimize_restarts(num_restarts = 10, robust=True)
# Evaluate the predictive distribution on a grid
Mup1, var1 = m1.predict( X );
# Export data
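# 1.96 is the 97.5% Gaussian quantile, so the two outer columns bound a 95% predictive interval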
out = np.hstack((Mup1,Mup1-1.96*np.sqrt(var1),Mup1+1.96*np.sqrt(var1)));
pandas.DataFrame(out).to_csv("ch1-icevarve-posterior.csv");
pandas.DataFrame(X).to_csv("ch1-icevarve-grid.csv");
########################################################################
# End of file
########################################################################
|
compops/phd-thesis
|
example-icevarve/ex-icevarve-gp.py
|
Python
|
gpl-3.0
| 1,560
|
[
"Gaussian"
] |
c9cfb0ed9f71797d40423988905deb94c2ac8538a96f81be909f51bbbecdc7cf
|
#!/usr/bin/env python
""" segmental_duplication_gene_analyzer
"""
import argparse
import logging
import os
import sys
from SDDetector.version import __version__
from SDDetector.Entities.Region import Region
from SDDetector.Entities.GeneLink import GeneLink
from SDDetector.Parser.Gff.GffDuplicationParser import GffDuplicationParser
from SDDetector.Parser.Gff.GffGeneParser import GffGeneParser
from SDDetector.Parser.Gff.GffTEParser import GffTEParser
from SDDetector.Parser.Blast.BlastXMLParserExpat import BlastXMLParserExpat
from SDDetector.Db.GeneDB import GeneDB
from SDDetector.Utils.CircosPlot import CircosPlot
from SDDetector.Utils.FastaFileIndexer import FastaFileIndexer
class Analyzer(object):
def __init__(self, SDFile='', BlastXMLFile='', GeneFile='', outputFile='', \
GenomeFile='', TEFile='', circos=False, logLevel='ERROR'):
"""Constructor"""
self.SDFile = SDFile
self.BlastXMLFile = BlastXMLFile
self.GeneFile = GeneFile
self.outputFile = outputFile
self.GenomeFile = GenomeFile
self.TEFile = TEFile
self.circos = circos
self.logLevel = logLevel
logging.basicConfig(level=self.logLevel)
self.lDuplications = []
def __del__(self):
"""Destructor"""
if os.path.exists('gene.db'):
os.remove('gene.db')
def getPolymorphismEffect(self):
"""Analyze polymorphism between genes and return list of variants"""
with open(self.outputFile,'w') as f:
logging.info('Writing polymorphism effect in {}'.format(self.outputFile))
for link in self.lGeneLinks:
f.write('Genes: ({},{}); sequences: ({},{}); strands: ({},{})\n'.format(link.gene1.id, link.gene2.id,link.gene1.seqid,link.gene2.seqid,link.gene1.strand,link.gene2.strand))
if len(link.gene1.lTranscripts) > 0 and len(link.gene2.lTranscripts) > 0:
lAlignEffect, lMutations, r1, r2 = link.getEffect()
if lAlignEffect:
f.write('Alignment: ({},{},{},{}) vs ({},{},{},{})\n'.format(r1.seq,r1.start,r1.end,r1.strand,r2.seq,r2.start,r2.end,r2.strand))
for strMutation in lMutations:
f.write(strMutation)
f.write('\n')
nbBases = len(lAlignEffect[0])
size = 60
indexSize = 0
indexBase = 0
algmtGene = ''
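# render the pairwise alignment in 60-column blocks; gap characters ('-')
# do not consume sequence, so their count is subtracted when advancing the
# genomic coordinates on each strand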
if r1.strand == 1:
algmt1Start, algmt1End = (r1.start, r1.end)
else:
algmt1Start, algmt1End = (r1.end, r1.start)
if r2.strand == 1:
algmt2Start, algmt2End = (r2.start, r2.end)
else:
algmt2Start, algmt2End = (r2.end, r2.start)
start1 = algmt1Start
start2 = algmt2Start
end1 = 0
end2 = 0
while indexBase < nbBases:
nbHyphen1 = lAlignEffect[2][indexBase:indexBase+size].count('-')
nbHyphen2 = lAlignEffect[4][indexBase:indexBase+size].count('-')
if r1.strand == -1:
end1 = start1-size-1-nbHyphen1
else:
end1 = start1+size-1-nbHyphen1
if r2.strand == -1:
end2 = start2-size-1-nbHyphen2
else:
end2 = start2+size-1-nbHyphen2
scale1 = str(start1) + ' '*(size-len(str(start1))-len(str(end1))) + str(end1)
scale2 = str(start2) + ' '*(size-len(str(start2))-len(str(end2))) + str(end2)
algmtGene += '{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n\n'.format(scale1,lAlignEffect[0][indexBase:indexBase+size],lAlignEffect[1][indexBase:indexBase+size],lAlignEffect[2][indexBase:indexBase+size],lAlignEffect[3][indexBase:indexBase+size],lAlignEffect[4][indexBase:indexBase+size],lAlignEffect[5][indexBase:indexBase+size],lAlignEffect[6][indexBase:indexBase+size],scale2)
indexBase += size
start1 = end1+1
start2 = end2+1
f.write(algmtGene)
else:
f.write('No alignment built for gene {} or gene {}\n'.format(link.gene1.id, link.gene2.id))
else:
f.write('Missing transcripts for gene {} or gene {} in defined regions\n'.format(link.gene1.id, link.gene2.id))
f.close()
def runAnalyze(self):
"""run analysis"""
logging.info('Parsing Duplication gff file')
iGffDuplicationParser = GffDuplicationParser(self.SDFile)
self.lDuplications = iGffDuplicationParser.getNonRedondantDuplications()
lRegions = []
for dup in self.lDuplications:
for region in dup.lRegions:
lRegions.append(region)
logging.info('Parsing Blast xml file')
lAlignmentTuples = []
try:
iBlastXMLParser = BlastXMLParserExpat(self.BlastXMLFile)
lAlignmentTuples = iBlastXMLParser.getAlignmentsFromTupleOfRegions(lRegions)
except Exception as e:
logging.error(e.message)
sys.exit(1)
index = 0
for dup in self.lDuplications:
lAlgmts = []
for region in dup.lRegions:
lAlgmts.append((lAlignmentTuples[index][0],lAlignmentTuples[index][1]))
index += 1
dup.lSeqAlgmts = lAlgmts
dup.dSeqToSeq = dup.getdSeqToSeq()
logging.info('Parsing Gene gff file')
iGffGeneParser = GffGeneParser(self.GeneFile)
self.db = GeneDB(dbfile='gene.db')
self.lGenes = iGffGeneParser.getAllGenes()
self.db.insertlGenes(self.lGenes)
self.lGeneLinks = []
for dup in self.lDuplications:
(lGeneSeq1,lGeneSeq2) = self._extractGeneInDuplication(dup)
if lGeneSeq1 and lGeneSeq2:
if dup.DuplicationType not in ['mirror', 'bridge']:
self.lGeneLinks.extend(self._buildGeneLinks(lGeneSeq1,lGeneSeq2,dup))
else:
logging.info('Duplication type is {} for duplication: {}'
', no gene polymorphism analysis performed'
.format(dup.DuplicationType, dup))
else:
logging.info('One of sequence in the duplication has no gene - no gene impact analysis for ({}-{}-{})--({}-{}-{})'
.format(dup.seq1,dup.start1,dup.end1,dup.seq2,dup.start2,dup.end2))
self.getPolymorphismEffect()
if self.circos:
self.lTEs = []
if self.GenomeFile:
logging.info('Indexing Genome fasta file')
iFastaGenomeParser = FastaFileIndexer(self.GenomeFile)
iFastaGenomeParser.read()
lSeqNames = iFastaGenomeParser.lSeq
self.lSeqs = [ (seq, len(iFastaGenomeParser.dSeq[seq])) for seq in lSeqNames ]
else:
logging.error('Missing Genome File - required for Circos plot')
sys.exit(1)
if self.TEFile:
logging.debug('Parsing TE gff file')
parser = GffTEParser(self.TEFile)
self.lTEs = parser.getAllTEs()
logging.info('Generating circos files')
self.writeCircosPlotFiles()
def writeCircosPlotFiles(self):
"""write circos files"""
cPlot = CircosPlot()
if self.lSeqs:
SeqDataFile = 'genome.txt'
logging.info('Writing circos sequence data file in {}'.format(SeqDataFile))
cPlot.writeSeqDataFile(self.lSeqs, SeqDataFile)
if self.lDuplications:
SDDataFile = 'segdup.txt'
logging.info('Writing circos SD data file in {}'.format(SDDataFile))
cPlot.writeSegDupDataFile(self.lDuplications, SDDataFile)
SimilarityDataFile = 'similarity.txt'
logging.info('Writing circos similarity data file in {}'.format(SimilarityDataFile))
cPlot.writeSimilarityDataFile(self.lDuplications, SimilarityDataFile)
if self.lGenes:
GeneDataFile = 'gene.txt'
logging.info('Writing circos gene data file in {}'.format(GeneDataFile))
cPlot.writeGeneDataFile(self.lGenes, GeneDataFile)
if self.lGeneLinks:
GeneLinkDataFile = 'gene-link.txt'
logging.info('Writing circos gene-link data file in {}'.format(GeneLinkDataFile))
cPlot.writeGeneLinkDataFile(self.lGeneLinks, GeneLinkDataFile)
if self.lTEs:
TEDataFile = 'TE.txt'
logging.info('Writing circos TE/Repeat data file in {}'.format(TEDataFile))
cPlot.writeTEDataFile(self.lTEs, TEDataFile)
CircosConfFile = 'circos.conf'
logging.info('Writing circos configuration file in {}'.format(CircosConfFile))
cPlot.writeCircosConf()
def _buildGeneLinks(self,lGeneSeq1,lGeneSeq2,dup):
""" build links between genes """
lLinks = []
for gene1 in lGeneSeq1:
if gene1.start in dup.dSeqToSeq[gene1.seqid] and gene1.end in dup.dSeqToSeq[gene1.seqid]:
(seq2ID,val1) = dup.dSeqToSeq[gene1.seqid][gene1.start]
(seq2ID,val2) = dup.dSeqToSeq[gene1.seqid][gene1.end]
seq2Start = min(val1,val2)
seq2End = max(val1,val2)
for gene2 in lGeneSeq2:
if gene2.start in dup.dSeqToSeq[gene2.seqid] and gene2.end in dup.dSeqToSeq[gene2.seqid]:
if (gene2.start < seq2Start and gene2.end < seq2Start) or (gene2.start > seq2End and gene2.end > seq2End):
continue  # gene2 lies entirely outside the mapped region
else:
lLinks.append(GeneLink(dup=dup,gene1=gene1,gene2=gene2))
else:
logging.info('Could not analyze polymorphism on gene : {}, no full alignment span this region'.format(gene2.id))
else:
logging.info('Could not analyze polymorphism on gene : {}, no full alignment span this region'.format(gene1.id))
return lLinks
def _extractGeneInDuplication(self, dup):
"""extract all genes in a duplication"""
lGeneSeq1 = self.db.getlGenesFromCoordinates(dup.seq1,dup.start1,dup.end1)
lGeneSeq2 = self.db.getlGenesFromCoordinates(dup.seq2,dup.start2,dup.end2)
return (lGeneSeq1,lGeneSeq2)
if __name__ == "__main__":
program = 'segmental_duplication_gene_analyzer'
version = __version__
description = "segmental_duplication_gene_analyzer: analyzes segmental\
duplications in your assembly"
parser = argparse.ArgumentParser(prog=program, description=description)
parser.add_argument('--version', action='version', version='{} {}'.format(program,version))
parser.add_argument("SDFile", help="Segmental Duplication gff3 file = output of SDDetector (filtered or not)", type=str)
parser.add_argument("BlastXMLFile", help="Input Blast XML file", type=str)
parser.add_argument("GeneFile", help="gene annotation file in gff3 format", type=str)
parser.add_argument("outputFile", help="Output File", type=str)
parser.add_argument("-v", "--verbosity", type=int, choices=[1,2,3],
help="increase output verbosity 1=error, 2=info, 3=debug")
parser.add_argument("-t", "--TEFile", type=str, default=None, help="Transposable \
elements / Repeats file in gff3 format")
parser.add_argument("-g", "--GenomeFile", type=str, default=None, help="Genome \
fasta file, required for circos plot")
parser.add_argument("--circos", action="store_true", help="Write circos \
configuration file and associated data files")
args = parser.parse_args()
logLevel = 'ERROR'
if args.verbosity == 1:
logLevel = 'ERROR'
if args.verbosity == 2:
logLevel = 'INFO'
if args.verbosity == 3:
logLevel = 'DEBUG'
logging.getLogger().setLevel(logLevel)
if not os.path.exists(args.SDFile):
raise Exception('File {} does not exist'.format(args.SDFile))
if not os.path.exists(args.BlastXMLFile):
raise Exception('File {} does not exist'.format(args.BlastXMLFile))
if not os.path.exists(args.GeneFile):
raise Exception('File {} does not exist'.format(args.GeneFile))
if args.TEFile:
if not os.path.exists(args.TEFile):
raise Exception('File {} does not exist'.format(args.TEFile))
if args.GenomeFile:
if not os.path.exists(args.GenomeFile):
raise Exception('File {} does not exist'.format(args.GenomeFile))
analyzer = Analyzer(args.SDFile, args.BlastXMLFile, args.GeneFile, args.outputFile, GenomeFile=args.GenomeFile, TEFile=args.TEFile, circos=args.circos, logLevel=logLevel)
analyzer.runAnalyze()
|
nlapalu/SDDetector
|
bin/segmental_duplication_gene_analyzer.py
|
Python
|
gpl-3.0
| 13,598
|
[
"BLAST"
] |
f81a6481e693e97bca0a7938153ad26caf63800b1596b098d5d9d0a43744c43f
|
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Storage back-end for Mercurial.
This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""
# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev, short #@UnusedImport
from i18n import _
import changegroup, ancestor, mdiff, parsers, error, util
import struct, zlib, errno
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1
# revlog flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
# amount of data read unconditionally, should be >= 4
# when not inline: threshold for using lazy index
_prereadsize = 1048576
# max size of revlog with inline data
_maxinline = 131072
RevlogError = error.RevlogError
LookupError = error.LookupError
def getoffset(q):
return int(q >> 16)
def gettype(q):
return int(q & 0xFFFF)
def offset_type(offset, type):
return long(long(offset) << 16 | type)
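# Editorial sketch: the offset and the type flags share one 64-bit value,
# with the offset in the high 48 bits and the flags in the low 16 bits.
def _offset_type_sketch():
    v = offset_type(4096, 3)
    assert getoffset(v) == 4096 and gettype(v) == 3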
nullhash = _sha(nullid)
def hash(text, p1, p2):
"""generate a hash from the given text and its parent hashes
This hash combines both the current file contents and its history
in a manner that makes it easy to distinguish nodes with the same
content in the revision graph.
"""
# As of now, if one of the parent nodes is null, p2 is null
if p2 == nullid:
# deep copy of a hash is faster than creating one
s = nullhash.copy()
s.update(p1)
else:
# none of the parent nodes are nullid
l = [p1, p2]
l.sort()
s = _sha(l[0])
s.update(l[1])
s.update(text)
return s.digest()
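# Editorial sketch: the same text hashed with different parents yields
# different digests, which is the history-mixing property described above.
def _hash_sketch():
    a = hash("same text", nullid, nullid)
    b = hash("same text", _sha("parent one").digest(), nullid)
    assert a != b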
def compress(text):
""" generate a possibly-compressed representation of text """
if not text:
return ("", text)
l = len(text)
bin = None
if l < 44:
pass
elif l > 1000000:
# zlib makes an internal copy, thus doubling memory usage for
# large files, so let's do this in pieces
z = zlib.compressobj()
p = []
pos = 0
while pos < l:
pos2 = pos + 2**20
p.append(z.compress(text[pos:pos2]))
pos = pos2
p.append(z.flush())
if sum(map(len, p)) < l:
bin = "".join(p)
else:
bin = _compress(text)
if bin is None or len(bin) > l:
if text[0] == '\0':
return ("", text)
return ('u', text)
return ("", bin)
def decompress(bin):
""" decompress the given input """
if not bin:
return bin
t = bin[0]
if t == '\0':
return bin
if t == 'x':
return _decompress(bin)
if t == 'u':
return bin[1:]
raise RevlogError(_("unknown compression type %r") % t)
class lazyparser(object):
"""
this class avoids the need to parse the entirety of large indices
"""
# lazyparser is not safe to use on windows if the win32 extensions are
# not available. it keeps a file handle open, which makes it impossible
# to break hardlinks on locally cloned repos.
def __init__(self, dataf):
try:
size = util.fstat(dataf).st_size
except AttributeError:
size = 0
self.dataf = dataf
self.s = struct.calcsize(indexformatng)
self.datasize = size
self.l = size / self.s
self.index = [None] * self.l
self.map = {nullid: nullrev}
self.allmap = 0
self.all = 0
self.mapfind_count = 0
def loadmap(self):
"""
during a commit, we need to make sure the rev being added is
not a duplicate. This requires loading the entire index,
which is fairly slow. loadmap can load up just the node map,
which takes much less time.
"""
if self.allmap:
return
end = self.datasize
self.allmap = 1
cur = 0
count = 0
blocksize = self.s * 256
self.dataf.seek(0)
while cur < end:
data = self.dataf.read(blocksize)
off = 0
for x in xrange(256):
n = data[off + ngshaoffset:off + ngshaoffset + 20]
self.map[n] = count
count += 1
if count >= self.l:
break
off += self.s
cur += blocksize
def loadblock(self, blockstart, blocksize, data=None):
if self.all:
return
if data is None:
self.dataf.seek(blockstart)
if blockstart + blocksize > self.datasize:
# the revlog may have grown since we've started running,
# but we don't have space in self.index for more entries.
# limit blocksize so that we don't get too much data.
blocksize = max(self.datasize - blockstart, 0)
data = self.dataf.read(blocksize)
lend = len(data) / self.s
i = blockstart / self.s
off = 0
# lazyindex supports __delitem__
if lend > len(self.index) - i:
lend = len(self.index) - i
for x in xrange(lend):
if self.index[i + x] is None:
b = data[off : off + self.s]
self.index[i + x] = b
n = b[ngshaoffset:ngshaoffset + 20]
self.map[n] = i + x
off += self.s
def findnode(self, node):
"""search backwards through the index file for a specific node"""
if self.allmap:
return None
# hg log will cause many many searches for the manifest
# nodes. After we get called a few times, just load the whole
# thing.
if self.mapfind_count > 8:
self.loadmap()
if node in self.map:
return node
return None
self.mapfind_count += 1
last = self.l - 1
while self.index[last] is not None:
if last == 0:
self.all = 1
self.allmap = 1
return None
last -= 1
end = (last + 1) * self.s
blocksize = self.s * 256
while end >= 0:
start = max(end - blocksize, 0)
self.dataf.seek(start)
data = self.dataf.read(end - start)
findend = end - start
while True:
# we're searching backwards, so we have to make sure
# we don't find a changeset where this node is a parent
off = data.find(node, 0, findend)
findend = off
if off >= 0:
i = off / self.s
off = i * self.s
n = data[off + ngshaoffset:off + ngshaoffset + 20]
if n == node:
self.map[n] = i + start / self.s
return node
else:
break
end -= blocksize
return None
def loadindex(self, i=None, end=None):
if self.all:
return
all = False
if i is None:
blockstart = 0
blocksize = (65536 / self.s) * self.s
end = self.datasize
all = True
else:
if end:
blockstart = i * self.s
end = end * self.s
blocksize = end - blockstart
else:
blockstart = (i & ~1023) * self.s
blocksize = self.s * 1024
end = blockstart + blocksize
while blockstart < end:
self.loadblock(blockstart, blocksize)
blockstart += blocksize
if all:
self.all = True
class lazyindex(object):
"""a lazy version of the index array"""
def __init__(self, parser):
self.p = parser
def __len__(self):
return len(self.p.index)
def load(self, pos):
if pos < 0:
pos += len(self.p.index)
self.p.loadindex(pos)
return self.p.index[pos]
def __getitem__(self, pos):
return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
def __setitem__(self, pos, item):
self.p.index[pos] = _pack(indexformatng, *item)
def __delitem__(self, pos):
del self.p.index[pos]
def insert(self, pos, e):
self.p.index.insert(pos, _pack(indexformatng, *e))
def append(self, e):
self.p.index.append(_pack(indexformatng, *e))
class lazymap(object):
"""a lazy version of the node map"""
def __init__(self, parser):
self.p = parser
def load(self, key):
n = self.p.findnode(key)
if n is None:
raise KeyError(key)
def __contains__(self, key):
if key in self.p.map:
return True
self.p.loadmap()
return key in self.p.map
def __iter__(self):
yield nullid
for i, ret in enumerate(self.p.index):
if not ret:
self.p.loadindex(i)
ret = self.p.index[i]
if isinstance(ret, str):
ret = _unpack(indexformatng, ret)
yield ret[7]
def __getitem__(self, key):
try:
return self.p.map[key]
except KeyError:
try:
self.load(key)
return self.p.map[key]
except KeyError:
raise KeyError("node " + hex(key))
def __setitem__(self, key, val):
self.p.map[key] = val
def __delitem__(self, key):
del self.p.map[key]
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56
class revlogoldio(object):
def __init__(self):
self.size = struct.calcsize(indexformatv0)
def parseindex(self, fp, data, inline):
s = self.size
index = []
nodemap = {nullid: nullrev}
n = off = 0
if len(data) == _prereadsize:
data += fp.read() # read the rest
l = len(data)
while off + s <= l:
cur = data[off:off + s]
off += s
e = _unpack(indexformatv0, cur)
# transform to revlogv1 format
e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
index.append(e2)
nodemap[e[6]] = n
n += 1
return index, nodemap, None
def packentry(self, entry, node, version, rev):
if gettype(entry[0]):
raise RevlogError(_("index entry flags need RevlogNG"))
e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
node(entry[5]), node(entry[6]), entry[7])
return _pack(indexformatv0, *e2)
# index ng:
# 6 bytes offset
# 2 bytes flags
# 4 bytes compressed length
# 4 bytes uncompressed length
# 4 bytes: base rev
# 4 bytes link rev
# 4 bytes parent 1 rev
# 4 bytes parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"
class revlogio(object):
def __init__(self):
self.size = struct.calcsize(indexformatng)
def parseindex(self, fp, data, inline):
if len(data) == _prereadsize:
if util.openhardlinks() and not inline:
# big index, let's parse it on demand
parser = lazyparser(fp)
index = lazyindex(parser)
nodemap = lazymap(parser)
e = list(index[0])
type = gettype(e[0])
e[0] = offset_type(0, type)
index[0] = e
return index, nodemap, None
else:
data += fp.read()
# call the C implementation to parse the index data
index, nodemap, cache = parsers.parse_index(data, inline)
return index, nodemap, cache
def packentry(self, entry, node, version, rev):
p = _pack(indexformatng, *entry)
if rev == 0:
p = _pack(versionformat, version) + p[4:]
return p
class revlog(object):
"""
the underlying revision storage object
A revlog consists of two parts, an index and the revision data.
The index is a file with a fixed record size containing
information on each revision, including its nodeid (hash), the
nodeids of its parents, the position and offset of its data within
the data file, and the revision it's based on. Finally, each entry
contains a linkrev entry that can serve as a pointer to external
data.
The revision data itself is a linear collection of data chunks.
Each chunk represents a revision and is usually represented as a
delta against the previous chunk. To bound lookup time, runs of
deltas are limited to about 2 times the length of the original
version data. This makes retrieval of a version proportional to
its size, or O(1) relative to the number of revisions.
Both pieces of the revlog are written to in an append-only
fashion, which means we never need to rewrite a file to insert or
remove data, and can use some simple techniques to avoid the need
for locking while reading.
"""
def __init__(self, opener, indexfile):
"""
create a revlog object
opener is a function that abstracts the file opening operation
and can be used to implement COW semantics or the like.
"""
self.indexfile = indexfile
self.datafile = indexfile[:-2] + ".d"
self.opener = opener
self._cache = None
self._chunkcache = (0, '')
self.nodemap = {nullid: nullrev}
self.index = []
v = REVLOG_DEFAULT_VERSION
if hasattr(opener, 'options') and 'defversion' in opener.options:
v = opener.options['defversion']
if v & REVLOGNG:
v |= REVLOGNGINLINEDATA
i = ''
try:
f = self.opener(self.indexfile)
i = f.read(_prereadsize)
if len(i) > 0:
v = struct.unpack(versionformat, i[:4])[0]
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
self.version = v
self._inline = v & REVLOGNGINLINEDATA
flags = v & ~0xFFFF
fmt = v & 0xFFFF
if fmt == REVLOGV0 and flags:
raise RevlogError(_("index %s unknown flags %#04x for format v0")
% (self.indexfile, flags >> 16))
elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
raise RevlogError(_("index %s unknown flags %#04x for revlogng")
% (self.indexfile, flags >> 16))
elif fmt > REVLOGNG:
raise RevlogError(_("index %s unknown format %d")
% (self.indexfile, fmt))
self._io = revlogio()
if self.version == REVLOGV0:
self._io = revlogoldio()
if i:
try:
d = self._io.parseindex(f, i, self._inline)
except (ValueError, IndexError):
raise RevlogError(_("index %s is corrupted") % (self.indexfile))
self.index, self.nodemap, self._chunkcache = d
if not self._chunkcache:
self._chunkclear()
# add the magic null revision at -1 (if it hasn't been done already)
if (self.index == [] or isinstance(self.index, lazyindex) or
self.index[-1][7] != nullid) :
self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
def _loadindex(self, start, end):
"""load a block of indexes all at once from the lazy parser"""
if isinstance(self.index, lazyindex):
self.index.p.loadindex(start, end)
def _loadindexmap(self):
"""loads both the map and the index from the lazy parser"""
if isinstance(self.index, lazyindex):
p = self.index.p
p.loadindex()
self.nodemap = p.map
def _loadmap(self):
"""loads the map from the lazy parser"""
if isinstance(self.nodemap, lazymap):
self.nodemap.p.loadmap()
self.nodemap = self.nodemap.p.map
def tip(self):
return self.node(len(self.index) - 2)
def __len__(self):
return len(self.index) - 1
def __iter__(self):
for i in xrange(len(self)):
yield i
def rev(self, node):
try:
return self.nodemap[node]
except KeyError:
raise LookupError(node, self.indexfile, _('no node'))
def node(self, rev):
return self.index[rev][7]
def linkrev(self, rev):
return self.index[rev][4]
def parents(self, node):
i = self.index
d = i[self.rev(node)]
return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
def parentrevs(self, rev):
return self.index[rev][5:7]
def start(self, rev):
return int(self.index[rev][0] >> 16)
def end(self, rev):
return self.start(rev) + self.length(rev)
def length(self, rev):
return self.index[rev][1]
def base(self, rev):
return self.index[rev][3]
def size(self, rev):
"""return the length of the uncompressed text for a given revision"""
l = self.index[rev][2]
if l >= 0:
return l
t = self.revision(self.node(rev))
return len(t)
def reachable(self, node, stop=None):
"""return the set of all nodes ancestral to a given node, including
the node itself, stopping when stop is matched"""
reachable = set((node,))
visit = [node]
if stop:
stopn = self.rev(stop)
else:
stopn = 0
while visit:
n = visit.pop(0)
if n == stop:
continue
if n == nullid:
continue
for p in self.parents(n):
if self.rev(p) < stopn:
continue
if p not in reachable:
reachable.add(p)
visit.append(p)
return reachable
def ancestors(self, *revs):
"""Generate the ancestors of 'revs' in reverse topological order.
Yield a sequence of revision numbers starting with the parents
of each revision in revs, i.e., each revision is *not* considered
an ancestor of itself. Results are in breadth-first order:
parents of each rev in revs, then parents of those, etc. Result
does not include the null revision."""
visit = list(revs)
seen = set([nullrev])
while visit:
for parent in self.parentrevs(visit.pop(0)):
if parent not in seen:
visit.append(parent)
seen.add(parent)
yield parent
def descendants(self, *revs):
"""Generate the descendants of 'revs' in revision order.
Yield a sequence of revision numbers starting with a child of
some rev in revs, i.e., each revision is *not* considered a
descendant of itself. Results are ordered by revision number (a
topological sort)."""
seen = set(revs)
for i in xrange(min(revs) + 1, len(self)):
for x in self.parentrevs(i):
if x != nullrev and x in seen:
seen.add(i)
yield i
break
def findmissing(self, common=None, heads=None):
"""Return the ancestors of heads that are not ancestors of common.
More specifically, return a list of nodes N such that every N
satisfies the following constraints:
1. N is an ancestor of some node in 'heads'
2. N is not an ancestor of any node in 'common'
The list is sorted by revision number, meaning it is
topologically sorted.
'heads' and 'common' are both lists of node IDs. If heads is
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
common = [nullid]
if heads is None:
heads = self.heads()
common = [self.rev(n) for n in common]
heads = [self.rev(n) for n in heads]
# we want the ancestors, but inclusive
has = set(self.ancestors(*common))
has.add(nullrev)
has.update(common)
# take all ancestors from heads that aren't in has
missing = set()
visit = [r for r in heads if r not in has]
while visit:
r = visit.pop(0)
if r in missing:
continue
else:
missing.add(r)
for p in self.parentrevs(r):
if p not in has:
visit.append(p)
missing = list(missing)
missing.sort()
return [self.node(r) for r in missing]
def nodesbetween(self, roots=None, heads=None):
"""Return a topological path from 'roots' to 'heads'.
Return a tuple (nodes, outroots, outheads) where 'nodes' is a
topologically sorted list of all nodes N that satisfy both of
these constraints:
1. N is a descendant of some node in 'roots'
2. N is an ancestor of some node in 'heads'
Every node is considered to be both a descendant and an ancestor
of itself, so every reachable node in 'roots' and 'heads' will be
included in 'nodes'.
'outroots' is the list of reachable nodes in 'roots', i.e., the
subset of 'roots' that is returned in 'nodes'. Likewise,
'outheads' is the subset of 'heads' that is also in 'nodes'.
'roots' and 'heads' are both lists of node IDs. If 'roots' is
unspecified, uses nullid as the only root. If 'heads' is
unspecified, uses list of all of the revlog's heads."""
nonodes = ([], [], [])
if roots is not None:
roots = list(roots)
if not roots:
return nonodes
lowestrev = min([self.rev(n) for n in roots])
else:
roots = [nullid] # Everybody's a descendent of nullid
lowestrev = nullrev
if (lowestrev == nullrev) and (heads is None):
# We want _all_ the nodes!
return ([self.node(r) for r in self], [nullid], list(self.heads()))
if heads is None:
# All nodes are ancestors, so the latest ancestor is the last
# node.
highestrev = len(self) - 1
# Set ancestors to None to signal that every node is an ancestor.
ancestors = None
# Set heads to an empty dictionary for later discovery of heads
heads = {}
else:
heads = list(heads)
if not heads:
return nonodes
ancestors = set()
# Turn heads into a dictionary so we can remove 'fake' heads.
# Also, later we will be using it to filter out the heads we can't
# find from roots.
heads = dict.fromkeys(heads, 0)
# Start at the top and keep marking parents until we're done.
nodestotag = set(heads)
# Remember where the top was so we can use it as a limit later.
highestrev = max([self.rev(n) for n in nodestotag])
while nodestotag:
# grab a node to tag
n = nodestotag.pop()
# Never tag nullid
if n == nullid:
continue
# A node's revision number represents its place in a
# topologically sorted list of nodes.
r = self.rev(n)
if r >= lowestrev:
if n not in ancestors:
# If we are possibly a descendent of one of the roots
# and we haven't already been marked as an ancestor
ancestors.add(n) # Mark as ancestor
# Add non-nullid parents to list of nodes to tag.
nodestotag.update([p for p in self.parents(n) if
p != nullid])
elif n in heads: # We've seen it before, is it a fake head?
# So it is, real heads should not be the ancestors of
# any other heads.
heads.pop(n)
if not ancestors:
return nonodes
# Now that we have our set of ancestors, we want to remove any
# roots that are not ancestors.
# If one of the roots was nullid, everything is included anyway.
if lowestrev > nullrev:
# But, since we weren't, let's recompute the lowest rev to not
# include roots that aren't ancestors.
# Filter out roots that aren't ancestors of heads
roots = [n for n in roots if n in ancestors]
# Recompute the lowest revision
if roots:
lowestrev = min([self.rev(n) for n in roots])
else:
# No more roots? Return empty list
return nonodes
else:
# We are descending from nullid, and don't need to care about
# any other roots.
lowestrev = nullrev
roots = [nullid]
# Transform our roots list into a set.
descendents = set(roots)
# Also, keep the original roots so we can filter out roots that aren't
# 'real' roots (i.e. are descended from other roots).
roots = descendents.copy()
# Our topologically sorted list of output nodes.
orderedout = []
# Don't start at nullid since we don't want nullid in our output list,
# and if nullid shows up in descendents, empty parents will look like
# they're descendents.
for r in xrange(max(lowestrev, 0), highestrev + 1):
n = self.node(r)
isdescendent = False
if lowestrev == nullrev: # Everybody is a descendent of nullid
isdescendent = True
elif n in descendents:
# n is already a descendent
isdescendent = True
# This check only needs to be done here because all the roots
# will start being marked as descendents before the loop.
if n in roots:
# If n was a root, check if it's a 'real' root.
p = tuple(self.parents(n))
# If any of its parents are descendents, it's not a root.
if (p[0] in descendents) or (p[1] in descendents):
roots.remove(n)
else:
p = tuple(self.parents(n))
# A node is a descendent if either of its parents are
# descendents. (We seeded the descendents list with the roots
# up there, remember?)
if (p[0] in descendents) or (p[1] in descendents):
descendents.add(n)
isdescendent = True
if isdescendent and ((ancestors is None) or (n in ancestors)):
# Only include nodes that are both descendents and ancestors.
orderedout.append(n)
if (ancestors is not None) and (n in heads):
# We're trying to figure out which heads are reachable
# from roots.
# Mark this head as having been reached
heads[n] = 1
elif ancestors is None:
# Otherwise, we're trying to discover the heads.
# Assume this is a head because if it isn't, the next step
# will eventually remove it.
heads[n] = 1
# But, obviously its parents aren't.
for p in self.parents(n):
heads.pop(p, None)
heads = [n for n in heads.iterkeys() if heads[n] != 0]
roots = list(roots)
assert orderedout
assert roots
assert heads
return (orderedout, roots, heads)
def heads(self, start=None, stop=None):
"""return the list of all nodes that have no children
if start is specified, only heads that are descendants of
start will be returned
if stop is specified, it will consider all the revs from stop
as if they had no children
"""
if start is None and stop is None:
count = len(self)
if not count:
return [nullid]
ishead = [1] * (count + 1)
index = self.index
for r in xrange(count):
e = index[r]
ishead[e[5]] = ishead[e[6]] = 0
return [self.node(r) for r in xrange(count) if ishead[r]]
if start is None:
start = nullid
if stop is None:
stop = []
stoprevs = set([self.rev(n) for n in stop])
startrev = self.rev(start)
reachable = set((startrev,))
heads = set((startrev,))
parentrevs = self.parentrevs
for r in xrange(startrev + 1, len(self)):
for p in parentrevs(r):
if p in reachable:
if r not in stoprevs:
reachable.add(r)
heads.add(r)
if p in heads and p not in stoprevs:
heads.remove(p)
return [self.node(r) for r in heads]
def children(self, node):
"""find the children of a given node"""
c = []
p = self.rev(node)
for r in range(p + 1, len(self)):
prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
if prevs:
for pr in prevs:
if pr == p:
c.append(self.node(r))
elif p == nullrev:
c.append(self.node(r))
return c
def _match(self, id):
if isinstance(id, (long, int)):
# rev
return self.node(id)
if len(id) == 20:
# possibly a binary node
# odds of a binary node being all hex in ASCII are 1 in 10**25
try:
node = id
self.rev(node) # quick search the index
return node
except LookupError:
pass # may be partial hex id
try:
# str(rev)
rev = int(id)
if str(rev) != id:
raise ValueError
if rev < 0:
rev = len(self) + rev
if rev < 0 or rev >= len(self):
raise ValueError
return self.node(rev)
except (ValueError, OverflowError):
pass
if len(id) == 40:
try:
# a full hex nodeid?
node = bin(id)
self.rev(node)
return node
except (TypeError, LookupError):
pass
def _partialmatch(self, id):
if len(id) < 40:
try:
# hex(node)[:...]
l = len(id) // 2 # grab an even number of digits
bin_id = bin(id[:l * 2])
nl = [n for n in self.nodemap if n[:l] == bin_id]
nl = [n for n in nl if hex(n).startswith(id)]
if len(nl) > 0:
if len(nl) == 1:
return nl[0]
raise LookupError(id, self.indexfile,
_('ambiguous identifier'))
return None
except TypeError:
pass
def lookup(self, id):
"""locate a node based on:
- revision number or str(revision number)
- nodeid or subset of hex nodeid
"""
n = self._match(id)
if n is not None:
return n
n = self._partialmatch(id)
if n:
return n
raise LookupError(id, self.indexfile, _('no match found'))
def cmp(self, node, text):
"""compare text with a given file revision"""
p1, p2 = self.parents(node)
return hash(text, p1, p2) != node
def _addchunk(self, offset, data):
o, d = self._chunkcache
# try to add to existing cache
if o + len(d) == offset and len(d) + len(data) < _prereadsize:
self._chunkcache = o, d + data
else:
self._chunkcache = offset, data
def _loadchunk(self, offset, length):
if self._inline:
df = self.opener(self.indexfile)
else:
df = self.opener(self.datafile)
readahead = max(65536, length)
df.seek(offset)
d = df.read(readahead)
self._addchunk(offset, d)
if readahead > length:
return d[:length]
return d
def _getchunk(self, offset, length):
o, d = self._chunkcache
l = len(d)
# is it in the cache?
cachestart = offset - o
cacheend = cachestart + length
if cachestart >= 0 and cacheend <= l:
if cachestart == 0 and cacheend == l:
return d # avoid a copy
return d[cachestart:cacheend]
return self._loadchunk(offset, length)
def _chunkraw(self, startrev, endrev):
start = self.start(startrev)
length = self.end(endrev) - start
if self._inline:
start += (startrev + 1) * self._io.size
return self._getchunk(start, length)
def _chunk(self, rev):
return decompress(self._chunkraw(rev, rev))
def _chunkclear(self):
self._chunkcache = (0, '')
def revdiff(self, rev1, rev2):
"""return or calculate a delta between two revisions"""
if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
return self._chunk(rev2)
return mdiff.textdiff(self.revision(self.node(rev1)),
self.revision(self.node(rev2)))
def revision(self, node):
"""return an uncompressed revision of a given node"""
if node == nullid:
return ""
if self._cache and self._cache[0] == node:
return self._cache[2]
# look up what we need to read
text = None
rev = self.rev(node)
base = self.base(rev)
# check rev flags
if self.index[rev][0] & 0xFFFF:
raise RevlogError(_('incompatible revision flag %x') %
(self.index[rev][0] & 0xFFFF))
# do we have useful data cached?
if self._cache and self._cache[1] >= base and self._cache[1] < rev:
base = self._cache[1]
text = self._cache[2]
self._loadindex(base, rev + 1)
self._chunkraw(base, rev)
if text is None:
text = self._chunk(base)
bins = [self._chunk(r) for r in xrange(base + 1, rev + 1)]
text = mdiff.patches(text, bins)
p1, p2 = self.parents(node)
if node != hash(text, p1, p2):
raise RevlogError(_("integrity check failed on %s:%d")
% (self.indexfile, rev))
self._cache = (node, rev, text)
return text
def checkinlinesize(self, tr, fp=None):
if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
return
trinfo = tr.find(self.indexfile)
if trinfo is None:
raise RevlogError(_("%s not found in the transaction")
% self.indexfile)
trindex = trinfo[2]
dataoff = self.start(trindex)
tr.add(self.datafile, dataoff)
if fp:
fp.flush()
fp.close()
df = self.opener(self.datafile, 'w')
try:
for r in self:
df.write(self._chunkraw(r, r))
finally:
df.close()
fp = self.opener(self.indexfile, 'w', atomictemp=True)
self.version &= ~(REVLOGNGINLINEDATA)
self._inline = False
for i in self:
e = self._io.packentry(self.index[i], self.node, self.version, i)
fp.write(e)
# if we don't call rename, the temp file will never replace the
# real index
fp.rename()
tr.replace(self.indexfile, trindex * self._io.size)
self._chunkclear()
def addrevision(self, text, transaction, link, p1, p2, d=None):
"""add a revision to the log
text - the revision data to add
transaction - the transaction object used for rollback
link - the linkrev data to add
p1, p2 - the parent nodeids of the revision
d - an optional precomputed delta
"""
dfh = None
if not self._inline:
dfh = self.opener(self.datafile, "a")
ifh = self.opener(self.indexfile, "a+")
try:
return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
finally:
if dfh:
dfh.close()
ifh.close()
def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
node = hash(text, p1, p2)
if node in self.nodemap:
return node
curr = len(self)
prev = curr - 1
base = self.base(prev)
offset = self.end(prev)
if curr:
if not d:
ptext = self.revision(self.node(prev))
d = mdiff.textdiff(ptext, text)
data = compress(d)
l = len(data[1]) + len(data[0])
dist = l + offset - self.start(base)
# full versions are inserted when the needed deltas
# become comparable to the uncompressed text
if not curr or dist > len(text) * 2:
data = compress(text)
l = len(data[1]) + len(data[0])
base = curr
e = (offset_type(offset, 0), l, len(text),
base, link, self.rev(p1), self.rev(p2), node)
self.index.insert(-1, e)
self.nodemap[node] = curr
entry = self._io.packentry(e, self.node, self.version, curr)
if not self._inline:
transaction.add(self.datafile, offset)
transaction.add(self.indexfile, curr * len(entry))
if data[0]:
dfh.write(data[0])
dfh.write(data[1])
dfh.flush()
ifh.write(entry)
else:
offset += curr * self._io.size
transaction.add(self.indexfile, offset, curr)
ifh.write(entry)
ifh.write(data[0])
ifh.write(data[1])
self.checkinlinesize(transaction, ifh)
if type(text) == str: # only accept immutable objects
self._cache = (node, curr, text)
return node
def descendant(self, start, end):
for i in self.descendants(start):
if i == end:
return True
elif i > end:
break
return False
def ancestor(self, a, b):
"""calculate the least common ancestor of nodes a and b"""
# fast path, check if it is a descendant
a, b = self.rev(a), self.rev(b)
start, end = sorted((a, b))
if self.descendant(start, end):
return self.node(start)
def parents(rev):
return [p for p in self.parentrevs(rev) if p != nullrev]
c = ancestor.ancestor(a, b, parents)
if c is None:
return nullid
return self.node(c)
def group(self, nodelist, lookup, infocollect=None):
"""Calculate a delta group, yielding a sequence of changegroup chunks
(strings).
Given a list of changeset revs, return a set of deltas and
metadata corresponding to nodes. The first delta is
parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
have this parent as it has all history before these
changesets. The parent used is parent[0].
"""
revs = [self.rev(n) for n in nodelist]
# if we don't have any revisions touched by these changesets, bail
if not revs:
yield changegroup.closechunk()
return
# add the parent of the first rev
p = self.parentrevs(revs[0])[0]
revs.insert(0, p)
# build deltas
for d in xrange(len(revs) - 1):
a, b = revs[d], revs[d + 1]
nb = self.node(b)
if infocollect is not None:
infocollect(nb)
p = self.parents(nb)
meta = nb + p[0] + p[1] + lookup(nb)
if a == -1:
d = self.revision(nb)
meta += mdiff.trivialdiffheader(len(d))
else:
d = self.revdiff(a, b)
yield changegroup.chunkheader(len(meta) + len(d))
yield meta
if len(d) > 2**20:
pos = 0
while pos < len(d):
pos2 = pos + 2 ** 18
yield d[pos:pos2]
pos = pos2
else:
yield d
yield changegroup.closechunk()
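# Editor's note: each chunk yielded above is consumed by addgroup() below,
# which expects 80 bytes of metadata -- four 20-byte binary nodeids
# (node, p1, p2, cs), see its struct.unpack call -- followed by the delta.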
def addgroup(self, revs, linkmapper, transaction):
"""
add a delta group
Given a set of deltas, add them to the revision log. The
first delta is against its parent, which should be in our
log; the rest are against the previous delta.
"""
# track the base of the current delta log
r = len(self)
t = r - 1
node = None
base = prev = nullrev
start = end = textlen = 0
if r:
end = self.end(t)
ifh = self.opener(self.indexfile, "a+")
isize = r * self._io.size
if self._inline:
transaction.add(self.indexfile, end + isize, r)
dfh = None
else:
transaction.add(self.indexfile, isize, r)
transaction.add(self.datafile, end)
dfh = self.opener(self.datafile, "a")
try:
# loop through our set of deltas
chain = None
for chunk in revs:
node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
link = linkmapper(cs)
if node in self.nodemap:
# this can happen if two branches make the same change
chain = node
continue
delta = buffer(chunk, 80)
del chunk
for p in (p1, p2):
if not p in self.nodemap:
raise LookupError(p, self.indexfile, _('unknown parent'))
if not chain:
# retrieve the parent revision of the delta chain
chain = p1
if not chain in self.nodemap:
raise LookupError(chain, self.indexfile, _('unknown base'))
# full versions are inserted when the needed deltas become
# comparable to the uncompressed text or when the previous
# version is not the one we have a delta against. We use
# the size of the previous full rev as a proxy for the
# current size.
if chain == prev:
cdelta = compress(delta)
cdeltalen = len(cdelta[0]) + len(cdelta[1])
textlen = mdiff.patchedsize(textlen, delta)
if chain != prev or (end - start + cdeltalen) > textlen * 2:
# flush our writes here so we can read it in revision
if dfh:
dfh.flush()
ifh.flush()
text = self.revision(chain)
if len(text) == 0:
# skip over trivial delta header
text = buffer(delta, 12)
else:
text = mdiff.patches(text, [delta])
del delta
chk = self._addrevision(text, transaction, link, p1, p2, None,
ifh, dfh)
if not dfh and not self._inline:
# addrevision switched from inline to conventional
# reopen the index and data files
dfh = self.opener(self.datafile, "a")
ifh = self.opener(self.indexfile, "a")
if chk != node:
raise RevlogError(_("consistency error adding group"))
textlen = len(text)
else:
e = (offset_type(end, 0), cdeltalen, textlen, base,
link, self.rev(p1), self.rev(p2), node)
self.index.insert(-1, e)
self.nodemap[node] = r
entry = self._io.packentry(e, self.node, self.version, r)
if self._inline:
ifh.write(entry)
ifh.write(cdelta[0])
ifh.write(cdelta[1])
self.checkinlinesize(transaction, ifh)
if not self._inline:
dfh = self.opener(self.datafile, "a")
ifh = self.opener(self.indexfile, "a")
else:
dfh.write(cdelta[0])
dfh.write(cdelta[1])
ifh.write(entry)
t, r, chain, prev = r, r + 1, node, node
base = self.base(t)
start = self.start(base)
end = self.end(t)
finally:
if dfh:
dfh.close()
ifh.close()
return node
def strip(self, minlink, transaction):
"""truncate the revlog on the first revision with a linkrev >= minlink
This function is called when we're stripping revision minlink and
its descendants from the repository.
We have to remove all revisions with linkrev >= minlink, because
the equivalent changelog revisions will be renumbered after the
strip.
So we truncate the revlog on the first of these revisions, and
trust that the caller has saved the revisions that shouldn't be
removed and that it'll re-add them after this truncation.
"""
if len(self) == 0:
return
if isinstance(self.index, lazyindex):
self._loadindexmap()
for rev in self:
if self.index[rev][4] >= minlink:
break
else:
return
# first truncate the files on disk
end = self.start(rev)
if not self._inline:
transaction.add(self.datafile, end)
end = rev * self._io.size
else:
end += rev * self._io.size
transaction.add(self.indexfile, end)
# then reset internal state in memory to forget those revisions
self._cache = None
self._chunkclear()
for x in xrange(rev, len(self)):
del self.nodemap[self.node(x)]
del self.index[rev:-1]
def checksize(self):
expected = 0
if len(self):
expected = max(0, self.end(len(self) - 1))
try:
f = self.opener(self.datafile)
f.seek(0, 2)
actual = f.tell()
dd = actual - expected
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
dd = 0
try:
f = self.opener(self.indexfile)
f.seek(0, 2)
actual = f.tell()
s = self._io.size
i = max(0, actual // s)
di = actual - (i * s)
if self._inline:
databytes = 0
for r in self:
databytes += max(0, self.length(r))
dd = 0
di = actual - len(self) * s - databytes
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
di = 0
return (dd, di)
def files(self):
res = [self.indexfile]
if not self._inline:
res.append(self.datafile)
return res
|
joewalnes/idea-community
|
plugins/hg4idea/testData/bin/mercurial/revlog.py
|
Python
|
apache-2.0
| 48,667
|
[
"VisIt"
] |
c7e8eb2771af9d2cdd75c8735440c7bd0b5e47328bf8aca2f852b90a5a0cedba
|
#!/usr/bin/python
#
# Copyright (C) 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Converter tools between ovf and ganeti config file
"""
# pylint: disable=F0401, E1101
# F0401 because ElementTree is not default for python 2.4
# E1101 makes no sense - pylint assumes that ElementTree object is a tuple
import ConfigParser
import errno
import logging
import os
import os.path
import re
import shutil
import tarfile
import tempfile
import xml.dom.minidom
import xml.parsers.expat
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
try:
ParseError = ET.ParseError # pylint: disable=E1103
except AttributeError:
ParseError = None
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import pathutils
# Schemas used in OVF format
GANETI_SCHEMA = "http://ganeti"
OVF_SCHEMA = "http://schemas.dmtf.org/ovf/envelope/1"
RASD_SCHEMA = ("http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/"
"CIM_ResourceAllocationSettingData")
VSSD_SCHEMA = ("http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/"
"CIM_VirtualSystemSettingData")
XML_SCHEMA = "http://www.w3.org/2001/XMLSchema-instance"
# File extensions in OVF package
OVA_EXT = ".ova"
OVF_EXT = ".ovf"
MF_EXT = ".mf"
CERT_EXT = ".cert"
COMPRESSION_EXT = ".gz"
FILE_EXTENSIONS = [
OVF_EXT,
MF_EXT,
CERT_EXT,
]
COMPRESSION_TYPE = "gzip"
NO_COMPRESSION = [None, "identity"]
COMPRESS = "compression"
DECOMPRESS = "decompression"
ALLOWED_ACTIONS = [COMPRESS, DECOMPRESS]
VMDK = "vmdk"
RAW = "raw"
COW = "cow"
ALLOWED_FORMATS = [RAW, COW, VMDK]
# ResourceType values
RASD_TYPE = {
"vcpus": "3",
"memory": "4",
"scsi-controller": "6",
"ethernet-adapter": "10",
"disk": "17",
}
SCSI_SUBTYPE = "lsilogic"
VS_TYPE = {
"ganeti": "ganeti-ovf",
"external": "vmx-04",
}
# AllocationUnits values and conversion
ALLOCATION_UNITS = {
"b": ["bytes", "b"],
"kb": ["kilobytes", "kb", "byte * 2^10", "kibibytes", "kib"],
"mb": ["megabytes", "mb", "byte * 2^20", "mebibytes", "mib"],
"gb": ["gigabytes", "gb", "byte * 2^30", "gibibytes", "gib"],
}
CONVERT_UNITS_TO_MB = {
"b": lambda x: x / (1024 * 1024),
"kb": lambda x: x / 1024,
"mb": lambda x: x,
"gb": lambda x: x * 1024,
}
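# Example (editor's sketch): converting 2 GB of memory to MB with the
# tables above.
#   >>> CONVERT_UNITS_TO_MB["gb"](2)
#   2048
#   >>> [u for u, names in ALLOCATION_UNITS.items() if "byte * 2^30" in names]
#   ['gb']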
# Names of the config fields
NAME = "name"
OS = "os"
HYPERV = "hypervisor"
VCPUS = "vcpus"
MEMORY = "memory"
AUTO_BALANCE = "auto_balance"
DISK_TEMPLATE = "disk_template"
TAGS = "tags"
VERSION = "version"
# Instance IDs of System and SCSI controller
INSTANCE_ID = {
"system": 0,
"vcpus": 1,
"memory": 2,
"scsi": 3,
}
# Disk format descriptions
DISK_FORMAT = {
RAW: "http://en.wikipedia.org/wiki/Byte",
VMDK: "http://www.vmware.com/interfaces/specifications/vmdk.html"
"#monolithicSparse",
COW: "http://www.gnome.org/~markmc/qcow-image-format.html",
}
def CheckQemuImg():
""" Make sure that qemu-img is present before performing operations.
@raise errors.OpPrereqError: when qemu-img was not found in the system
"""
if not constants.QEMUIMG_PATH:
raise errors.OpPrereqError("qemu-img not found at build time, unable"
" to continue", errors.ECODE_STATE)
def LinkFile(old_path, prefix=None, suffix=None, directory=None):
"""Create link with a given prefix and suffix.
This is a wrapper over os.link. It tries to create a hard link for the given
file, but instead of raising an error when the file exists, it alters the
name slightly.
@type old_path: string
@param old_path: path to the file that is to be linked
@type prefix: string
@param prefix: prefix of filename for the link
@type suffix: string
@param suffix: suffix of the filename for the link
@type directory: string
@param directory: directory of the link
@raise errors.OpPrereqError: when error on linking is different than
"File exists"
"""
assert(prefix is not None or suffix is not None)
if directory is None:
directory = os.getcwd()
new_path = utils.PathJoin(directory, "%s%s" % (prefix, suffix))
counter = 1
while True:
try:
os.link(old_path, new_path)
break
except OSError, err:
if err.errno == errno.EEXIST:
new_path = utils.PathJoin(directory,
"%s_%s%s" % (prefix, counter, suffix))
counter += 1
else:
raise errors.OpPrereqError("Error moving the file %s to %s location:"
" %s" % (old_path, new_path, err),
errors.ECODE_ENVIRON)
return new_path
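# Usage sketch (editor's illustration, paths hypothetical):
#   new_path = LinkFile("/tmp/disk.vmdk", prefix="disk", suffix=".vmdk",
#                       directory="/export/instance")
# If /export/instance/disk.vmdk already exists the link is created as
# disk_1.vmdk, then disk_2.vmdk, and so on.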
class OVFReader(object):
"""Reader class for OVF files.
@type files_list: list
@ivar files_list: list of files in the OVF package
@type tree: ET.ElementTree
@ivar tree: XML tree of the .ovf file
@type schema_name: string
@ivar schema_name: name of the .ovf file
@type input_dir: string
@ivar input_dir: directory in which the .ovf file resides
"""
def __init__(self, input_path):
"""Initialiaze the reader - load the .ovf file to XML parser.
It is assumed that names of manifesto (.mf), certificate (.cert) and ovf
files are the same. In order to account any other files as part of the ovf
package, they have to be explicitly mentioned in the Resources section
of the .ovf file.
@type input_path: string
@param input_path: absolute path to the .ovf file
@raise errors.OpPrereqError: when .ovf file is not a proper XML file or some
of the files mentioned in the References section do not exist
"""
self.tree = ET.ElementTree()
try:
self.tree.parse(input_path)
except (ParseError, xml.parsers.expat.ExpatError), err:
raise errors.OpPrereqError("Error while reading %s file: %s" %
(OVF_EXT, err), errors.ECODE_ENVIRON)
# Create a list of all files in the OVF package
(input_dir, input_file) = os.path.split(input_path)
(input_name, _) = os.path.splitext(input_file)
files_directory = utils.ListVisibleFiles(input_dir)
files_list = []
for file_name in files_directory:
(name, extension) = os.path.splitext(file_name)
if extension in FILE_EXTENSIONS and name == input_name:
files_list.append(file_name)
files_list += self._GetAttributes("{%s}References/{%s}File" %
(OVF_SCHEMA, OVF_SCHEMA),
"{%s}href" % OVF_SCHEMA)
for file_name in files_list:
file_path = utils.PathJoin(input_dir, file_name)
if not os.path.exists(file_path):
raise errors.OpPrereqError("File does not exist: %s" % file_path,
errors.ECODE_ENVIRON)
logging.info("Files in the OVF package: %s", " ".join(files_list))
self.files_list = files_list
self.input_dir = input_dir
self.schema_name = input_name
def _GetAttributes(self, path, attribute):
"""Get specified attribute from all nodes accessible using given path.
Function follows the path from root node to the desired tags using path,
then reads the appropriate attribute values.
@type path: string
@param path: path of nodes to visit
@type attribute: string
@param attribute: attribute for which we gather the information
@rtype: list
@return: for each accessible tag with the attribute value set, value of the
attribute
"""
current_list = self.tree.findall(path)
results = [x.get(attribute) for x in current_list]
return filter(None, results)
def _GetElementMatchingAttr(self, path, match_attr):
"""Searches for element on a path that matches certain attribute value.
Function follows the path from root node to the desired tags using path,
then searches for the first one matching the attribute value.
@type path: string
@param path: path of nodes to visit
@type match_attr: tuple
@param match_attr: pair (attribute, value) for which we search
@rtype: ET.ElementTree or None
@return: first element matching match_attr or None if nothing matches
"""
potential_elements = self.tree.findall(path)
(attr, val) = match_attr
for elem in potential_elements:
if elem.get(attr) == val:
return elem
return None
def _GetElementMatchingText(self, path, match_text):
"""Searches for element on a path that matches certain text value.
Function follows the path from root node to the desired tags using path,
then searches for the first one matching the text value.
@type path: string
@param path: path of nodes to visit
@type match_text: tuple
@param match_text: pair (node, text) for which we search
@rtype: ET.ElementTree or None
@return: first element matching match_text or None if nothing matches
"""
potential_elements = self.tree.findall(path)
(node, text) = match_text
for elem in potential_elements:
if elem.findtext(node) == text:
return elem
return None
@staticmethod
def _GetDictParameters(root, schema):
"""Reads text in all children and creates the dictionary from the contents.
@type root: ET.ElementTree or None
@param root: father of the nodes we want to collect data about
@type schema: string
@param schema: schema name to be removed from the tag
@rtype: dict
@return: dictionary mapping tags (with their schema fragment removed) to
their text contents; an empty dictionary when root is None
"""
if not root:
return {}
results = {}
for element in list(root):
pref_len = len("{%s}" % schema)
assert(schema in element.tag)
tag = element.tag[pref_len:]
results[tag] = element.text
return results
def VerifyManifest(self):
"""Verifies manifest for the OVF package, if one is given.
@raise errors.OpPrereqError: if SHA1 checksums do not match
"""
if "%s%s" % (self.schema_name, MF_EXT) in self.files_list:
logging.warning("Verifying SHA1 checksums, this may take a while")
manifest_filename = "%s%s" % (self.schema_name, MF_EXT)
manifest_path = utils.PathJoin(self.input_dir, manifest_filename)
manifest_content = utils.ReadFile(manifest_path).splitlines()
manifest_files = {}
regexp = r"SHA1\((\S+)\)= (\S+)"
for line in manifest_content:
match = re.match(regexp, line)
if match:
file_name = match.group(1)
sha1_sum = match.group(2)
manifest_files[file_name] = sha1_sum
files_with_paths = [utils.PathJoin(self.input_dir, file_name)
for file_name in self.files_list]
sha1_sums = utils.FingerprintFiles(files_with_paths)
for file_name, value in manifest_files.iteritems():
if sha1_sums.get(utils.PathJoin(self.input_dir, file_name)) != value:
raise errors.OpPrereqError("SHA1 checksum of %s does not match the"
" value in manifest file" % file_name,
errors.ECODE_ENVIRON)
logging.info("SHA1 checksums verified")
def GetInstanceName(self):
"""Provides information about instance name.
@rtype: string
@return: instance name string
"""
find_name = "{%s}VirtualSystem/{%s}Name" % (OVF_SCHEMA, OVF_SCHEMA)
return self.tree.findtext(find_name)
def GetDiskTemplate(self):
"""Returns disk template from .ovf file
@rtype: string or None
@return: name of the template
"""
find_template = ("{%s}GanetiSection/{%s}DiskTemplate" %
(GANETI_SCHEMA, GANETI_SCHEMA))
return self.tree.findtext(find_template)
def GetHypervisorData(self):
"""Provides hypervisor information - hypervisor name and options.
@rtype: dict
@return: dictionary containing name of the used hypervisor and all the
specified options
"""
hypervisor_search = ("{%s}GanetiSection/{%s}Hypervisor" %
(GANETI_SCHEMA, GANETI_SCHEMA))
hypervisor_data = self.tree.find(hypervisor_search)
if not hypervisor_data:
return {"hypervisor_name": constants.VALUE_AUTO}
results = {
"hypervisor_name": hypervisor_data.findtext("{%s}Name" % GANETI_SCHEMA,
default=constants.VALUE_AUTO),
}
parameters = hypervisor_data.find("{%s}Parameters" % GANETI_SCHEMA)
results.update(self._GetDictParameters(parameters, GANETI_SCHEMA))
return results
def GetOSData(self):
""" Provides operating system information - os name and options.
@rtype: dict
@return: dictionary containing name and options for the chosen OS
"""
results = {}
os_search = ("{%s}GanetiSection/{%s}OperatingSystem" %
(GANETI_SCHEMA, GANETI_SCHEMA))
os_data = self.tree.find(os_search)
if os_data:
results["os_name"] = os_data.findtext("{%s}Name" % GANETI_SCHEMA)
parameters = os_data.find("{%s}Parameters" % GANETI_SCHEMA)
results.update(self._GetDictParameters(parameters, GANETI_SCHEMA))
return results
def GetBackendData(self):
""" Provides backend information - vcpus, memory, auto balancing options.
@rtype: dict
@return: dictionary containing options for vcpus, memory and auto balance
settings
"""
results = {}
find_vcpus = ("{%s}VirtualSystem/{%s}VirtualHardwareSection/{%s}Item" %
(OVF_SCHEMA, OVF_SCHEMA, OVF_SCHEMA))
match_vcpus = ("{%s}ResourceType" % RASD_SCHEMA, RASD_TYPE["vcpus"])
vcpus = self._GetElementMatchingText(find_vcpus, match_vcpus)
if vcpus:
vcpus_count = vcpus.findtext("{%s}VirtualQuantity" % RASD_SCHEMA,
default=constants.VALUE_AUTO)
else:
vcpus_count = constants.VALUE_AUTO
results["vcpus"] = str(vcpus_count)
find_memory = find_vcpus
match_memory = ("{%s}ResourceType" % RASD_SCHEMA, RASD_TYPE["memory"])
memory = self._GetElementMatchingText(find_memory, match_memory)
memory_raw = None
if memory:
alloc_units = memory.findtext("{%s}AllocationUnits" % RASD_SCHEMA)
matching_units = [units for units, variants in ALLOCATION_UNITS.items()
if alloc_units.lower() in variants]
if matching_units == []:
raise errors.OpPrereqError("Unit %s for RAM memory unknown" %
alloc_units, errors.ECODE_INVAL)
units = matching_units[0]
memory_raw = int(memory.findtext("{%s}VirtualQuantity" % RASD_SCHEMA,
default=constants.VALUE_AUTO))
memory_count = CONVERT_UNITS_TO_MB[units](memory_raw)
else:
memory_count = constants.VALUE_AUTO
results["memory"] = str(memory_count)
find_balance = ("{%s}GanetiSection/{%s}AutoBalance" %
(GANETI_SCHEMA, GANETI_SCHEMA))
balance = self.tree.findtext(find_balance, default=constants.VALUE_AUTO)
results["auto_balance"] = balance
return results
def GetTagsData(self):
"""Provides tags information for instance.
@rtype: string or None
@return: string of comma-separated tags for the instance
"""
find_tags = "{%s}GanetiSection/{%s}Tags" % (GANETI_SCHEMA, GANETI_SCHEMA)
results = self.tree.findtext(find_tags)
if results:
return results
else:
return None
def GetVersionData(self):
"""Provides version number read from .ovf file
@rtype: string
@return: string containing the version number
"""
find_version = ("{%s}GanetiSection/{%s}Version" %
(GANETI_SCHEMA, GANETI_SCHEMA))
return self.tree.findtext(find_version)
def GetNetworkData(self):
"""Provides data about the network in the OVF instance.
The method gathers the data about networks used by the OVF instance. It
assumes that the 'name' tag is meaningful - in essence, if it contains one
of the words 'bridged' or 'routed', that word becomes the mode of this
network in Ganeti. The information about the network can be either in
GanetiSection or
VirtualHardwareSection.
@rtype: dict
@return: dictionary containing all the network information
"""
results = {}
networks_search = ("{%s}NetworkSection/{%s}Network" %
(OVF_SCHEMA, OVF_SCHEMA))
network_names = self._GetAttributes(networks_search,
"{%s}name" % OVF_SCHEMA)
required = ["ip", "mac", "link", "mode", "network"]
for (counter, network_name) in enumerate(network_names):
network_search = ("{%s}VirtualSystem/{%s}VirtualHardwareSection/{%s}Item"
% (OVF_SCHEMA, OVF_SCHEMA, OVF_SCHEMA))
ganeti_search = ("{%s}GanetiSection/{%s}Network/{%s}Nic" %
(GANETI_SCHEMA, GANETI_SCHEMA, GANETI_SCHEMA))
network_match = ("{%s}Connection" % RASD_SCHEMA, network_name)
ganeti_match = ("{%s}name" % OVF_SCHEMA, network_name)
network_data = self._GetElementMatchingText(network_search, network_match)
network_ganeti_data = self._GetElementMatchingAttr(ganeti_search,
ganeti_match)
ganeti_data = {}
if network_ganeti_data:
ganeti_data["mode"] = network_ganeti_data.findtext("{%s}Mode" %
GANETI_SCHEMA)
ganeti_data["mac"] = network_ganeti_data.findtext("{%s}MACAddress" %
GANETI_SCHEMA)
ganeti_data["ip"] = network_ganeti_data.findtext("{%s}IPAddress" %
GANETI_SCHEMA)
ganeti_data["link"] = network_ganeti_data.findtext("{%s}Link" %
GANETI_SCHEMA)
ganeti_data["network"] = network_ganeti_data.findtext("{%s}Net" %
GANETI_SCHEMA)
mac_data = None
if network_data:
mac_data = network_data.findtext("{%s}Address" % RASD_SCHEMA)
network_name = network_name.lower()
# First, some non-Ganeti-specific information is collected
if constants.NIC_MODE_BRIDGED in network_name:
results["nic%s_mode" % counter] = "bridged"
elif constants.NIC_MODE_ROUTED in network_name:
results["nic%s_mode" % counter] = "routed"
results["nic%s_mac" % counter] = mac_data
# GanetiSection data overrides 'manually' collected data
for name, value in ganeti_data.iteritems():
results["nic%s_%s" % (counter, name)] = value
# Bridged network has no IP - unless specifically stated otherwise
if (results.get("nic%s_mode" % counter) == "bridged" and
not results.get("nic%s_ip" % counter)):
results["nic%s_ip" % counter] = constants.VALUE_NONE
for option in required:
if not results.get("nic%s_%s" % (counter, option)):
results["nic%s_%s" % (counter, option)] = constants.VALUE_AUTO
if network_names:
results["nic_count"] = str(len(network_names))
return results
def GetDisksNames(self):
"""Provides list of file names for the disks used by the instance.
@rtype: list
@return: list of file names, as referenced in .ovf file
"""
results = []
disks_search = "{%s}DiskSection/{%s}Disk" % (OVF_SCHEMA, OVF_SCHEMA)
disk_ids = self._GetAttributes(disks_search, "{%s}fileRef" % OVF_SCHEMA)
for disk in disk_ids:
disk_search = "{%s}References/{%s}File" % (OVF_SCHEMA, OVF_SCHEMA)
disk_match = ("{%s}id" % OVF_SCHEMA, disk)
disk_elem = self._GetElementMatchingAttr(disk_search, disk_match)
if disk_elem is None:
raise errors.OpPrereqError("%s file corrupted - disk %s not found in"
" references" % (OVF_EXT, disk),
errors.ECODE_ENVIRON)
disk_name = disk_elem.get("{%s}href" % OVF_SCHEMA)
disk_compression = disk_elem.get("{%s}compression" % OVF_SCHEMA)
results.append((disk_name, disk_compression))
return results
def SubElementText(parent, tag, text, attrib={}, **extra):
# pylint: disable=W0102
"""This is just a wrapper on ET.SubElement that always has text content.
"""
if text is None:
return None
elem = ET.SubElement(parent, tag, attrib=attrib, **extra)
elem.text = str(text)
return elem
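# Usage sketch (editor's illustration):
#   root = ET.Element("Envelope")
#   SubElementText(root, "Info", "A virtual machine")  # adds an <Info> child
#   SubElementText(root, "Info", None)  # returns None, adds nothing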
class OVFWriter(object):
"""Writer class for OVF files.
@type tree: ET.ElementTree
@ivar tree: XML tree that we are constructing
@type virtual_system_type: string
@ivar virtual_system_type: value of vssd:VirtualSystemType, for external usage
in VMware this is required to be vmx
@type hardware_list: list
@ivar hardware_list: list of items prepared for VirtualHardwareSection
@type next_instance_id: int
@ivar next_instance_id: next instance id to be used when creating elements on
hardware_list
"""
def __init__(self, has_gnt_section):
"""Initialize the writer - set the top element.
@type has_gnt_section: bool
@param has_gnt_section: if the Ganeti schema should be added - i.e. this
means that Ganeti section will be present
"""
env_attribs = {
"xmlns:xsi": XML_SCHEMA,
"xmlns:vssd": VSSD_SCHEMA,
"xmlns:rasd": RASD_SCHEMA,
"xmlns:ovf": OVF_SCHEMA,
"xmlns": OVF_SCHEMA,
"xml:lang": "en-US",
}
if has_gnt_section:
env_attribs["xmlns:gnt"] = GANETI_SCHEMA
self.virtual_system_type = VS_TYPE["ganeti"]
else:
self.virtual_system_type = VS_TYPE["external"]
self.tree = ET.Element("Envelope", attrib=env_attribs)
self.hardware_list = []
# INSTANCE_ID contains statically assigned IDs, starting from 0
self.next_instance_id = len(INSTANCE_ID) # FIXME: hackish
def SaveDisksData(self, disks):
"""Convert disk information to certain OVF sections.
@type disks: list
@param disks: list of dictionaries of disk options from config.ini
"""
references = ET.SubElement(self.tree, "References")
disk_section = ET.SubElement(self.tree, "DiskSection")
SubElementText(disk_section, "Info", "Virtual disk information")
for counter, disk in enumerate(disks):
file_id = "file%s" % counter
disk_id = "disk%s" % counter
file_attribs = {
"ovf:href": disk["path"],
"ovf:size": str(disk["real-size"]),
"ovf:id": file_id,
}
disk_attribs = {
"ovf:capacity": str(disk["virt-size"]),
"ovf:diskId": disk_id,
"ovf:fileRef": file_id,
"ovf:format": DISK_FORMAT.get(disk["format"], disk["format"]),
}
if "compression" in disk:
file_attribs["ovf:compression"] = disk["compression"]
ET.SubElement(references, "File", attrib=file_attribs)
ET.SubElement(disk_section, "Disk", attrib=disk_attribs)
# Item in VirtualHardwareSection creation
disk_item = ET.Element("Item")
SubElementText(disk_item, "rasd:ElementName", disk_id)
SubElementText(disk_item, "rasd:HostResource", "ovf:/disk/%s" % disk_id)
SubElementText(disk_item, "rasd:InstanceID", self.next_instance_id)
SubElementText(disk_item, "rasd:Parent", INSTANCE_ID["scsi"])
SubElementText(disk_item, "rasd:ResourceType", RASD_TYPE["disk"])
self.hardware_list.append(disk_item)
self.next_instance_id += 1
def SaveNetworksData(self, networks):
"""Convert network information to NetworkSection.
@type networks: list
@param networks: list of dictionaries of network options from config.ini
"""
network_section = ET.SubElement(self.tree, "NetworkSection")
SubElementText(network_section, "Info", "List of logical networks")
for counter, network in enumerate(networks):
network_name = "%s%s" % (network["mode"], counter)
network_attrib = {"ovf:name": network_name}
ET.SubElement(network_section, "Network", attrib=network_attrib)
# Item in VirtualHardwareSection creation
network_item = ET.Element("Item")
SubElementText(network_item, "rasd:Address", network["mac"])
SubElementText(network_item, "rasd:Connection", network_name)
SubElementText(network_item, "rasd:ElementName", network_name)
SubElementText(network_item, "rasd:InstanceID", self.next_instance_id)
SubElementText(network_item, "rasd:ResourceType",
RASD_TYPE["ethernet-adapter"])
self.hardware_list.append(network_item)
self.next_instance_id += 1
@staticmethod
def _SaveNameAndParams(root, data):
"""Save name and parameters information under root using data.
@type root: ET.Element
@param root: root element for the Name and Parameters
@type data: dict
@param data: data from which we gather the values
"""
assert(data.get("name"))
SubElementText(root, "gnt:Name", data["name"])
params = ET.SubElement(root, "gnt:Parameters")
for name, value in data.iteritems():
if name != "name":
SubElementText(params, "gnt:%s" % name, value)
def SaveGanetiData(self, ganeti, networks):
"""Convert Ganeti-specific information to GanetiSection.
@type ganeti: dict
@param ganeti: dictionary of Ganeti-specific options from config.ini
@type networks: list
@param networks: list of dictionaries of network options from config.ini
"""
ganeti_section = ET.SubElement(self.tree, "gnt:GanetiSection")
SubElementText(ganeti_section, "gnt:Version", ganeti.get("version"))
SubElementText(ganeti_section, "gnt:DiskTemplate",
ganeti.get("disk_template"))
SubElementText(ganeti_section, "gnt:AutoBalance",
ganeti.get("auto_balance"))
SubElementText(ganeti_section, "gnt:Tags", ganeti.get("tags"))
osys = ET.SubElement(ganeti_section, "gnt:OperatingSystem")
self._SaveNameAndParams(osys, ganeti["os"])
hypervisor = ET.SubElement(ganeti_section, "gnt:Hypervisor")
self._SaveNameAndParams(hypervisor, ganeti["hypervisor"])
network_section = ET.SubElement(ganeti_section, "gnt:Network")
for counter, network in enumerate(networks):
network_name = "%s%s" % (network["mode"], counter)
nic_attrib = {"ovf:name": network_name}
nic = ET.SubElement(network_section, "gnt:Nic", attrib=nic_attrib)
SubElementText(nic, "gnt:Mode", network["mode"])
SubElementText(nic, "gnt:MACAddress", network["mac"])
SubElementText(nic, "gnt:IPAddress", network["ip"])
SubElementText(nic, "gnt:Link", network["link"])
SubElementText(nic, "gnt:Net", network["network"])
def SaveVirtualSystemData(self, name, vcpus, memory):
"""Convert virtual system information to OVF sections.
@type name: string
@param name: name of the instance
@type vcpus: int
@param vcpus: number of VCPUs
@type memory: int
@param memory: RAM memory in MB
"""
assert(vcpus > 0)
assert(memory > 0)
vs_attrib = {"ovf:id": name}
virtual_system = ET.SubElement(self.tree, "VirtualSystem", attrib=vs_attrib)
SubElementText(virtual_system, "Info", "A virtual machine")
name_section = ET.SubElement(virtual_system, "Name")
name_section.text = name
os_attrib = {"ovf:id": "0"}
os_section = ET.SubElement(virtual_system, "OperatingSystemSection",
attrib=os_attrib)
SubElementText(os_section, "Info", "Installed guest operating system")
hardware_section = ET.SubElement(virtual_system, "VirtualHardwareSection")
SubElementText(hardware_section, "Info", "Virtual hardware requirements")
# System description
system = ET.SubElement(hardware_section, "System")
SubElementText(system, "vssd:ElementName", "Virtual Hardware Family")
SubElementText(system, "vssd:InstanceID", INSTANCE_ID["system"])
SubElementText(system, "vssd:VirtualSystemIdentifier", name)
SubElementText(system, "vssd:VirtualSystemType", self.virtual_system_type)
# Item for vcpus
vcpus_item = ET.SubElement(hardware_section, "Item")
SubElementText(vcpus_item, "rasd:ElementName",
"%s virtual CPU(s)" % vcpus)
SubElementText(vcpus_item, "rasd:InstanceID", INSTANCE_ID["vcpus"])
SubElementText(vcpus_item, "rasd:ResourceType", RASD_TYPE["vcpus"])
SubElementText(vcpus_item, "rasd:VirtualQuantity", vcpus)
# Item for memory
memory_item = ET.SubElement(hardware_section, "Item")
SubElementText(memory_item, "rasd:AllocationUnits", "byte * 2^20")
SubElementText(memory_item, "rasd:ElementName", "%sMB of memory" % memory)
SubElementText(memory_item, "rasd:InstanceID", INSTANCE_ID["memory"])
SubElementText(memory_item, "rasd:ResourceType", RASD_TYPE["memory"])
SubElementText(memory_item, "rasd:VirtualQuantity", memory)
# Item for scsi controller
scsi_item = ET.SubElement(hardware_section, "Item")
SubElementText(scsi_item, "rasd:Address", INSTANCE_ID["system"])
SubElementText(scsi_item, "rasd:ElementName", "scsi_controller0")
SubElementText(scsi_item, "rasd:InstanceID", INSTANCE_ID["scsi"])
SubElementText(scsi_item, "rasd:ResourceSubType", SCSI_SUBTYPE)
SubElementText(scsi_item, "rasd:ResourceType", RASD_TYPE["scsi-controller"])
# Other items - from self.hardware_list
for item in self.hardware_list:
hardware_section.append(item)
def PrettyXmlDump(self):
"""Formatter of the XML file.
@rtype: string
@return: XML tree in the form of nicely-formatted string
"""
raw_string = ET.tostring(self.tree)
parsed_xml = xml.dom.minidom.parseString(raw_string)
xml_string = parsed_xml.toprettyxml(indent=" ")
text_re = re.compile(r">\n\s+([^<>\s].*?)\n\s+</", re.DOTALL)
return text_re.sub(r">\g<1></", xml_string)
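# Editor's note: the substitution above pulls text-only children back onto
# a single line, e.g. "<Info>\n    text\n  </Info>" becomes
# "<Info>text</Info>".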
class Converter(object):
"""Converter class for OVF packages.
Converter is a base class for both OVFImporter and OVFExporter. Its purpose is
to provide a common interface for the two.
@type options: optparse.Values
@ivar options: options parsed from the command line
@type output_dir: string
@ivar output_dir: directory to which the results of conversion shall be
written
@type temp_file_manager: L{utils.TemporaryFileManager}
@ivar temp_file_manager: container for temporary files created during
conversion
@type temp_dir: string
@ivar temp_dir: temporary directory created when we deal with OVA
"""
def __init__(self, input_path, options):
"""Initialize the converter.
@type input_path: string
@param input_path: path to the Converter input file
@type options: optparse.Values
@param options: command line options
@raise errors.OpPrereqError: if file does not exist
"""
input_path = os.path.abspath(input_path)
if not os.path.isfile(input_path):
raise errors.OpPrereqError("File does not exist: %s" % input_path,
errors.ECODE_ENVIRON)
self.options = options
self.temp_file_manager = utils.TemporaryFileManager()
self.temp_dir = None
self.output_dir = None
self._ReadInputData(input_path)
def _ReadInputData(self, input_path):
"""Reads the data on which the conversion will take place.
@type input_path: string
@param input_path: absolute path to the Converter input file
"""
raise NotImplementedError()
def _CompressDisk(self, disk_path, compression, action):
"""Performs (de)compression on the disk and returns the new path
@type disk_path: string
@param disk_path: path to the disk
@type compression: string
@param compression: compression type
@type action: string
@param action: whether the action is compression or decompression
@rtype: string
@return: new disk path after (de)compression
@raise errors.OpPrereqError: disk (de)compression failed or "compression"
is not supported
"""
assert(action in ALLOWED_ACTIONS)
# For now we only support gzip, as it is used in ovftool
if compression != COMPRESSION_TYPE:
raise errors.OpPrereqError("Unsupported compression type: %s"
% compression, errors.ECODE_INVAL)
disk_file = os.path.basename(disk_path)
if action == DECOMPRESS:
(disk_name, _) = os.path.splitext(disk_file)
prefix = disk_name
elif action == COMPRESS:
prefix = disk_file
new_path = utils.GetClosedTempfile(suffix=COMPRESSION_EXT, prefix=prefix,
dir=self.output_dir)
self.temp_file_manager.Add(new_path)
args = ["gzip", "-c", disk_path]
run_result = utils.RunCmd(args, output=new_path)
if run_result.failed:
raise errors.OpPrereqError("Disk %s failed with output: %s"
% (action, run_result.stderr),
errors.ECODE_ENVIRON)
logging.info("The %s of the disk is completed", action)
return (COMPRESSION_EXT, new_path)
def _ConvertDisk(self, disk_format, disk_path):
"""Performes conversion to specified format.
@type disk_format: string
@param disk_format: format to which the disk should be converted
@type disk_path: string
@param disk_path: path to the disk that should be converted
@rtype: string
@return: path to the output disk
@raise errors.OpPrereqError: conversion of the disk failed
"""
CheckQemuImg()
disk_file = os.path.basename(disk_path)
(disk_name, disk_extension) = os.path.splitext(disk_file)
if disk_extension != disk_format:
logging.warning("Conversion of disk image to %s format, this may take"
" a while", disk_format)
new_disk_path = utils.GetClosedTempfile(
suffix=".%s" % disk_format, prefix=disk_name, dir=self.output_dir)
self.temp_file_manager.Add(new_disk_path)
args = [
constants.QEMUIMG_PATH,
"convert",
"-O",
disk_format,
disk_path,
new_disk_path,
]
run_result = utils.RunCmd(args, cwd=os.getcwd())
if run_result.failed:
raise errors.OpPrereqError("Convertion to %s failed, qemu-img output was"
": %s" % (disk_format, run_result.stderr),
errors.ECODE_ENVIRON)
return (".%s" % disk_format, new_disk_path)
@staticmethod
def _GetDiskQemuInfo(disk_path, regexp):
"""Figures out some information of the disk using qemu-img.
@type disk_path: string
@param disk_path: path to the disk we want to know the format of
@type regexp: string
@param regexp: string that has to be matched, it has to contain one group
@rtype: string
@return: disk format
@raise errors.OpPrereqError: format information cannot be retrieved
"""
CheckQemuImg()
args = [constants.QEMUIMG_PATH, "info", disk_path]
run_result = utils.RunCmd(args, cwd=os.getcwd())
if run_result.failed:
raise errors.OpPrereqError("Gathering info about the disk using qemu-img"
" failed, output was: %s" % run_result.stderr,
errors.ECODE_ENVIRON)
result = run_result.output
regexp = r"%s" % regexp
match = re.search(regexp, result)
if match:
disk_format = match.group(1)
else:
raise errors.OpPrereqError("No file information matching %s found in:"
" %s" % (regexp, result),
errors.ECODE_ENVIRON)
return disk_format
def Parse(self):
"""Parses the data and creates a structure containing all required info.
"""
raise NotImplementedError()
def Save(self):
"""Saves the gathered configuration in an apropriate format.
"""
raise NotImplementedError()
def Cleanup(self):
"""Cleans the temporary directory, if one was created.
"""
self.temp_file_manager.Cleanup()
if self.temp_dir:
shutil.rmtree(self.temp_dir)
self.temp_dir = None
class OVFImporter(Converter):
"""Converter from OVF to Ganeti config file.
@type input_dir: string
@ivar input_dir: directory in which the .ovf file resides
@type output_dir: string
@ivar output_dir: directory to which the results of conversion shall be
written
@type input_path: string
@ivar input_path: complete path to the .ovf file
@type ovf_reader: L{OVFReader}
@ivar ovf_reader: OVF reader instance collects data from .ovf file
@type results_name: string
@ivar results_name: name of imported instance
@type results_template: string
@ivar results_template: disk template read from .ovf file or command line
arguments
@type results_hypervisor: dict
@ivar results_hypervisor: hypervisor information gathered from .ovf file or
command line arguments
@type results_os: dict
@ivar results_os: operating system information gathered from .ovf file or
command line arguments
@type results_backend: dict
@ivar results_backend: backend information gathered from .ovf file or
command line arguments
@type results_tags: string
@ivar results_tags: string containing instance-specific tags
@type results_version: string
@ivar results_version: version as required by Ganeti import
@type results_network: dict
@ivar results_network: network information gathered from .ovf file or command
line arguments
@type results_disk: dict
@ivar results_disk: disk information gathered from .ovf file or command line
arguments
"""
def _ReadInputData(self, input_path):
"""Reads the data on which the conversion will take place.
@type input_path: string
@param input_path: absolute path to the .ovf or .ova input file
@raise errors.OpPrereqError: if input file is neither .ovf nor .ova
"""
(input_dir, input_file) = os.path.split(input_path)
(_, input_extension) = os.path.splitext(input_file)
if input_extension == OVF_EXT:
logging.info("%s file extension found, no unpacking necessary", OVF_EXT)
self.input_dir = input_dir
self.input_path = input_path
self.temp_dir = None
elif input_extension == OVA_EXT:
logging.info("%s file extension found, proceeding to unpacking", OVA_EXT)
self._UnpackOVA(input_path)
else:
raise errors.OpPrereqError("Unknown file extension; expected %s or %s"
" file" % (OVA_EXT, OVF_EXT),
errors.ECODE_INVAL)
assert ((input_extension == OVA_EXT and self.temp_dir) or
(input_extension == OVF_EXT and not self.temp_dir))
assert self.input_dir in self.input_path
if self.options.output_dir:
self.output_dir = os.path.abspath(self.options.output_dir)
if (os.path.commonprefix([pathutils.EXPORT_DIR, self.output_dir]) !=
pathutils.EXPORT_DIR):
logging.warning("Export path is not under %s directory, import to"
" Ganeti using gnt-backup may fail",
pathutils.EXPORT_DIR)
else:
self.output_dir = pathutils.EXPORT_DIR
self.ovf_reader = OVFReader(self.input_path)
self.ovf_reader.VerifyManifest()
def _UnpackOVA(self, input_path):
"""Unpacks the .ova package into temporary directory.
@type input_path: string
@param input_path: path to the .ova package file
@raise errors.OpPrereqError: if file is not a proper tarball, one of the
files in the archive seem malicious (e.g. path starts with '../') or
.ova package does not contain .ovf file
"""
input_name = None
if not tarfile.is_tarfile(input_path):
raise errors.OpPrereqError("The provided %s file is not a proper tar"
" archive" % OVA_EXT, errors.ECODE_ENVIRON)
ova_content = tarfile.open(input_path)
temp_dir = tempfile.mkdtemp()
self.temp_dir = temp_dir
for file_name in ova_content.getnames():
file_normname = os.path.normpath(file_name)
try:
utils.PathJoin(temp_dir, file_normname)
      except ValueError:
raise errors.OpPrereqError("File %s inside %s package is not safe" %
(file_name, OVA_EXT), errors.ECODE_ENVIRON)
if file_name.endswith(OVF_EXT):
input_name = file_name
if not input_name:
raise errors.OpPrereqError("No %s file in %s package found" %
(OVF_EXT, OVA_EXT), errors.ECODE_ENVIRON)
logging.warning("Unpacking the %s archive, this may take a while",
input_path)
self.input_dir = temp_dir
self.input_path = utils.PathJoin(self.temp_dir, input_name)
try:
try:
extract = ova_content.extractall
except AttributeError:
# This is a prehistorical case of using python < 2.5
for member in ova_content.getmembers():
ova_content.extract(member, path=self.temp_dir)
else:
extract(self.temp_dir)
except tarfile.TarError, err:
raise errors.OpPrereqError("Error while extracting %s archive: %s" %
(OVA_EXT, err), errors.ECODE_ENVIRON)
logging.info("OVA package extracted to %s directory", self.temp_dir)
def Parse(self):
"""Parses the data and creates a structure containing all required info.
The method reads the information given either as a command line option or as
a part of the OVF description.
@raise errors.OpPrereqError: if some required part of the description of
virtual instance is missing or unable to create output directory
"""
self.results_name = self._GetInfo("instance name", self.options.name,
self._ParseNameOptions,
self.ovf_reader.GetInstanceName)
if not self.results_name:
raise errors.OpPrereqError("Name of instance not provided",
errors.ECODE_INVAL)
self.output_dir = utils.PathJoin(self.output_dir, self.results_name)
try:
utils.Makedirs(self.output_dir)
except OSError, err:
raise errors.OpPrereqError("Failed to create directory %s: %s" %
(self.output_dir, err), errors.ECODE_ENVIRON)
self.results_template = self._GetInfo(
"disk template", self.options.disk_template, self._ParseTemplateOptions,
self.ovf_reader.GetDiskTemplate)
if not self.results_template:
logging.info("Disk template not given")
self.results_hypervisor = self._GetInfo(
"hypervisor", self.options.hypervisor, self._ParseHypervisorOptions,
self.ovf_reader.GetHypervisorData)
assert self.results_hypervisor["hypervisor_name"]
if self.results_hypervisor["hypervisor_name"] == constants.VALUE_AUTO:
logging.debug("Default hypervisor settings from the cluster will be used")
self.results_os = self._GetInfo(
"OS", self.options.os, self._ParseOSOptions, self.ovf_reader.GetOSData)
if not self.results_os.get("os_name"):
raise errors.OpPrereqError("OS name must be provided",
errors.ECODE_INVAL)
self.results_backend = self._GetInfo(
"backend", self.options.beparams,
self._ParseBackendOptions, self.ovf_reader.GetBackendData)
assert self.results_backend.get("vcpus")
assert self.results_backend.get("memory")
assert self.results_backend.get("auto_balance") is not None
self.results_tags = self._GetInfo(
"tags", self.options.tags, self._ParseTags, self.ovf_reader.GetTagsData)
ovf_version = self.ovf_reader.GetVersionData()
if ovf_version:
self.results_version = ovf_version
else:
self.results_version = constants.EXPORT_VERSION
self.results_network = self._GetInfo(
"network", self.options.nics, self._ParseNicOptions,
self.ovf_reader.GetNetworkData, ignore_test=self.options.no_nics)
self.results_disk = self._GetInfo(
"disk", self.options.disks, self._ParseDiskOptions, self._GetDiskInfo,
ignore_test=self.results_template == constants.DT_DISKLESS)
if not self.results_disk and not self.results_network:
raise errors.OpPrereqError("Either disk specification or network"
" description must be present",
errors.ECODE_STATE)
@staticmethod
def _GetInfo(name, cmd_arg, cmd_function, nocmd_function,
ignore_test=False):
"""Get information about some section - e.g. disk, network, hypervisor.
@type name: string
@param name: name of the section
@type cmd_arg: dict
@param cmd_arg: command line argument specific for section 'name'
@type cmd_function: callable
    @param cmd_function: function to call if 'cmd_arg' exists
    @type nocmd_function: callable
    @param nocmd_function: function to call if 'cmd_arg' is not given
"""
if ignore_test:
logging.info("Information for %s will be ignored", name)
return {}
if cmd_arg:
logging.info("Information for %s will be parsed from command line", name)
results = cmd_function()
else:
logging.info("Information for %s will be parsed from %s file",
name, OVF_EXT)
results = nocmd_function()
logging.info("Options for %s were succesfully read", name)
return results
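  # Usage sketch (hypothetical values): command line data takes precedence,
  # so _GetInfo("OS", {"os_name": "debian"}, cmd_fn, ovf_fn) calls cmd_fn(),
  # while _GetInfo("OS", None, cmd_fn, ovf_fn) falls back to ovf_fn().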
def _ParseNameOptions(self):
"""Returns name if one was given in command line.
@rtype: string
@return: name of an instance
"""
return self.options.name
def _ParseTemplateOptions(self):
"""Returns disk template if one was given in command line.
@rtype: string
@return: disk template name
"""
return self.options.disk_template
def _ParseHypervisorOptions(self):
"""Parses hypervisor options given in a command line.
@rtype: dict
@return: dictionary containing name of the chosen hypervisor and all the
options
"""
assert type(self.options.hypervisor) is tuple
assert len(self.options.hypervisor) == 2
results = {}
if self.options.hypervisor[0]:
results["hypervisor_name"] = self.options.hypervisor[0]
else:
results["hypervisor_name"] = constants.VALUE_AUTO
results.update(self.options.hypervisor[1])
return results
def _ParseOSOptions(self):
"""Parses OS options given in command line.
@rtype: dict
@return: dictionary containing name of chosen OS and all its options
"""
assert self.options.os
results = {}
results["os_name"] = self.options.os
results.update(self.options.osparams)
return results
def _ParseBackendOptions(self):
"""Parses backend options given in command line.
@rtype: dict
@return: dictionary containing vcpus, memory and auto-balance options
"""
assert self.options.beparams
backend = {}
backend.update(self.options.beparams)
must_contain = ["vcpus", "memory", "auto_balance"]
for element in must_contain:
if backend.get(element) is None:
backend[element] = constants.VALUE_AUTO
return backend
def _ParseTags(self):
"""Returns tags list given in command line.
@rtype: string
@return: string containing comma-separated tags
"""
return self.options.tags
def _ParseNicOptions(self):
"""Parses network options given in a command line or as a dictionary.
@rtype: dict
@return: dictionary of network-related options
"""
assert self.options.nics
results = {}
for (nic_id, nic_desc) in self.options.nics:
results["nic%s_mode" % nic_id] = \
nic_desc.get("mode", constants.VALUE_AUTO)
results["nic%s_mac" % nic_id] = nic_desc.get("mac", constants.VALUE_AUTO)
results["nic%s_link" % nic_id] = \
nic_desc.get("link", constants.VALUE_AUTO)
results["nic%s_network" % nic_id] = \
nic_desc.get("network", constants.VALUE_AUTO)
if nic_desc.get("mode") == "bridged":
results["nic%s_ip" % nic_id] = constants.VALUE_NONE
else:
results["nic%s_ip" % nic_id] = constants.VALUE_AUTO
results["nic_count"] = str(len(self.options.nics))
return results
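  # For example (hypothetical input), options.nics = [(0, {"mode": "bridged"})]
  # yields roughly
  #   {"nic0_mode": "bridged", "nic0_ip": constants.VALUE_NONE,
  #    "nic0_mac": constants.VALUE_AUTO, "nic0_link": constants.VALUE_AUTO,
  #    "nic0_network": constants.VALUE_AUTO, "nic_count": "1"}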
def _ParseDiskOptions(self):
"""Parses disk options given in a command line.
@rtype: dict
@return: dictionary of disk-related options
@raise errors.OpPrereqError: disk description does not contain size
information or size information is invalid or creation failed
"""
CheckQemuImg()
assert self.options.disks
results = {}
for (disk_id, disk_desc) in self.options.disks:
results["disk%s_ivname" % disk_id] = "disk/%s" % disk_id
if disk_desc.get("size"):
try:
disk_size = utils.ParseUnit(disk_desc["size"])
except ValueError:
raise errors.OpPrereqError("Invalid disk size for disk %s: %s" %
(disk_id, disk_desc["size"]),
errors.ECODE_INVAL)
new_path = utils.PathJoin(self.output_dir, str(disk_id))
args = [
constants.QEMUIMG_PATH,
"create",
"-f",
"raw",
new_path,
disk_size,
]
run_result = utils.RunCmd(args)
if run_result.failed:
raise errors.OpPrereqError("Creation of disk %s failed, output was:"
" %s" % (new_path, run_result.stderr),
errors.ECODE_ENVIRON)
results["disk%s_size" % disk_id] = str(disk_size)
results["disk%s_dump" % disk_id] = "disk%s.raw" % disk_id
else:
raise errors.OpPrereqError("Disks created for import must have their"
" size specified",
errors.ECODE_INVAL)
results["disk_count"] = str(len(self.options.disks))
return results
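  # Illustrative note (hypothetical values): a disk description such as
  # (0, {"size": "10G"}) leads to an invocation of the form
  #   qemu-img create -f raw <output_dir>/0 <parsed_size>
  # where <parsed_size> is the utils.ParseUnit result.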
def _GetDiskInfo(self):
"""Gathers information about disks used by instance, perfomes conversion.
@rtype: dict
@return: dictionary of disk-related options
@raise errors.OpPrereqError: disk is not in the same directory as .ovf file
"""
results = {}
disks_list = self.ovf_reader.GetDisksNames()
for (counter, (disk_name, disk_compression)) in enumerate(disks_list):
if os.path.dirname(disk_name):
raise errors.OpPrereqError("Disks are not allowed to have absolute"
" paths or paths outside main OVF"
" directory", errors.ECODE_ENVIRON)
disk, _ = os.path.splitext(disk_name)
disk_path = utils.PathJoin(self.input_dir, disk_name)
if disk_compression not in NO_COMPRESSION:
_, disk_path = self._CompressDisk(disk_path, disk_compression,
DECOMPRESS)
disk, _ = os.path.splitext(disk)
      if self._GetDiskQemuInfo(disk_path, r"file format: (\S+)") != "raw":
logging.info("Conversion to raw format is required")
ext, new_disk_path = self._ConvertDisk("raw", disk_path)
final_disk_path = LinkFile(new_disk_path, prefix=disk, suffix=ext,
directory=self.output_dir)
final_name = os.path.basename(final_disk_path)
disk_size = os.path.getsize(final_disk_path) / (1024 * 1024)
results["disk%s_dump" % counter] = final_name
results["disk%s_size" % counter] = str(disk_size)
results["disk%s_ivname" % counter] = "disk/%s" % str(counter)
if disks_list:
results["disk_count"] = str(len(disks_list))
return results
def Save(self):
"""Saves all the gathered information in a constant.EXPORT_CONF_FILE file.
@raise errors.OpPrereqError: when saving to config file failed
"""
logging.info("Conversion was succesfull, saving %s in %s directory",
constants.EXPORT_CONF_FILE, self.output_dir)
results = {
constants.INISECT_INS: {},
constants.INISECT_BEP: {},
constants.INISECT_EXP: {},
constants.INISECT_OSP: {},
constants.INISECT_HYP: {},
}
results[constants.INISECT_INS].update(self.results_disk)
results[constants.INISECT_INS].update(self.results_network)
results[constants.INISECT_INS]["hypervisor"] = \
self.results_hypervisor["hypervisor_name"]
results[constants.INISECT_INS]["name"] = self.results_name
if self.results_template:
results[constants.INISECT_INS]["disk_template"] = self.results_template
if self.results_tags:
results[constants.INISECT_INS]["tags"] = self.results_tags
results[constants.INISECT_BEP].update(self.results_backend)
results[constants.INISECT_EXP]["os"] = self.results_os["os_name"]
results[constants.INISECT_EXP]["version"] = self.results_version
del self.results_os["os_name"]
results[constants.INISECT_OSP].update(self.results_os)
del self.results_hypervisor["hypervisor_name"]
results[constants.INISECT_HYP].update(self.results_hypervisor)
output_file_name = utils.PathJoin(self.output_dir,
constants.EXPORT_CONF_FILE)
output = []
for section, options in results.iteritems():
output.append("[%s]" % section)
for name, value in options.iteritems():
if value is None:
value = ""
output.append("%s = %s" % (name, value))
output.append("")
output_contents = "\n".join(output)
try:
utils.WriteFile(output_file_name, data=output_contents)
except errors.ProgrammerError, err:
raise errors.OpPrereqError("Saving the config file failed: %s" % err,
errors.ECODE_ENVIRON)
self.Cleanup()
class ConfigParserWithDefaults(ConfigParser.SafeConfigParser):
"""This is just a wrapper on SafeConfigParser, that uses default values
"""
def get(self, section, options, raw=None, vars=None): # pylint: disable=W0622
try:
result = ConfigParser.SafeConfigParser.get(self, section, options,
raw=raw, vars=vars)
except ConfigParser.NoOptionError:
result = None
return result
def getint(self, section, options):
try:
result = ConfigParser.SafeConfigParser.get(self, section, options)
except ConfigParser.NoOptionError:
result = 0
return int(result)
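# Usage sketch (hypothetical file and section names): with a config.ini that
# lacks an option,
#   parser = ConfigParserWithDefaults()
#   parser.read("config.ini")
#   parser.get("instance", "name")     # -> None instead of NoOptionError
#   parser.getint("backend", "vcpus")  # -> 0 instead of NoOptionError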
class OVFExporter(Converter):
"""Converter from Ganeti config file to OVF
@type input_dir: string
@ivar input_dir: directory in which the config.ini file resides
@type output_dir: string
@ivar output_dir: directory to which the results of conversion shall be
written
@type packed_dir: string
@ivar packed_dir: if we want OVA package, this points to the real (i.e. not
temp) output directory
@type input_path: string
@ivar input_path: complete path to the config.ini file
@type output_path: string
@ivar output_path: complete path to .ovf file
@type config_parser: L{ConfigParserWithDefaults}
@ivar config_parser: parser for the config.ini file
  @type references_files: list
  @ivar references_files: files referenced in the ovf file
@type results_disk: list
@ivar results_disk: list of dictionaries of disk options from config.ini
@type results_network: list
  @ivar results_network: list of dictionaries of network options from config.ini
@type results_name: string
@ivar results_name: name of the instance
@type results_vcpus: string
@ivar results_vcpus: number of VCPUs
@type results_memory: string
@ivar results_memory: RAM memory in MB
@type results_ganeti: dict
@ivar results_ganeti: dictionary of Ganeti-specific options from config.ini
"""
def _ReadInputData(self, input_path):
"""Reads the data on which the conversion will take place.
@type input_path: string
@param input_path: absolute path to the config.ini input file
@raise errors.OpPrereqError: error when reading the config file
"""
input_dir = os.path.dirname(input_path)
self.input_path = input_path
self.input_dir = input_dir
if self.options.output_dir:
self.output_dir = os.path.abspath(self.options.output_dir)
else:
self.output_dir = input_dir
self.config_parser = ConfigParserWithDefaults()
logging.info("Reading configuration from %s file", input_path)
try:
self.config_parser.read(input_path)
except ConfigParser.MissingSectionHeaderError, err:
raise errors.OpPrereqError("Error when trying to read %s: %s" %
(input_path, err), errors.ECODE_ENVIRON)
if self.options.ova_package:
self.temp_dir = tempfile.mkdtemp()
self.packed_dir = self.output_dir
self.output_dir = self.temp_dir
self.ovf_writer = OVFWriter(not self.options.ext_usage)
def _ParseName(self):
"""Parses name from command line options or config file.
@rtype: string
@return: name of Ganeti instance
@raise errors.OpPrereqError: if name of the instance is not provided
"""
if self.options.name:
name = self.options.name
else:
name = self.config_parser.get(constants.INISECT_INS, NAME)
if name is None:
raise errors.OpPrereqError("No instance name found",
errors.ECODE_ENVIRON)
return name
def _ParseVCPUs(self):
"""Parses vcpus number from config file.
@rtype: int
@return: number of virtual CPUs
@raise errors.OpPrereqError: if number of VCPUs equals 0
"""
vcpus = self.config_parser.getint(constants.INISECT_BEP, VCPUS)
if vcpus == 0:
raise errors.OpPrereqError("No CPU information found",
errors.ECODE_ENVIRON)
return vcpus
def _ParseMemory(self):
"""Parses vcpus number from config file.
@rtype: int
@return: amount of memory in MB
@raise errors.OpPrereqError: if amount of memory equals 0
"""
memory = self.config_parser.getint(constants.INISECT_BEP, MEMORY)
if memory == 0:
raise errors.OpPrereqError("No memory information found",
errors.ECODE_ENVIRON)
return memory
def _ParseGaneti(self):
"""Parses Ganeti data from config file.
@rtype: dictionary
@return: dictionary of Ganeti-specific options
"""
results = {}
# hypervisor
results["hypervisor"] = {}
hyp_name = self.config_parser.get(constants.INISECT_INS, HYPERV)
if hyp_name is None:
raise errors.OpPrereqError("No hypervisor information found",
errors.ECODE_ENVIRON)
results["hypervisor"]["name"] = hyp_name
pairs = self.config_parser.items(constants.INISECT_HYP)
for (name, value) in pairs:
results["hypervisor"][name] = value
# os
results["os"] = {}
os_name = self.config_parser.get(constants.INISECT_EXP, OS)
if os_name is None:
raise errors.OpPrereqError("No operating system information found",
errors.ECODE_ENVIRON)
results["os"]["name"] = os_name
pairs = self.config_parser.items(constants.INISECT_OSP)
for (name, value) in pairs:
results["os"][name] = value
# other
others = [
(constants.INISECT_INS, DISK_TEMPLATE, "disk_template"),
(constants.INISECT_BEP, AUTO_BALANCE, "auto_balance"),
(constants.INISECT_INS, TAGS, "tags"),
(constants.INISECT_EXP, VERSION, "version"),
]
for (section, element, name) in others:
results[name] = self.config_parser.get(section, element)
return results
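  # The config.ini fragment consumed above looks roughly like (hypothetical
  # values; section/option names follow the INISECT_* constants):
  #   [instance]
  #   hypervisor = kvm
  #   disk_template = plain
  #   [hypervisor]
  #   kernel_path = /boot/vmlinuz
  #   [export]
  #   os = debian-image
  #   version = 0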
def _ParseNetworks(self):
"""Parses network data from config file.
@rtype: list
@return: list of dictionaries of network options
    @raise errors.OpPrereqError: when network mode is not recognized
"""
results = []
counter = 0
while True:
data_link = \
self.config_parser.get(constants.INISECT_INS,
"nic%s_link" % counter)
if data_link is None:
break
results.append({
"mode": self.config_parser.get(constants.INISECT_INS,
"nic%s_mode" % counter),
"mac": self.config_parser.get(constants.INISECT_INS,
"nic%s_mac" % counter),
"ip": self.config_parser.get(constants.INISECT_INS,
"nic%s_ip" % counter),
"network": self.config_parser.get(constants.INISECT_INS,
"nic%s_network" % counter),
"link": data_link,
})
if results[counter]["mode"] not in constants.NIC_VALID_MODES:
raise errors.OpPrereqError("Network mode %s not recognized"
% results[counter]["mode"],
errors.ECODE_INVAL)
counter += 1
return results
def _GetDiskOptions(self, disk_file, compression):
"""Convert the disk and gather disk info for .ovf file.
@type disk_file: string
@param disk_file: name of the disk (without the full path)
@type compression: bool
@param compression: whether the disk should be compressed or not
@raise errors.OpPrereqError: when disk image does not exist
"""
disk_path = utils.PathJoin(self.input_dir, disk_file)
results = {}
if not os.path.isfile(disk_path):
raise errors.OpPrereqError("Disk image does not exist: %s" % disk_path,
errors.ECODE_ENVIRON)
if os.path.dirname(disk_file):
raise errors.OpPrereqError("Path for the disk: %s contains a directory"
" name" % disk_path, errors.ECODE_ENVIRON)
disk_name, _ = os.path.splitext(disk_file)
ext, new_disk_path = self._ConvertDisk(self.options.disk_format, disk_path)
results["format"] = self.options.disk_format
results["virt-size"] = self._GetDiskQemuInfo(
new_disk_path, "virtual size: \S+ \((\d+) bytes\)")
if compression:
ext2, new_disk_path = self._CompressDisk(new_disk_path, "gzip",
COMPRESS)
disk_name, _ = os.path.splitext(disk_name)
results["compression"] = "gzip"
ext += ext2
final_disk_path = LinkFile(new_disk_path, prefix=disk_name, suffix=ext,
directory=self.output_dir)
final_disk_name = os.path.basename(final_disk_path)
results["real-size"] = os.path.getsize(final_disk_path)
results["path"] = final_disk_name
self.references_files.append(final_disk_path)
return results
def _ParseDisks(self):
"""Parses disk data from config file.
@rtype: list
@return: list of dictionaries of disk options
"""
results = []
counter = 0
while True:
disk_file = \
self.config_parser.get(constants.INISECT_INS, "disk%s_dump" % counter)
if disk_file is None:
break
results.append(self._GetDiskOptions(disk_file, self.options.compression))
counter += 1
return results
def Parse(self):
"""Parses the data and creates a structure containing all required info.
"""
try:
utils.Makedirs(self.output_dir)
except OSError, err:
raise errors.OpPrereqError("Failed to create directory %s: %s" %
(self.output_dir, err), errors.ECODE_ENVIRON)
self.references_files = []
self.results_name = self._ParseName()
self.results_vcpus = self._ParseVCPUs()
self.results_memory = self._ParseMemory()
if not self.options.ext_usage:
self.results_ganeti = self._ParseGaneti()
self.results_network = self._ParseNetworks()
self.results_disk = self._ParseDisks()
def _PrepareManifest(self, path):
"""Creates manifest for all the files in OVF package.
@type path: string
@param path: path to manifesto file
@raise errors.OpPrereqError: if error occurs when writing file
"""
logging.info("Preparing manifest for the OVF package")
lines = []
files_list = [self.output_path]
files_list.extend(self.references_files)
logging.warning("Calculating SHA1 checksums, this may take a while")
sha1_sums = utils.FingerprintFiles(files_list)
for file_path, value in sha1_sums.iteritems():
file_name = os.path.basename(file_path)
lines.append("SHA1(%s)= %s" % (file_name, value))
lines.append("")
data = "\n".join(lines)
try:
utils.WriteFile(path, data=data)
except errors.ProgrammerError, err:
raise errors.OpPrereqError("Saving the manifest file failed: %s" % err,
errors.ECODE_ENVIRON)
@staticmethod
def _PrepareTarFile(tar_path, files_list):
"""Creates tarfile from the files in OVF package.
@type tar_path: string
@param tar_path: path to the resulting file
@type files_list: list
@param files_list: list of files in the OVF package
"""
logging.info("Preparing tarball for the OVF package")
open(tar_path, mode="w").close()
ova_package = tarfile.open(name=tar_path, mode="w")
for file_path in files_list:
file_name = os.path.basename(file_path)
ova_package.add(file_path, arcname=file_name)
ova_package.close()
def Save(self):
"""Saves the gathered configuration in an apropriate format.
@raise errors.OpPrereqError: if unable to create output directory
"""
output_file = "%s%s" % (self.results_name, OVF_EXT)
output_path = utils.PathJoin(self.output_dir, output_file)
self.ovf_writer = OVFWriter(not self.options.ext_usage)
logging.info("Saving read data to %s", output_path)
self.output_path = utils.PathJoin(self.output_dir, output_file)
files_list = [self.output_path]
self.ovf_writer.SaveDisksData(self.results_disk)
self.ovf_writer.SaveNetworksData(self.results_network)
if not self.options.ext_usage:
self.ovf_writer.SaveGanetiData(self.results_ganeti, self.results_network)
self.ovf_writer.SaveVirtualSystemData(self.results_name, self.results_vcpus,
self.results_memory)
data = self.ovf_writer.PrettyXmlDump()
utils.WriteFile(self.output_path, data=data)
manifest_file = "%s%s" % (self.results_name, MF_EXT)
manifest_path = utils.PathJoin(self.output_dir, manifest_file)
self._PrepareManifest(manifest_path)
files_list.append(manifest_path)
files_list.extend(self.references_files)
if self.options.ova_package:
ova_file = "%s%s" % (self.results_name, OVA_EXT)
packed_path = utils.PathJoin(self.packed_dir, ova_file)
try:
utils.Makedirs(self.packed_dir)
except OSError, err:
raise errors.OpPrereqError("Failed to create directory %s: %s" %
(self.packed_dir, err),
errors.ECODE_ENVIRON)
self._PrepareTarFile(packed_path, files_list)
logging.info("Creation of the OVF package was successfull")
self.Cleanup()
|
sarahn/ganeti
|
lib/ovf.py
|
Python
|
gpl-2.0
| 67,549
|
[
"VisIt"
] |
a3f00ffd98d66a185aa4d0f514b11ac82e4a107ec59bd55a0cc71d0710fc951a
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2014 CRS4.
#
# This file is part of blast-python.
#
# blast-python is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# blast-python is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# blast-python. If not, see <http://www.gnu.org/licenses/>.
#
# END_COPYRIGHT
import random, time
import ncbi_toolkit
NITER = 1000
LEN = 10000
r = random.Random()
def make_seq(n):
return ''.join([r.choice(['A', 'C', 'G', 'T']) for i in xrange(n)])
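# e.g. make_seq(8) might return 'ACGTTGCA' (illustrative; the output is random)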
s = make_seq(LEN)
factory_fasta = ncbi_toolkit.blast_sseq_loc_from_fasta()
factory_str = ncbi_toolkit.blast_sseq_loc_from_str()
start = time.time()
for i in xrange(NITER):
sseq = factory_fasta.make(
'>xxxx\n%s' % s, ncbi_toolkit.strand.plus, 0, 0, False
)
print 'sseq construction (Fasta) ', (time.time() - start)/NITER
start = time.time()
for i in xrange(NITER):
sseq = factory_str.make(
s, False, 10022, 'title xxx', ncbi_toolkit.strand.plus, 0, 0
)
print 'sseq construction (str)', (time.time() - start)/NITER
start = time.time()
for i in xrange(NITER):
sseq = factory_fasta.make_dummy(
'>xxxx\n%s' % s, ncbi_toolkit.strand.plus, 0, 0, False
)
print 'dummy construction ', (time.time() - start)/NITER
start = time.time()
for i in xrange(NITER):
sseq = 'x' * LEN
print 'string construction ', (time.time() - start)/NITER
|
crs4/blast-python
|
test/test_sseq_speed.py
|
Python
|
gpl-3.0
| 1,817
|
[
"BLAST"
] |
d5aac2b2119994dbdcdb60aa8aaac55cb4cd8218814aa4418562403202ef9114
|
"""Analyze python import statements."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import os
from . import types as t
from .util import (
display,
ApplicationError,
is_subdir,
)
from .data import (
data_context,
)
VIRTUAL_PACKAGES = set([
'ansible.module_utils.six',
])
def get_python_module_utils_imports(compile_targets):
"""Return a dictionary of module_utils names mapped to sets of python file paths.
:type compile_targets: list[TestTarget]
:rtype: dict[str, set[str]]
"""
module_utils = enumerate_module_utils()
virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
module_utils -= virtual_utils
imports_by_target_path = {}
for target in compile_targets:
imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
def recurse_import(import_name, depth=0, seen=None): # type: (str, int, t.Optional[t.Set[str]]) -> t.Set[str]
"""Recursively expand module_utils imports from module_utils files."""
display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
if seen is None:
seen = set([import_name])
results = set([import_name])
# virtual packages depend on the modules they contain instead of the reverse
if import_name in VIRTUAL_PACKAGES:
for sub_import in sorted(virtual_utils):
if sub_import.startswith('%s.' % import_name):
if sub_import in seen:
continue
seen.add(sub_import)
matches = sorted(recurse_import(sub_import, depth + 1, seen))
for result in matches:
results.add(result)
import_path = os.path.join('lib/', '%s.py' % import_name.replace('.', '/'))
if import_path not in imports_by_target_path:
import_path = os.path.join('lib/', import_name.replace('.', '/'), '__init__.py')
if import_path not in imports_by_target_path:
raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
# process imports in reverse so the deepest imports come first
for name in sorted(imports_by_target_path[import_path], reverse=True):
if name in virtual_utils:
continue
if name in seen:
continue
seen.add(name)
matches = sorted(recurse_import(name, depth + 1, seen))
for result in matches:
results.add(result)
return results
for module_util in module_utils:
# recurse over module_utils imports while excluding self
module_util_imports = recurse_import(module_util)
module_util_imports.remove(module_util)
# add recursive imports to all path entries which import this module_util
for target_path in imports_by_target_path:
if module_util in imports_by_target_path[target_path]:
for module_util_import in sorted(module_util_imports):
if module_util_import not in imports_by_target_path[target_path]:
display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
imports_by_target_path[target_path].add(module_util_import)
imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])
for target_path in imports_by_target_path:
for module_util in imports_by_target_path[target_path]:
imports[module_util].add(target_path)
# for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
for virtual_util in virtual_utils:
parent_package = '.'.join(virtual_util.split('.')[:-1])
imports[virtual_util] = imports[parent_package]
display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
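# The returned mapping is keyed by module_utils import name, e.g. roughly
# (hypothetical): {"ansible.module_utils.basic": set(["lib/ansible/modules/ping.py"])}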
def get_python_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix
else:
prefix = 'ansible.module_utils.'
if path.endswith('/__init__.py'):
path = os.path.dirname(path)
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.sep, '.')
return name
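# For example (hypothetical path), with the default prefix a file at
#   lib/ansible/module_utils/network/common/utils.py
# maps to the import name
#   ansible.module_utils.network.common.utils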
def enumerate_module_utils():
"""Return a list of available module_utils imports.
:rtype: set[str]
"""
module_utils = []
for path in data_context().content.walk_files(data_context().content.module_utils_path):
ext = os.path.splitext(path)[1]
if path == os.path.join(data_context().content.module_utils_path, '__init__.py'):
continue
if ext != '.py':
continue
module_utils.append(get_python_module_utils_name(path))
return set(module_utils)
def extract_python_module_utils_imports(path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
:rtype: set[str]
"""
with open(path, 'r') as module_fd:
code = module_fd.read()
try:
tree = ast.parse(code)
except SyntaxError as ex:
# Treat this error as a warning so tests can be executed as best as possible.
# The compile test will detect and report this syntax error.
display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
return set()
finder = ModuleUtilFinder(path, module_utils)
finder.visit(tree)
return finder.imports
class ModuleUtilFinder(ast.NodeVisitor):
"""AST visitor to find valid module_utils imports."""
def __init__(self, path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
"""
self.path = path
self.module_utils = module_utils
self.imports = set()
# implicitly import parent package
if path.endswith('/__init__.py'):
path = os.path.split(path)[0]
if path.startswith('lib/ansible/module_utils/'):
package = os.path.split(path)[0].replace('/', '.')[4:]
if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
self.add_import(package, 0)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_Import(self, node):
"""
:type node: ast.Import
"""
self.generic_visit(node)
for alias in node.names:
if alias.name.startswith('ansible.module_utils.'):
# import ansible.module_utils.MODULE[.MODULE]
self.add_import(alias.name, node.lineno)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_ImportFrom(self, node):
"""
:type node: ast.ImportFrom
"""
self.generic_visit(node)
if not node.module:
return
if node.module == 'ansible.module_utils' or node.module.startswith('ansible.module_utils.'):
for alias in node.names:
# from ansible.module_utils import MODULE[, MODULE]
# from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
self.add_import('%s.%s' % (node.module, alias.name), node.lineno)
def add_import(self, name, line_number):
"""
:type name: str
:type line_number: int
"""
import_name = name
while len(name) > len('ansible.module_utils.'):
if name in self.module_utils:
if name not in self.imports:
display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
self.imports.add(name)
return # duplicate imports are ignored
name = '.'.join(name.split('.')[:-1])
if is_subdir(self.path, data_context().content.test_path):
return # invalid imports in tests are ignored
# Treat this error as a warning so tests can be executed as best as possible.
# This error should be detected by unit or integration tests.
display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
|
Dhivyap/ansible
|
test/lib/ansible_test/_internal/import_analysis.py
|
Python
|
gpl-3.0
| 9,015
|
[
"VisIt"
] |
89e5990394d9a8aedc568d77d71a1bb855debc52a019dbd5bdf389108f936723
|
#
# Copyright (C) 2007, Mark Lee
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision: 592 $
# $Date: 2009-02-04 16:24:59 -0700 (Wed, 04 Feb 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/network/Network.py $
#
# The Network class is defined here
#
import socket
import struct
import array
import time
import sys
import StringIO
try:
import numpy
numpy_int_type = numpy.dtype('int32').newbyteorder('>')
numpy_float_type = numpy.dtype('float64').newbyteorder('>')
numpy_char_type = 'S1'#numpy.dtype('uint8').newbyteorder('>')
except ImportError:
pass
from rlglue.types import Action
from rlglue.types import Observation
from rlglue.types import Reward_observation_terminal
from rlglue.types import RL_Abstract_Type
# RL-Glue needs to know what type of object is trying to connect.
kExperimentConnection = 1
kAgentConnection = 2
kEnvironmentConnection = 3
kAgentInit = 4 # agent_* start by sending one of these values
kAgentStart = 5 # to the client to let it know what type of
kAgentStep = 6 # event to respond to
kAgentEnd = 7
kAgentCleanup = 8
kAgentMessage = 10
kEnvInit = 11
kEnvStart = 12
kEnvStep = 13
kEnvCleanup = 14
kEnvMessage = 19
kRLInit = 20
kRLStart = 21
kRLStep = 22
kRLCleanup = 23
kRLReturn = 24
kRLNumSteps = 25
kRLNumEpisodes = 26
kRLEpisode = 27
kRLAgentMessage = 33
kRLEnvMessage = 34
kRLTerm = 35
kLocalHost = "127.0.0.1"
kDefaultPort = 4096
kRetryTimeout = 2
kDefaultBufferSize = 4096
kIntSize = 4
kDoubleSize = 8
kCharSize = 1
kUnknownMessage = "Unknown Message: %s\n"
class Network:
def __init__(self):
self.sock = None
self.recvBuffer = StringIO.StringIO('')
self.sendBuffer = StringIO.StringIO('')
if 'numpy' in globals():
self.getAbstractType = self.getAbstractType_numpy
else:
self.getAbstractType = self.getAbstractType_list
def connect(self, host=kLocalHost, port=kDefaultPort, retryTimeout=kRetryTimeout):
		while self.sock is None:
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sock.connect((host, port))
			except socket.error:
self.sock = None
time.sleep(retryTimeout)
else:
break
def close(self):
self.sock.close()
def send(self):
self.sock.sendall(self.sendBuffer.getvalue())
def recv(self,size):
s = ''
while len(s) < size:
s += self.sock.recv(size - len(s))
self.recvBuffer.write(s)
self.recvBuffer.seek(0)
return len(s)
def clearSendBuffer(self):
self.sendBuffer.close()
self.sendBuffer = StringIO.StringIO()
def clearRecvBuffer(self):
self.recvBuffer.close()
self.recvBuffer = StringIO.StringIO()
def flipSendBuffer(self):
self.clearSendBuffer()
def flipRecvBuffer(self):
self.clearRecvBuffer()
def getInt(self):
s = self.recvBuffer.read(kIntSize)
return struct.unpack("!i",s)[0]
def getDouble(self):
s = self.recvBuffer.read(kDoubleSize)
return struct.unpack("!d",s)[0]
def getString(self):
#If you read 0 you get "" not None so that's fine
length = self.getInt()
return self.recvBuffer.read(length)
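	# Wire format sketch (illustrative): a string travels as a 4-byte
	# big-endian length followed by the raw bytes, so "abc" is encoded as
	#   \x00\x00\x00\x03abc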
def getAbstractType_list(self):
numInts = self.getInt()
numDoubles = self.getInt()
numChars = self.getInt()
returnStruct=RL_Abstract_Type()
if numInts > 0:
s = self.recvBuffer.read(numInts*kIntSize)
returnStruct.intArray = list(struct.unpack("!%di" % (numInts),s))
if numDoubles > 0:
s = self.recvBuffer.read(numDoubles*kDoubleSize)
returnStruct.doubleArray = list(struct.unpack("!%dd" % (numDoubles),s))
if numChars > 0:
s = self.recvBuffer.read(numChars*kCharSize)
returnStruct.charArray = list(struct.unpack("!%dc" % (numChars),s))
return returnStruct
def getAbstractType_numpy(self):
numInts = self.getInt()
numDoubles = self.getInt()
numChars = self.getInt()
returnStruct=RL_Abstract_Type()
if numInts > 0:
s = self.recvBuffer.read(numInts*kIntSize)
assert kIntSize == 4
returnStruct.intArray = numpy.frombuffer(s,
dtype=numpy_int_type,
count=numInts)
if numDoubles > 0:
s = self.recvBuffer.read(numDoubles*kDoubleSize)
returnStruct.doubleArray = numpy.frombuffer(s, count=numDoubles,
dtype=numpy_float_type)
if numChars > 0:
s = self.recvBuffer.read(numChars*kCharSize)
returnStruct.charArray = numpy.frombuffer(s, count=numChars,
dtype=numpy_char_type)
return returnStruct
def getObservation(self):
return Observation.fromAbstractType(self.getAbstractType())
def getAction(self):
return Action.fromAbstractType(self.getAbstractType())
def putInt(self,value):
self.sendBuffer.write(struct.pack("!i",value))
def putDouble(self,value):
self.sendBuffer.write(struct.pack("!d",value))
def putString(self,value):
		if value is None:
value = ''
self.putInt(len(value))
self.sendBuffer.write(value)
def putObservation(self,obs):
self.putAbstractType(obs)
def putAction(self,action):
self.putAbstractType(action)
def putAbstractType(self, theItem):
self.putInt(len(theItem.intArray))
self.putInt(len(theItem.doubleArray))
self.putInt(len(theItem.charArray))
if len(theItem.intArray) > 0:
self.sendBuffer.write(struct.pack("!%di" % (len(theItem.intArray)),*(theItem.intArray)))
if len(theItem.doubleArray) > 0:
self.sendBuffer.write(struct.pack("!%dd" % (len(theItem.doubleArray)),*(theItem.doubleArray)))
if len(theItem.charArray) > 0:
self.sendBuffer.write(struct.pack("!%dc" % (len(theItem.charArray)),*(theItem.charArray)))
def putRewardObservation(self,rewardObservation):
		self.putInt(rewardObservation.terminal)
		self.putDouble(rewardObservation.r)
		self.putObservation(rewardObservation.o)
def sizeOfAbstractType(self, theItem):
size = kIntSize * 3
intSize = 0
doubleSize = 0
charSize = 0
if theItem != None:
if theItem.intArray != None:
intSize = kIntSize * len(theItem.intArray)
if theItem.doubleArray != None:
doubleSize = kDoubleSize * len(theItem.doubleArray)
if theItem.charArray != None:
charSize = kCharSize * len(theItem.charArray)
return size + intSize + doubleSize + charSize
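	# Worked example (illustrative): an item with 2 ints, 1 double and no
	# chars occupies 3*4 (header) + 2*4 + 1*8 + 0 = 28 bytes on the wire.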
def sizeOfAction(self,action):
return self.sizeOfAbstractType(action)
def sizeOfObservation(self,observation):
return self.sizeOfAbstractType(observation)
def sizeOfRewardObservation(self,reward_observation):
return kIntSize + kDoubleSize + self.sizeOfObservation(reward_observation.o)
|
mguzdial3/MineCode
|
python-codec/src/rlglue/network/Network.py
|
Python
|
apache-2.0
| 7,403
|
[
"Brian"
] |
47f7c4ef1bbd0c504e765a95e08fa627b77a4808f1241b7c676548df83beb023
|
# -*- coding: utf-8; -*-
"""
Copyright (C) 2007-2013 Guake authors
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA
"""
PALETTES = {
'3024 Day': (
'#090903030000:#DBDB2D2D2020:#0101A2A25252:#FDFDEDED0202:#0101A0A0E4E4:'
'#A1A16A6A9494:#B5B5E4E4F4F4:#A5A5A2A2A2A2:#5C5C58585555:#E8E8BBBBD0D0:'
'#3A3A34343232:#4A4A45454343:#80807D7D7C7C:#D6D6D5D5D4D4:#CDCDABAB5353:'
'#F7F7F7F7F7F7:#4A4A45454343:#F7F7F7F7F7F7'
),
'3024 Night': (
'#090903030000:#DBDB2D2D2020:#0101A2A25252:#FDFDEDED0202:#0101A0A0E4E4:'
'#A1A16A6A9494:#B5B5E4E4F4F4:#A5A5A2A2A2A2:#5C5C58585555:#E8E8BBBBD0D0:'
'#3A3A34343232:#4A4A45454343:#80807D7D7C7C:#D6D6D5D5D4D4:#CDCDABAB5353:'
'#F7F7F7F7F7F7:#A5A5A2A2A2A2:#090903030000'
),
'Adventure Time': (
'#050504040404:#BCBC00001313:#4949B1B11717:#E6E674741D1D:#0F0F4949C6C6:'
'#666659599292:#6F6FA4A49797:#F8F8DBDBC0C0:#4E4E7B7BBFBF:#FCFC5E5E5959:'
'#9D9DFFFF6E6E:#EFEFC1C11A1A:#18189696C6C6:#9A9A59595252:#C8C8F9F9F3F3:'
'#F5F5F4F4FBFB:#F8F8DBDBC0C0:#1E1E1C1C4444'
),
'Afterglow': (
'#151515151515:#ACAC41414242:#7E7E8D8D5050:#E5E5B5B56767:#6C6C9999BABA:'
'#9E9E4E4E8585:#7D7DD5D5CFCF:#D0D0D0D0D0D0:#505050505050:#ACAC41414242:'
'#7E7E8D8D5050:#E5E5B5B56666:#6C6C9999BBBB:#9E9E4E4E8585:#7D7DD5D5CFCF:'
'#F5F5F5F5F5F5:#D0D0D0D0D0D0:#202020202020'
),
'Alien Blood': (
'#111126261515:#7F7F2B2B2626:#2F2F7E7E2525:#70707F7F2323:#2F2F69697F7F:'
'#474757577E7E:#31317F7F7676:#64647D7D7575:#3C3C47471111:#DFDF80800808:'
'#1818E0E00000:#BDBDE0E00000:#0000A9A9DFDF:#00005858DFDF:#0000DFDFC3C3:'
'#7373F9F99090:#63637D7D7575:#0F0F16160F0F'
),
'Argonaut': (
'#222222222222:#FFFF00000F0F:#8C8CE0E00A0A:#FFFFB9B90000:#00008D8DF8F8:'
'#6C6C4343A5A5:#0000D7D7EBEB:#FFFFFFFFFFFF:#444444444444:#FFFF27273F3F:'
'#ABABE0E05A5A:#FFFFD1D14141:#00009292FFFF:#9A9A5F5FEBEB:#6767FFFFEFEF:'
'#FFFFFFFFFFFF:#FFFFFAFAF3F3:#0D0D0F0F1818'
),
'Arthur': (
'#3D3D35352A2A:#CDCD5C5C5C5C:#8686AFAF8080:#E8E8AEAE5B5B:#64649595EDED:'
'#DEDEB8B88787:#B0B0C4C4DEDE:#BBBBAAAA9999:#555544444444:#CCCC55553333:'
'#8888AAAA2222:#FFFFA7A75D5D:#8787CECEEBEB:#999966660000:#B0B0C4C4DEDE:'
'#DDDDCCCCBBBB:#DDDDEEEEDDDD:#1C1C1C1C1C1C'
),
'Atom': (
'#000000000000:#FCFC5E5EF0F0:#8787C3C38A8A:#FFFFD7D7B1B1:#8585BEBEFDFD:'
'#B9B9B5B5FCFC:#8585BEBEFDFD:#DFDFDFDFDFDF:#000000000000:#FCFC5E5EF0F0:'
'#9494F9F93636:#F5F5FFFFA7A7:#9696CBCBFEFE:#B9B9B5B5FCFC:#8585BEBEFDFD:'
'#DFDFDFDFDFDF:#C5C5C8C8C6C6:#161617171818'
),
'Belafonte Day': (
'#202011111B1B:#BEBE10100E0E:#858581816262:#EAEAA5A54949:#42426A6A7979:'
'#979752522C2C:#98989A9A9C9C:#96968C8C8383:#5E5E52525252:#BEBE10100E0E:'
'#858581816262:#EAEAA5A54949:#42426A6A7979:#979752522C2C:#98989A9A9C9C:'
'#D5D5CCCCBABA:#454537373C3C:#D5D5CCCCBABA'
),
'Belafonte Night': (
'#202011111B1B:#BEBE10100E0E:#858581816262:#EAEAA5A54949:#42426A6A7979:'
'#979752522C2C:#98989A9A9C9C:#96968C8C8383:#5E5E52525252:#BEBE10100E0E:'
'#858581816262:#EAEAA5A54949:#42426A6A7979:#979752522C2C:#98989A9A9C9C:'
'#D5D5CCCCBABA:#96968C8C8383:#202011111B1B'
),
'Birdsofparadise': (
'#57573D3D2525:#BEBE2D2D2626:#6B6BA0A08A8A:#E9E99C9C2929:#5A5A8686ACAC:'
'#ABAB8080A6A6:#7474A5A5ACAC:#DFDFDADAB7B7:#9A9A6B6B4949:#E8E845452626:'
'#9494D7D7BABA:#D0D0D0D04F4F:#B8B8D3D3EDED:#D0D09D9DCACA:#9292CECED6D6:'
'#FFFFF9F9D4D4:#DFDFDADAB7B7:#2A2A1E1E1D1D'
),
'Blazer': (
'#000000000000:#B8B87A7A7A7A:#7A7AB8B87A7A:#B8B8B8B87A7A:#7A7A7A7AB8B8:'
'#B8B87A7AB8B8:#7A7AB8B8B8B8:#D9D9D9D9D9D9:#262626262626:#DBDBBDBDBDBD:'
'#BDBDDBDBBDBD:#DBDBDBDBBDBD:#BDBDBDBDDBDB:#DBDBBDBDDBDB:#BDBDDBDBDBDB:'
'#FFFFFFFFFFFF:#D9D9E6E6F2F2:#0D0D19192626'
),
'Bluloco': (
'#505050505050:#FFFF2E2E3F3F:#6F6FD6D65D5D:#FFFF6F6F2323:#34347676FFFF:'
'#98986161F8F8:#0000CDCDB3B3:#FFFFFCFCC2C2:#7C7C7C7C7C7C:#FFFF64648080:'
'#3F3FC5C56B6B:#F9F9C8C85959:#0000B1B1FEFE:#B6B68D8DFFFF:#B3B38B8B7D7D:'
'#FFFFFEFEE3E3:#DEDEE0E0DFDF:#262626262626'
),
'Borland': (
'#4E4E4E4E4E4E:#FFFF6B6B6060:#A7A7FFFF6060:#FFFFFFFFB6B6:#9696CACAFDFD:'
'#FFFF7373FDFD:#C6C6C4C4FDFD:#EEEEEEEEEEEE:#7C7C7C7C7C7C:#FFFFB6B6B0B0:'
'#CECEFFFFABAB:#FFFFFFFFCBCB:#B5B5DCDCFEFE:#FFFF9C9CFEFE:#DFDFDFDFFEFE:'
'#FFFFFFFFFFFF:#FFFFFFFF4E4E:#00000000A4A4'
),
'Broadcast': (
'#000000000000:#DADA49493939:#51519F9F5050:#FFFFD2D24A4A:#6D6D9C9CBEBE:'
'#D0D0D0D0FFFF:#6E6E9C9CBEBE:#FFFFFFFFFFFF:#323232323232:#FFFF7B7B6B6B:'
'#8383D1D18282:#FFFFFFFF7C7C:#9F9FCECEF0F0:#FFFFFFFFFFFF:#A0A0CECEF0F0:'
'#FFFFFFFFFFFF:#E6E6E1E1DCDC:#2B2B2B2B2B2B'
),
'Brogrammer': (
'#1F1F1F1F1F1F:#F7F711111818:#2C2CC5C55D5D:#ECECB9B90F0F:#2A2A8484D2D2:'
'#4E4E5959B7B7:#0F0F8080D5D5:#D6D6DADAE4E4:#D6D6DADAE4E4:#DEDE34342E2E:'
'#1D1DD2D26060:#F2F2BDBD0909:#0F0F8080D5D5:#52524F4FB9B9:#0F0F7C7CDADA:'
'#FFFFFFFFFFFF:#D6D6DADAE4E4:#131313131313'
),
'C64': (
'#090903030000:#888839393232:#5555A0A04949:#BFBFCECE7272:#404031318D8D:'
'#8B8B3F3F9696:#6767B6B6BDBD:#FFFFFFFFFFFF:#000000000000:#888839393232:'
'#5555A0A04949:#BFBFCECE7272:#404031318D8D:#8B8B3F3F9696:#6767B6B6BDBD:'
'#F7F7F7F7F7F7:#78786969C4C4:#404031318D8D'
),
'Chalk': (
'#7C7C8A8A8F8F:#B2B23A3A5151:#78789A9A6969:#B9B9ABAB4A4A:#2A2A7F7FACAC:'
'#BCBC4F4F5A5A:#4444A7A79999:#D2D2D8D8D9D9:#888888888888:#F2F248484040:'
'#8080C4C46F6F:#FFFFEBEB6262:#40409595FFFF:#FBFB51517575:#5252CCCCBDBD:'
'#D2D2D8D8D9D9:#D2D2D8D8D9D9:#2B2B2C2C2E2E'
),
'Chalkboard': (
'#000000000000:#C3C373737272:#7272C3C37373:#C2C2C3C37272:#73737272C3C3:'
'#C3C37272C2C2:#7272C2C2C3C3:#D9D9D9D9D9D9:#323232323232:#DBDBAAAAAAAA:'
'#AAAADBDBAAAA:#DADADBDBAAAA:#AAAAAAAADBDB:#DBDBAAAADADA:#AAAADADADBDB:'
'#FFFFFFFFFFFF:#D9D9E6E6F2F2:#292926262F2F'
),
'Ciapre': (
'#181818181818:#808000000909:#484851513B3B:#CCCC8A8A3E3E:#56566D6D8C8C:'
'#72724C4C7C7C:#5B5B4F4F4A4A:#ADADA3A37E7E:#555555555555:#ABAB38383434:'
'#A6A6A6A65D5D:#DCDCDEDE7B7B:#2F2F9797C6C6:#D3D330306060:#F3F3DADAB1B1:'
'#F3F3F3F3F3F3:#ADADA3A37A7A:#18181C1C2727'
),
'Clrs': (
'#000000000000:#F7F727272929:#323289895C5C:#F9F96F6F1C1C:#12125C5CCFCF:'
'#9F9F0000BCBC:#3232C2C2C0C0:#B2B2B2B2B2B2:#545457575353:#FBFB04041616:'
'#2C2CC6C63131:#FCFCD6D62727:#15156F6FFEFE:#E8E80000B0B0:#3939D5D5CECE:'
'#EDEDEDEDECEC:#262626262626:#FFFFFFFFFFFF'
),
'Cobalt Neon': (
'#141426263030:#FFFF23232020:#3A3AA5A5FFFF:#E9E9E7E75C5C:#8F8FF5F58686:'
'#78781A1AA0A0:#8F8FF5F58686:#BABA4545B1B1:#FFFFF6F68888:#D4D431312E2E:'
'#8F8FF5F58686:#E9E9F0F06D6D:#3C3C7D7DD2D2:#82823030A7A7:#6C6CBCBC6767:'
'#8F8FF5F58686:#8F8FF5F58686:#141428283838'
),
'Cobalt2': (
'#000000000000:#FFFF00000000:#3737DDDD2121:#FEFEE4E40909:#14146060D2D2:'
'#FFFF00005D5D:#0000BBBBBBBB:#BBBBBBBBBBBB:#555555555555:#F4F40D0D1717:'
'#3B3BCFCF1D1D:#ECECC8C80909:#55555555FFFF:#FFFF5555FFFF:#6A6AE3E3F9F9:'
'#FFFFFFFFFFFF:#FFFFFFFFFFFF:#121226263737'
),
'Crayon Pony Fish': (
'#2A2A1A1A1C1C:#909000002A2A:#575795952323:#AAAA30301B1B:#8B8B8787AFAF:'
'#68682E2E5050:#E8E8A7A76666:#686852525959:#3C3C2A2A2E2E:#C5C524245C5C:'
'#8D8DFFFF5656:#C7C737371D1D:#CFCFC9C9FFFF:#FBFB6C6CB9B9:#FFFFCECEAEAE:'
'#AFAF94949D9D:#686852525959:#141406060707'
),
'Dark Pastel': (
'#000000000000:#FFFF55555555:#5555FFFF5555:#FFFFFFFF5555:#55555555FFFF:'
'#FFFF5555FFFF:#5555FFFFFFFF:#BBBBBBBBBBBB:#555555555555:#FFFF55555555:'
'#5555FFFF5555:#FFFFFFFF5555:#55555555FFFF:#FFFF5555FFFF:#5555FFFFFFFF:'
'#FFFFFFFFFFFF:#FFFFFFFFFFFF:#000000000000'
),
'Darkside': (
'#000000000000:#E8E834341C1C:#6868C2C25656:#F2F2D3D32C2C:#1C1C9898E8E8:'
'#8E8E6969C9C9:#1C1C9898E8E8:#BABABABABABA:#000000000000:#DFDF5A5A4F4F:'
'#7676B7B76868:#EEEED6D64A4A:#38387B7BD2D2:#95957B7BBDBD:#3D3D9696E2E2:'
'#BABABABABABA:#BABABABABABA:#222223232424'
),
'Desert': (
'#4D4D4D4D4D4D:#FFFF2B2B2B2B:#9898FBFB9898:#F0F0E6E68C8C:#CDCD85853F3F:'
'#FFFFDEDEADAD:#FFFFA0A0A0A0:#F5F5DEDEB3B3:#555555555555:#FFFF55555555:'
'#5555FFFF5555:#FFFFFFFF5555:#8787CECEFFFF:#FFFF5555FFFF:#FFFFD7D70000:'
'#FFFFFFFFFFFF:#FFFFFFFFFFFF:#333333333333'
),
'Dimmed Monokai': (
'#3A3A3C3C4343:#BEBE3E3E4848:#86869A9A3A3A:#C4C4A5A53535:#4E4E7676A1A1:'
'#85855B5B8D8D:#56568E8EA3A3:#B8B8BCBCB9B9:#888889898787:#FBFB00001E1E:'
'#0E0E71712E2E:#C3C370703333:#17176C6CE3E3:#FBFB00006767:#2D2D6F6F6C6C:'
'#FCFCFFFFB8B8:#B8B8BCBCB9B9:#1E1E1E1E1E1E'
),
'Dracula': (
'#000000000000:#FFFF55555555:#5050FAFA7B7B:#F1F1FAFA8C8C:#BDBD9393F9F9:'
'#FFFF7979C6C6:#8B8BE9E9FDFD:#BBBBBBBBBBBB:#555555555555:#FFFF55555555:'
'#5050FAFA7B7B:#F1F1FAFA8C8C:#BDBD9393F9F9:#FFFF7979C6C6:#8B8BE9E9FDFD:'
'#FFFFFFFFFFFF:#F8F8F8F8F2F2:#1E1E1F1F2828'
),
'Earthsong': (
'#111114141717:#C8C841413434:#8484C4C44B4B:#F4F4AEAE2E2E:#13139797B9B9:'
'#D0D062623C3C:#4F4F94945252:#E5E5C5C5A9A9:#66665E5E5454:#FFFF64645959:'
'#9797E0E03535:#DFDFD5D56161:#5E5ED9D9FFFF:#FFFF91916868:#8383EFEF8888:'
'#F6F6F6F6ECEC:#E5E5C6C6A8A8:#282824242020'
),
'Elemental': (
'#3C3C3B3B3030:#979728280F0F:#474799994242:#7F7F71711010:#49497F7F7D7D:'
'#7E7E4E4E2E2E:#38387F7F5858:#808079797474:#545454544444:#DFDF50502A2A:'
'#6060E0E06F6F:#D6D698982727:#7878D8D8D8D8:#CDCD7C7C5353:#5858D5D59898:'
'#FFFFF1F1E8E8:#808079797373:#212121211C1C'
),
'Elementary Loki': (
'#070736364242:#DCDC32322F2F:#858599990000:#B5B589890000:#26268B8BD2D2:'
'#ECEC00004848:#2A2AA1A19898:#9494A3A3A5A5:#58586E6E7575:#CBCB4B4B1616:'
'#858599990000:#B5B589890000:#26268B8BD2D2:#D3D336368282:#2A2AA1A19898:'
'#EEEEEEEEEEEE:#9494A3A3A5A5:#25252E2E3232'
),
'Espresso Libre': (
'#000000000000:#CCCC00000000:#1A1A92921C1C:#EFEFE4E43A3A:#00006666FFFF:'
'#C5C565656B6B:#050598989A9A:#D3D3D7D7CFCF:#545457575353:#EFEF28282828:'
'#9A9AFFFF8787:#FFFFFAFA5C5C:#4343A8A8EDED:#FFFF80808989:#3434E2E2E2E2:'
'#EDEDEDEDECEC:#B8B8A8A89898:#2A2A21211C1C'
),
'Espresso': (
'#343434343434:#D2D251515151:#A5A5C2C26161:#FFFFC6C66D6D:#6C6C9999BBBB:'
'#D1D19797D9D9:#BEBED6D6FFFF:#EEEEEEEEECEC:#535353535353:#F0F00C0C0C0C:'
'#C2C2E0E07575:#E1E1E3E38B8B:#8A8AB7B7D9D9:#EFEFB5B5F7F7:#DCDCF3F3FFFF:'
'#FFFFFFFFFFFF:#FFFFFFFFFFFF:#323232323232'
),
'Fideloper': (
'#28282F2F3232:#CACA1D1D2C2C:#EDEDB7B7ABAB:#B7B7AAAA9A9A:#2E2E7878C1C1:'
'#C0C022226E6E:#303091918585:#E9E9E2E2CDCD:#090920202727:#D3D35F5F5A5A:'
'#D3D35F5F5A5A:#A8A865657171:#7C7C8484C4C4:#5B5B5D5DB2B2:#818190908F8F:'
'#FCFCF4F4DEDE:#DADAD9D9DFDF:#28282F2F3232'
),
'Fishtank': (
'#030306063C3C:#C6C600004949:#ABABF1F15757:#FDFDCDCD5E5E:#52525F5FB8B8:'
'#97976F6F8181:#969686866262:#ECECEFEFFCFC:#6C6C5A5A3030:#D9D94A4A8A8A:'
'#DADAFFFFA8A8:#FEFEE6E6A8A8:#B1B1BDBDF9F9:#FDFDA4A4CCCC:#A4A4BCBC8686:'
'#F6F6FFFFECEC:#ECECEFEFFDFD:#222224243636'
),
'Flat': (
'#22222D2D3F3F:#A8A823232020:#3232A5A54848:#E5E58D8D1111:#31316767ACAC:'
'#78781A1AA0A0:#2C2C93937070:#B0B0B6B6BABA:#21212C2C3C3C:#D4D431312E2E:'
'#2D2D94944040:#E5E5BEBE0C0C:#3C3C7D7DD2D2:#82823030A7A7:#3535B3B38787:'
'#E7E7ECECEDED:#2C2CC5C55D5D:#000022224040'
),
'Flatland': (
'#1C1C1D1D1919:#F1F182823838:#9E9ED2D26464:#F3F3EFEF6D6D:#4F4F9696BEBE:'
'#69695A5ABBBB:#D5D538386464:#FEFEFFFFFEFE:#1C1C1D1D1919:#D1D12A2A2424:'
'#A7A7D3D32C2C:#FFFF89894848:#6161B8B8D0D0:#69695A5ABBBB:#D5D538386464:'
'#FEFEFFFFFEFE:#B8B8DADAEEEE:#1C1C1E1E2020'
),
'Frontend Delight': (
'#242424242626:#F8F850501A1A:#565657574646:#F9F976761D1D:#2C2C7070B7B7:'
'#F0F02D2D4E4E:#3B3BA0A0A5A5:#ACACACACACAC:#5E5EACAC6C6C:#F6F643431919:'
'#7474EBEB4C4C:#FCFCC2C22424:#33339393C9C9:#E7E75E5E4E4E:#4E4EBCBCE5E5:'
'#8B8B73735A5A:#ACACACACACAC:#1B1B1B1B1D1D'
),
'Frontend Fun Forrest': (
'#000000000000:#D5D525252B2B:#90909B9B0000:#BDBD8A8A1313:#46469898A2A2:'
'#8C8C42423131:#D9D981811212:#DDDDC1C16565:#7E7E69695454:#E4E459591B1B:'
'#BFBFC6C65959:#FFFFCACA1B1B:#7C7CC9C9CECE:#D1D163634949:#E6E6A9A96B6B:'
'#FFFFE9E9A3A3:#DDDDC1C16565:#242412120000'
),
'Frontend Galaxy': (
'#000000000000:#F9F955555F5F:#2020AFAF8989:#FDFDF0F02929:#58589C9CF5F5:'
'#93934D4D9595:#1E1E9E9EE6E6:#BBBBBBBBBBBB:#555555555555:#FAFA8B8B8E8E:'
'#3434BBBB9999:#FFFFFFFF5555:#58589C9CF5F5:#E7E755559898:#39397878BBBB:'
'#FFFFFFFFFFFF:#FFFFFFFFFFFF:#1C1C28283636'
),
'Github': (
'#3E3E3E3E3E3E:#97970B0B1616:#070796962A2A:#F8F8EEEEC7C7:#00003E3E8A8A:'
'#E9E946469191:#8989D1D1ECEC:#FFFFFFFFFFFF:#666666666666:#DEDE00000000:'
'#8787D5D5A2A2:#F1F1D0D00707:#2E2E6C6CBABA:#FFFFA2A29F9F:#1C1CFAFAFEFE:'
'#FFFFFFFFFFFF:#3E3E3E3E3E3E:#F4F4F4F4F4F4'
),
'Grape': (
'#2D2D28283E3E:#ECEC21216060:#1F1FA9A91B1B:#8D8DDCDC1F1F:#48487C7CF4F4:'
'#8C8C3535C8C8:#3A3ADDDDEDED:#9E9E9E9EA0A0:#585850506A6A:#F0F071719A9A:'
'#5252A9A95D5D:#B2B2DCDC8787:#A9A9BBBBEBEB:#ACAC8181C1C1:#9C9CE3E3EAEA:'
'#A1A18888F7F7:#9E9E9E9EA0A0:#161614142323'
),
'Grass': (
'#000000000000:#BBBB00000000:#0000BBBB0000:#E7E7B0B00000:#00000000A3A3:'
'#959500006161:#0000BBBBBBBB:#BBBBBBBBBBBB:#555555555555:#BBBB00000000:'
'#0000BBBB0000:#E7E7B0B00000:#00000000BBBB:#FFFF5555FFFF:#5555FFFFFFFF:'
'#FFFFFFFFFFFF:#FFFFF0F0A5A5:#131377773C3C'
),
'Hardcore': (
'#1B1B1D1D1E1E:#F9F926267272:#A6A6E2E22E2E:#FDFD97971F1F:#6666D9D9EFEF:'
'#9E9E6F6FFEFE:#5E5E71717575:#CCCCCCCCC6C6:#505053535454:#FFFF66669D9D:'
'#BEBEEDED5F5F:#E6E6DBDB7474:#6666D9D9EFEF:#9E9E6F6FFEFE:#A3A3BABABFBF:'
'#F8F8F8F8F2F2:#A0A0A0A0A0A0:#121212121212'
),
'Harper': (
'#010101010101:#F8F8B6B63F3F:#7F7FB5B5E1E1:#D6D6DADA2525:#48489E9E4848:'
'#B2B29696C6C6:#F5F5BFBFD7D7:#A8A8A4A49D9D:#72726E6E6A6A:#F8F8B6B63F3F:'
'#7F7FB5B5E1E1:#D6D6DADA2525:#48489E9E4848:#B2B29696C6C6:#F5F5BFBFD7D7:'
'#FEFEFBFBEAEA:#A8A8A4A49D9D:#010101010101'
),
'Highway': (
'#000000000000:#CFCF0D0D1717:#121280803333:#FFFFCACA3D3D:#00006A6AB3B3:'
'#6A6A26267474:#383845456363:#EDEDEDEDEDED:#5C5C4F4F4949:#EFEF7D7D1717:'
'#B1B1D1D13030:#FFFFF1F11F1F:#4F4FC2C2FDFD:#DEDE00007070:#5C5C4F4F4949:'
'#FEFEFFFFFEFE:#EDEDEDEDEDED:#212122222424'
),
'Hipster Green': (
'#000000000000:#B6B620204A4A:#0000A6A60000:#BEBEBEBE0000:#24246D6DB2B2:'
'#B2B20000B2B2:#0000A6A6B2B2:#BFBFBFBFBFBF:#666666666666:#E5E500000000:'
'#8686A8A83E3E:#E5E5E5E50000:#00000000FFFF:#E5E50000E5E5:#0000E5E5E5E5:'
'#E5E5E5E5E5E5:#8484C1C13737:#0F0F0A0A0505'
),
'Homebrew': (
'#000000000000:#999900000000:#0000A6A60000:#999999990000:#00000000B2B2:'
'#B2B20000B2B2:#0000A6A6B2B2:#BFBFBFBFBFBF:#666666666666:#E5E500000000:'
'#0000D9D90000:#E5E5E5E50000:#00000000FFFF:#E5E50000E5E5:#0000E5E5E5E5:'
'#E5E5E5E5E5E5:#0000FFFF0000:#000000000000'
),
'Hurtado': (
'#575757575757:#FFFF1B1B0000:#A5A5DFDF5555:#FBFBE7E74A4A:#484863638787:'
'#FCFC5E5EF0F0:#8585E9E9FEFE:#CBCBCBCBCBCB:#252525252525:#D4D41C1C0000:'
'#A5A5DFDF5555:#FBFBE7E74949:#8989BDBDFFFF:#BFBF0000C0C0:#8585E9E9FEFE:'
'#DBDBDBDBDBDB:#DADADBDBDADA:#000000000000'
),
'Hybrid': (
'#2A2A2E2E3333:#B7B74D4D5050:#B3B3BEBE5A5A:#E3E3B5B55E5E:#6D6D9090B0B0:'
'#A0A07E7EABAB:#7F7FBEBEB3B3:#B5B5B8B8B6B6:#1D1D1E1E2121:#8C8C2D2D3232:'
'#787883833131:#E5E589894F4F:#4B4B6B6B8888:#6E6E4F4F7979:#4D4D7B7B7373:'
'#5A5A61616969:#B7B7BCBCB9B9:#161617171818'
),
'Ic Green Ppl': (
'#1E1E1E1E1E1E:#FBFB00002929:#32329B9B2424:#64649A9A2525:#14149B9B4545:'
'#5353B8B82B2B:#2B2BB7B76767:#DFDFFEFEEEEE:#030326260F0F:#A6A6FFFF3E3E:'
'#9F9FFFFF6D6D:#D1D1FFFF6D6D:#7272FFFFB5B5:#5050FFFF3D3D:#2222FFFF7171:'
'#DADAEEEED0D0:#D9D9EEEED2D2:#3A3A3C3C3E3E'
),
'Ic Orange Ppl': (
'#000000000000:#C0C039390000:#A3A3A9A90000:#CACAAEAE0000:#BDBD6C6C0000:'
'#FBFB5D5D0000:#F7F794940000:#FFFFC8C88A8A:#6A6A4E4E2929:#FFFF8B8B6767:'
'#F6F6FFFF3F3F:#FFFFE3E36E6E:#FFFFBDBD5454:#FCFC87874F4F:#C5C597975252:'
'#F9F9F9F9FEFE:#FFFFCBCB8383:#262626262626'
),
'Idle Toes': (
'#323232323232:#D2D252525252:#7F7FE1E17373:#FFFFC6C66D6D:#40409898FFFF:'
'#F5F57F7FFFFF:#BEBED6D6FFFF:#EEEEEEEEECEC:#535353535353:#F0F070707070:'
'#9D9DFFFF9090:#FFFFE4E48B8B:#5E5EB7B7F7F7:#FFFF9D9DFFFF:#DCDCF4F4FFFF:'
'#FFFFFFFFFFFF:#FFFFFFFFFFFF:#323232323232'
),
'Ir Black': (
'#4F4F4F4F4F4F:#FAFA6C6C5F5F:#A8A8FEFE6060:#FFFFFEFEB6B6:#9696CACAFDFD:'
'#FAFA7272FCFC:#C6C6C4C4FDFD:#EEEEEDEDEEEE:#7B7B7B7B7B7B:#FCFCB6B6AFAF:'
'#CECEFFFFABAB:#FFFFFEFECCCC:#B5B5DCDCFEFE:#FBFB9B9BFEFE:#DFDFDFDFFDFD:'
'#FEFEFFFFFEFE:#F1F1F1F1F1F1:#000000000000'
),
'Jackie Brown': (
'#2C2C1D1D1616:#EFEF57573434:#2B2BAFAF2B2B:#BDBDBEBE0000:#24246D6DB2B2:'
'#CFCF5E5EC0C0:#0000ACACEEEE:#BFBFBFBFBFBF:#666666666666:#E5E500000000:'
'#8686A8A83E3E:#E5E5E5E50000:#00000000FFFF:#E5E50000E5E5:#0000E5E5E5E5:'
'#E5E5E5E5E5E5:#FFFFCCCC2F2F:#2C2C1C1C1515'
),
'Japanesque': (
'#343438383535:#CECE3E3E6060:#7B7BB7B75B5B:#E8E8B3B32A2A:#4C4C9999D3D3:'
'#A5A57F7FC4C4:#38389A9AACAC:#F9F9FAFAF6F6:#58585A5A5858:#D1D18E8EA6A6:'
'#76767E7E2B2B:#777759592E2E:#131358587979:#5F5F41419090:#7676BBBBCACA:'
'#B1B1B5B5AEAE:#F7F7F6F6ECEC:#1D1D1D1D1D1D'
),
'Jellybeans': (
'#929292929292:#E2E273737373:#9393B9B97979:#FFFFBABA7B7B:#9797BEBEDCDC:'
'#E1E1C0C0FAFA:#000098988E8E:#DEDEDEDEDEDE:#BDBDBDBDBDBD:#FFFFA1A1A1A1:'
'#BDBDDEDEABAB:#FFFFDCDCA0A0:#B1B1D8D8F6F6:#FBFBDADAFFFF:#1A1AB2B2A8A8:'
'#FFFFFFFFFFFF:#DEDEDEDEDEDE:#121212121212'
),
'Kibble': (
'#4D4D4D4D4D4D:#C7C700003131:#2929CFCF1313:#D8D8E3E30E0E:#34344949D1D1:'
'#84840000FFFF:#07079898ABAB:#E2E2D1D1E3E3:#5A5A5A5A5A5A:#F0F015157878:'
'#6C6CE0E05C5C:#F3F3F7F79E9E:#9797A4A4F7F7:#C4C49595F0F0:#6868F2F2E0E0:'
'#FFFFFFFFFFFF:#F7F7F7F7F7F7:#0E0E10100A0A'
),
'Later This Evening': (
'#2B2B2B2B2B2B:#D3D35A5A5F5F:#AFAFBABA6666:#E5E5D2D28989:#A0A0B9B9D5D5:'
'#BFBF9292D5D5:#9191BEBEB6B6:#3B3B3C3C3C3C:#444447474747:#D3D322222E2E:'
'#AAAABBBB3939:#E4E4BDBD3939:#65659999D5D5:#AAAA5252D5D5:#5F5FBFBFADAD:'
'#C0C0C2C2C2C2:#949494949494:#212121212121'
),
'Lavandula': (
'#232300004545:#7C7C15152525:#33337E7E6F6F:#7F7F6F6F4949:#4F4F4A4A7F7F:'
'#59593F3F7E7E:#575776767F7F:#73736E6E7D7D:#37372C2C4646:#DFDF50506666:'
'#5252E0E0C4C4:#E0E0C2C28686:#8E8E8686DFDF:#A6A67575DFDF:#9A9AD3D3DFDF:'
'#8C8C9191FAFA:#73736E6E7D7D:#050500001414'
),
'Linux Console': (
'#000000000000:#aaaa00000000:#0000aaaa0000:#aaaa55550000:#00000000aaaa:'
'#aaaa0000aaaa:#0000aaaaaaaa:#aaaaaaaaaaaa:#555555555556:#ffff55555555:'
'#5555ffff5555:#ffffffff5555:#55555555ffff:#ffff5555ffff:#5555ffffffff:'
'#ffffffffffff:#ffffffffffff:#000000000000'
),
'Liquid Carbon Transparent': (
'#000000000000:#FFFF2F2F2F2F:#54549A9A6F6F:#CCCCACAC0000:#00009999CCCC:'
'#CCCC6868C8C8:#7979C4C4CCCC:#BCBCCCCCCCCC:#000000000000:#FFFF2F2F2F2F:'
'#54549A9A6F6F:#CCCCACAC0000:#00009999CCCC:#CCCC6868C8C8:#7979C4C4CCCC:'
'#BCBCCCCCCCCC:#AFAFC2C2C2C2:#000000000000'
),
'Liquid Carbon': (
'#000000000000:#FFFF2F2F2F2F:#54549A9A6F6F:#CCCCACAC0000:#00009999CCCC:'
'#CCCC6868C8C8:#7979C4C4CCCC:#BCBCCCCCCCCC:#000000000000:#FFFF2F2F2F2F:'
'#54549A9A6F6F:#CCCCACAC0000:#00009999CCCC:#CCCC6868C8C8:#7979C4C4CCCC:'
'#BCBCCCCCCCCC:#AFAFC2C2C2C2:#2F2F2F2F2F2F'
),
'Lucario': (
'#4E4E4E4E4E4E:#FFFF6B6B6060:#FAFAB0B03636:#FFFFFFFFB6B6:#56569696EDED:'
'#FFFF7373FDFD:#8E8EE4E47878:#EEEEEEEEEEEE:#4F4F4F4F4F4F:#F9F968686060:'
'#FAFAB0B03636:#FDFDFFFFB8B8:#6B6B9F9FEDED:#FCFC6E6EF9F9:#8E8EE4E47878:'
'#FFFFFFFFFFFF:#F8F8F8F8F2F2:#2B2B3E3E5050'
),
'Man Page': (
'#000000000000:#CCCC00000000:#0000A6A60000:#999999990000:#00000000B2B2:'
'#B2B20000B2B2:#0000A6A6B2B2:#CCCCCCCCCCCC:#666666666666:#E5E500000000:'
'#0000D9D90000:#E5E5E5E50000:#00000000FFFF:#E5E50000E5E5:#0000E5E5E5E5:'
'#E5E5E5E5E5E5:#000000000000:#FEFEF4F49C9C'
),
'Mathias': (
'#000000000000:#E5E522222222:#A6A6E3E32D2D:#FCFC95951E1E:#C4C48D8DFFFF:'
'#FAFA25257373:#6767D9D9F0F0:#F2F2F2F2F2F2:#555555555555:#FFFF55555555:'
'#5555FFFF5555:#FFFFFFFF5555:#55555555FFFF:#FFFF5555FFFF:#5555FFFFFFFF:'
'#FFFFFFFFFFFF:#BBBBBBBBBBBB:#000000000000'
),
'Medallion': (
'#000000000000:#B5B54C4C0000:#7C7C8A8A1616:#D2D2BDBD2525:#60606B6BAFAF:'
'#8B8B59599090:#90906B6B2525:#C9C9C1C19999:#5E5E51511818:#FFFF91914848:'
'#B1B1C9C93A3A:#FFFFE4E44949:#ABABB8B8FFFF:#FEFE9F9FFFFF:#FFFFBBBB5151:'
'#FEFED5D59797:#CACAC2C29696:#1D1D18180808'
),
'Misterioso': (
'#000000000000:#FFFF42424242:#7474AFAF6868:#FFFFADAD2929:#33338F8F8686:'
'#94941313E5E5:#2323D7D7D7D7:#E1E1E1E1E0E0:#555555555555:#FFFF32324242:'
'#7474CDCD6868:#FFFFB9B92929:#2323D7D7D7D7:#FFFF3737FFFF:#0000EDEDE1E1:'
'#FFFFFFFFFFFF:#E1E1E1E1E0E0:#2D2D37374343'
),
'Molokai': (
'#121212121212:#FAFA25257373:#9797E1E12323:#DFDFD4D46060:#0F0F7F7FCFCF:'
'#87870000FFFF:#4242A7A7CFCF:#BBBBBBBBBBBB:#555555555555:#F5F566669C9C:'
'#B0B0E0E05E5E:#FEFEF2F26C6C:#0000AFAFFFFF:#AFAF8787FFFF:#5050CDCDFEFE:'
'#FFFFFFFFFFFF:#BBBBBBBBBBBB:#121212121212'
),
'Mona Lisa': (
'#34341A1A0D0D:#9B9B28281B1B:#626261613232:#C2C26E6E2727:#51515B5B5C5C:'
'#9B9B1D1D2929:#585880805656:#F6F6D7D75C5C:#878742422727:#FFFF42423030:'
'#B3B3B1B16363:#FFFF95956565:#9E9EB2B2B3B3:#FFFF5B5B6A6A:#8989CCCC8E8E:'
'#FFFFE5E59797:#F6F6D5D56A6A:#11110B0B0D0D'
),
'Monokai Cobalt2': (
'#1C1C1D1D1919:#D0D01B1B2424:#A7A7D3D32C2C:#D8D8CFCF6767:#6161B8B8D0D0:'
'#69695A5ABBBB:#D5D538386464:#FEFEFFFFFEFE:#1C1C1D1D1919:#D0D01B1B2424:'
'#A7A7D3D32C2C:#D8D8CFCF6767:#6161B8B8D0D0:#69695A5ABBBB:#D5D538386464:'
'#FEFEFFFFFEFE:#FFFFFFFFFFFF:#121226263737'
),
'Monokai Soda': (
'#191919191919:#F3F300005F5F:#9797E0E02323:#FAFA84841919:#9C9C6464FEFE:'
'#F3F300005F5F:#5757D1D1EAEA:#C4C4C4C4B5B5:#61615E5E4B4B:#F3F300005F5F:'
'#9797E0E02323:#DFDFD5D56161:#9C9C6464FEFE:#F3F300005F5F:#5757D1D1EAEA:'
'#F6F6F6F6EEEE:#C4C4C4C4B5B5:#191919191919'
),
'Monokai': (
'#1C1C1D1D1919:#D0D01B1B2424:#A7A7D3D32C2C:#D8D8CFCF6767:#6161B8B8D0D0:'
'#69695A5ABBBB:#D5D538386464:#FEFEFFFFFEFE:#1C1C1D1D1919:#D0D01B1B2424:'
'#A7A7D3D32C2C:#D8D8CFCF6767:#6161B8B8D0D0:#69695A5ABBBB:#D5D538386464:'
'#FEFEFFFFFEFE:#F6F6F5F5EEEE:#232325252626'
),
'N0tch2k': (
'#383838383838:#A9A955555151:#666666666666:#A9A980805151:#65657D7D3E3E:'
'#767676767676:#C9C9C9C9C9C9:#D0D0B8B8A3A3:#474747474747:#A9A977777575:'
'#8C8C8C8C8C8C:#A9A991917575:#9898BDBD5E5E:#A3A3A3A3A3A3:#DCDCDCDCDCDC:'
'#D8D8C8C8BBBB:#A0A0A0A0A0A0:#222222222222'
),
'Neopolitan': (
'#000000000000:#808000000000:#6161CECE3C3C:#FBFBDEDE2D2D:#25253B3B7676:'
'#FFFF00008080:#8D8DA6A6CECE:#F8F8F8F8F8F8:#000000000000:#808000000000:'
'#6161CECE3C3C:#FBFBDEDE2D2D:#25253B3B7676:#FFFF00008080:#8D8DA6A6CECE:'
'#F8F8F8F8F8F8:#FFFFFFFFFFFF:#27271F1F1919'
),
'Neutron': (
'#222225252B2B:#B5B53F3F3636:#5A5AB9B97777:#DDDDB5B56666:#6A6A7B7B9292:'
'#A3A379799D9D:#3F3F9393A8A8:#E6E6E8E8EEEE:#222225252B2B:#B5B53F3F3636:'
'#5A5AB9B97777:#DDDDB5B56666:#6A6A7B7B9292:#A3A379799D9D:#3F3F9393A8A8:'
'#EBEBEDEDF2F2:#E6E6E8E8EEEE:#1B1B1D1D2222'
),
'Nightlion V1': (
'#4C4C4C4C4C4C:#BBBB00000000:#5E5EDEDE8F8F:#F2F2F0F06767:#26266A6AD7D7:'
'#BBBB0000BBBB:#0000D9D9DFDF:#BBBBBBBBBBBB:#555555555555:#FFFF55555555:'
'#5555FFFF5555:#FFFFFFFF5555:#55555555FFFF:#FFFF5555FFFF:#5555FFFFFFFF:'
'#FFFFFFFFFFFF:#BBBBBBBBBBBB:#000000000000'
),
'Nightlion V2': (
'#4C4C4C4C4C4C:#BBBB00000000:#0303F6F62222:#F2F2F0F06767:#6363D0D0F0F0:'
'#CECE6F6FDADA:#0000D9D9DFDF:#BBBBBBBBBBBB:#555555555555:#FFFF55555555:'
'#7D7DF6F61C1C:#FFFFFFFF5555:#6262CACAE7E7:#FFFF9A9AF5F5:#0000CCCCD7D7:'
'#FFFFFFFFFFFF:#BBBBBBBBBBBB:#171717171717'
),
'Novel': (
'#000000000000:#CCCC00000000:#000096960000:#D0D06B6B0000:#00000000CCCC:'
'#CCCC0000CCCC:#00008787CCCC:#CCCCCCCCCCCC:#7F7F7F7F7F7F:#CCCC00000000:'
'#000096960000:#D0D06B6B0000:#00000000CCCC:#CCCC0000CCCC:#00008686CBCB:'
'#FFFFFFFFFFFF:#3B3B23232222:#DFDFDBDBC3C3'
),
'Obsidian': (
'#000000000000:#A5A500000101:#0000BBBB0000:#FEFECCCC2222:#39399B9BDADA:'
'#BBBB0000BBBB:#0000BBBBBBBB:#BBBBBBBBBBBB:#555555555555:#FFFF00000303:'
'#9292C7C76363:#FEFEF7F77373:#A0A0D6D6FFFF:#FFFF5555FFFF:#5555FFFFFFFF:'
'#FFFFFFFFFFFF:#CCCCCCCCCCCC:#272730303232'
),
'Ocean': (
'#000000000000:#999900000000:#0000A6A60000:#999999990000:#00000000B2B2:'
'#B2B20000B2B2:#0000A6A6B2B2:#BFBFBFBFBFBF:#666666666666:#E5E500000000:'
'#0000D9D90000:#E5E5E5E50000:#00000000FFFF:#E5E50000E5E5:#0000E5E5E5E5:'
'#E5E5E5E5E5E5:#FFFFFFFFFFFF:#22224F4FBCBC'
),
'Ocean Dark': (
'#2B2B30303B3B:#BFBF61616A6A:#A3A3BEBE8C8C:#EBEBCBCB8B8B:#8F8FA1A1B3B3:'
'#B4B48E8EADAD:#9696B5B5B4B4:#C0C0C5C5CECE:#656573737E7E:#BFBF61616A6A:'
'#A3A3BEBE8C8C:#EBEBCBCB8B8B:#8F8FA1A1B3B3:#B4B48E8EADAD:#9696B5B5B4B4:'
'#EFEFF1F1F5F5:#C0C0C5C5CECE:#2B2B30303B3B'
),
'Ocean Light': (
'#EFEFF1F1F5F5:#BFBF61616A6A:#A3A3BEBE8C8C:#EBEBCBCB8B8B:#8F8FA1A1B3B3:'
'#B4B48E8EADAD:#9696B5B5B4B4:#C0C0C5C5CECE:#656573737E7E:#BFBF61616A6A:'
'#A3A3BEBE8C8C:#EBEBCBCB8B8B:#8F8FA1A1B3B3:#B4B48E8EADAD:#9696B5B5B4B4:'
'#2B2B30303B3B:#4F4F5B5B6666:#EFEFF1F1F5F5'
),
'Oceanic Next Dark': (
'#1B1B2B2B3434:#ECEC5f5f6767:#9999C7C79494:#FAFAC8C86363:#66669999CCCC:'
'#C5C59494C5C5:#5F5FB3B3B3B3:#C0C0C5C5CECE:#656573737E7E:#ECEC5f5f6767:'
'#9999C7C79494:#FAFAC8C86363:#66669999CCCC:#C5C59494C5C5:#5F5FB3B3B3B3:'
'#D8D8DEDEE9E9:#C0C0C5C5CECE:#1B1B2B2B3434'
),
'Oceanic Next Light': (
'#D8D8DEDEE9E9:#ECEC5f5f6767:#9999C7C79494:#FAFAC8C86363:#66669999CCCC:'
'#C5C59494C5C5:#5F5FB3B3B3B3:#C0C0C5C5CECE:#656573737E7E:#ECEC5f5f6767:'
'#9999C7C79494:#FAFAC8C86363:#66669999CCCC:#C5C59494C5C5:#5F5FB3B3B3B3:'
'#1B1B2B2B3434:#4F4F5B5B6666:#D8D8DEDEE9E9'
),
'Ollie': (
'#000000000000:#ABAB2E2E3030:#3131ABAB6060:#ABAB42420000:#2C2C5656ABAB:'
'#AFAF84842727:#1F1FA5A5ABAB:#8A8A8D8DABAB:#5A5A36362525:#FFFF3D3D4848:'
'#3B3BFFFF9999:#FFFF5E5E1E1E:#44448787FFFF:#FFFFC2C21C1C:#1E1EFAFAFFFF:'
'#5B5B6D6DA7A7:#8A8A8D8DAEAE:#212120202424'
),
'One Dark': (
'#000000000000:#B0B058586969:#7676A6A66565:#CFCFB0B07373:#4A4AA4A4B8B8:'
'#A1A16565C1C1:#4A4AA4A4B8B8:#B1B1B1B1B1B1:#4C4C57577272:#B0B058586969:'
'#7676A6A66565:#CFCFB0B07373:#4A4AA4A4B8B8:#A1A16565C1C1:#4A4AA4A4B8B8:'
'#DEDEDEDEDEDE:#4C4C57577171:#171718181C1C'
),
'Paul Millr': (
'#2A2A2A2A2A2A:#FFFF00000000:#7979FFFF0F0F:#E7E7BFBF0000:#38386B6BD7D7:'
'#B3B34949BEBE:#6666CCCCFFFF:#BBBBBBBBBBBB:#666666666666:#FFFF00008080:'
'#6666FFFF6666:#F3F3D6D64E4E:#70709A9AEDED:#DBDB6767E6E6:#7979DFDFF2F2:'
'#FFFFFFFFFFFF:#F2F2F2F2F2F2:#000000000000'
),
'Pencil Dark': (
'#212121212121:#C3C307077171:#1010A7A77878:#A8A89C9C1414:#00008E8EC4C4:'
'#52523C3C7979:#2020A5A5BABA:#D9D9D9D9D9D9:#424242424242:#FBFB00007A7A:'
'#5F5FD7D7AFAF:#F3F3E4E43030:#2020BBBBFCFC:#68685555DEDE:#4F4FB8B8CCCC:'
'#F1F1F1F1F1F1:#F1F1F1F1F1F1:#212121212121'
),
'Pencil Light': (
'#212121212121:#C3C307077171:#1010A7A77878:#A8A89C9C1414:#00008E8EC4C4:'
'#52523C3C7979:#2020A5A5BABA:#D9D9D9D9D9D9:#424242424242:#FBFB00007A7A:'
'#5F5FD7D7AFAF:#F3F3E4E43030:#2020BBBBFCFC:#68685555DEDE:#4F4FB8B8CCCC:'
'#F1F1F1F1F1F1:#424242424242:#F1F1F1F1F1F1'
),
'Pnevma': (
'#2F2F2E2E2D2D:#A3A366666666:#9090A5A57D7D:#D7D7AFAF8787:#7F7FA5A5BDBD:'
'#C7C79E9EC4C4:#8A8ADBDBB4B4:#D0D0D0D0D0D0:#4A4A48484545:#D7D787878787:'
'#AFAFBEBEA2A2:#E4E4C9C9AFAF:#A1A1BDBDCECE:#D7D7BEBEDADA:#B1B1E7E7DDDD:'
'#EFEFEFEFEFEF:#D0D0D0D0D0D0:#1C1C1C1C1C1C'
),
'Pro': (
'#000000000000:#999900000000:#0000A6A60000:#999999990000:#1F1F0808DBDB:'
'#B2B20000B2B2:#0000A6A6B2B2:#BFBFBFBFBFBF:#666666666666:#E5E500000000:'
'#0000D9D90000:#E5E5E5E50000:#00000000FFFF:#E5E50000E5E5:#0000E5E5E5E5:'
'#E5E5E5E5E5E5:#F2F2F2F2F2F2:#000000000000'
),
'Red Alert': (
'#000000000000:#D5D52E2E4D4D:#7171BEBE6B6B:#BEBEB8B86B6B:#47479B9BEDED:'
'#E8E87878D6D6:#6B6BBEBEB8B8:#D6D6D6D6D6D6:#262626262626:#E0E024245353:'
'#AFAFF0F08B8B:#DFDFDDDDB7B7:#6565A9A9F0F0:#DDDDB7B7DFDF:#B7B7DFDFDDDD:'
'#FFFFFFFFFFFF:#FFFFFFFFFFFF:#767624242323'
),
'Red Sands': (
'#000000000000:#FFFF3F3F0000:#0000BBBB0000:#E7E7B0B00000:#00007171FFFF:'
'#BBBB0000BBBB:#0000BBBBBBBB:#BBBBBBBBBBBB:#555555555555:#BBBB00000000:'
'#0000BBBB0000:#E7E7B0B00000:#00007171AEAE:#FFFF5555FFFF:#5555FFFFFFFF:'
'#FFFFFFFFFFFF:#D7D7C9C9A7A7:#797924241E1E'
),
'Rippedcasts': (
'#000000000000:#CDCDAFAF9595:#A7A7FFFF6060:#BFBFBBBB1F1F:#7575A5A5B0B0:'
'#FFFF7373FDFD:#595964647E7E:#BFBFBFBFBFBF:#666666666666:#EEEECBCBADAD:'
'#BCBCEEEE6868:#E5E5E5E50000:#8686BDBDC9C9:#E5E50000E5E5:#8C8C9B9BC3C3:'
'#E5E5E5E5E5E5:#FFFFFFFFFFFF:#2B2B2B2B2B2B'
),
'Royal': (
'#24241F1F2A2A:#909027274B4B:#232380801C1C:#B4B49D9D2727:#64648080AFAF:'
'#66664D4D9696:#8A8AAAAABDBD:#515149496565:#31312D2D3C3C:#D4D434346C6C:'
'#2C2CD8D84545:#FDFDE8E83A3A:#8F8FB9B9F9F9:#A4A47979E2E2:#ABABD3D3EBEB:'
'#9D9D8B8BBDBD:#505048486868:#101008081414'
),
'Rxvt': (
'#000000000000:#cdcd00000000:#0000cdcd0000:#cdcdcdcd0000:#00000000cdcd:'
'#cdcd0000cdcd:#0000cdcdcdcd:#fafaebebd7d7:#404040404040:#ffff00000000:'
'#0000ffff0000:#ffffffff0000:#00000000ffff:#ffff0000ffff:#0000ffffffff:'
'#ffffffffffff:#ffffffffffff:#000000000000'
),
'Sea Shells': (
'#171738384C4C:#D1D150502323:#02027C7C9B9B:#FCFCA0A02F2F:#1E1E49495050:'
'#6868D3D3F1F1:#5050A3A3B5B5:#DEDEB8B88D8D:#42424B4B5252:#D3D386867777:'
'#61618C8C9898:#FDFDD2D29E9E:#1B1BBCBCDDDD:#BBBBE3E3EEEE:#8686ABABB3B3:'
'#FEFEE3E3CDCD:#DEDEB8B88D8D:#080813131A1A'
),
'Seafoam Pastel': (
'#757575757575:#82825D5D4D4D:#71718C8C6161:#ADADA1A16D6D:#4D4D7B7B8282:'
'#8A8A71716767:#717193939393:#E0E0E0E0E0E0:#8A8A8A8A8A8A:#CFCF93937979:'
'#9898D9D9AAAA:#FAFAE7E79D9D:#7979C3C3CFCF:#D6D6B2B2A1A1:#ADADE0E0E0E0:'
'#E0E0E0E0E0E0:#D3D3E7E7D3D3:#242434343434'
),
'Seti': (
'#323232323232:#C2C228283232:#8E8EC4C43D3D:#E0E0C6C64F4F:#4343A5A5D5D5:'
'#8B8B5757B5B5:#8E8EC4C43D3D:#EEEEEEEEEEEE:#323232323232:#C2C228283232:'
'#8E8EC4C43D3D:#E0E0C6C64F4F:#4343A5A5D5D5:#8B8B5757B5B5:#8E8EC4C43D3D:'
'#FFFFFFFFFFFF:#CACACECECDCD:#111112121313'
),
'Shaman': (
'#010120202626:#B1B12F2F2C2C:#0000A9A94040:#5D5D8A8AA9A9:#444499998585:'
'#000059599C9C:#5C5C7E7E1919:#404055555454:#373743435050:#FFFF42424242:'
'#2A2AEAEA5E5E:#8D8DD3D3FDFD:#6161D4D4B9B9:#12129898FFFF:#9898CFCF2828:'
'#5858FAFAD6D6:#404055555555:#000010101414'
),
'Slate': (
'#212121212121:#E1E1A7A7BFBF:#8080D7D77878:#C4C4C9C9BFBF:#25254A4A4949:'
'#A3A38080D3D3:#1414ABAB9C9C:#0202C4C4E0E0:#FFFFFFFFFFFF:#FFFFCCCCD8D8:'
'#BDBDFFFFA8A8:#D0D0CBCBC9C9:#7979AFAFD2D2:#C4C4A7A7D8D8:#8B8BDEDEE0E0:'
'#E0E0E0E0E0E0:#3434B0B0D2D2:#212121212121'
),
'Smyck': (
'#000000000000:#B7B741413131:#7D7DA9A90000:#C4C4A4A40000:#6262A3A3C4C4:'
'#B9B98A8ACCCC:#202073738383:#A0A0A0A0A0A0:#7A7A7A7A7A7A:#D6D683837B7B:'
'#C4C4F0F03636:#FEFEE1E14D4D:#8D8DCFCFF0F0:#F7F79999FFFF:#6969D9D9CFCF:'
'#F7F7F7F7F7F7:#F7F7F7F7F7F7:#1B1B1B1B1B1B'
),
'Soft Server': (
'#000000000000:#A1A168686969:#9999A5A56969:#A2A290906969:#6A6A8F8FA3A3:'
'#69697171A3A3:#6B6BA4A48F8F:#9999A3A3A2A2:#66666C6C6B6B:#DCDC5B5B5F5F:'
'#BFBFDEDE5454:#DEDEB3B35F5F:#6262B1B1DFDF:#5F5F6E6EDEDE:#6464E3E39C9C:'
'#D1D1DFDFDEDE:#9999A3A3A2A2:#242426262626'
),
'Solarized Darcula': (
'#252529292A2A:#F2F248484040:#626296965555:#B6B688880000:#20207575C7C7:'
'#79797F7FD4D4:#151596968D8D:#D2D2D8D8D9D9:#252529292A2A:#F2F248484040:'
'#626296965555:#B6B688880000:#20207575C7C7:#79797F7FD4D4:#151596968D8D:'
'#D2D2D8D8D9D9:#D2D2D8D8D9D9:#3D3D3F3F4141'
),
'Solarized Dark Higher Contrast': (
'#000027273131:#D0D01B1B2424:#6B6BBEBE6C6C:#A5A577770505:#20207575C7C7:'
'#C6C61B1B6E6E:#252591918585:#E9E9E2E2CBCB:#000063638888:#F4F415153B3B:'
'#5050EEEE8484:#B1B17E7E2828:#17178D8DC7C7:#E1E14D4D8E8E:#0000B2B29E9E:'
'#FCFCF4F4DCDC:#9B9BC1C1C2C2:#00001E1E2626'
),
'Solarized Dark': (
'#000027273131:#D0D01B1B2424:#727289890505:#A5A577770505:#20207575C7C7:'
'#C6C61B1B6E6E:#252591918585:#E9E9E2E2CBCB:#00001E1E2626:#BDBD36361212:'
'#46465A5A6161:#525267676F6F:#707081818383:#58585656B9B9:#818190908F8F:'
'#FCFCF4F4DCDC:#707081818383:#00001E1E2626'
),
'Solarized Light': (
'#000027273131:#D0D01B1B2424:#727289890505:#A5A577770505:#20207575C7C7:'
'#C6C61B1B6E6E:#252591918585:#E9E9E2E2CBCB:#00001E1E2626:#BDBD36361212:'
'#46465A5A6161:#525267676F6F:#707081818383:#58585656B9B9:#818190908F8F:'
'#FCFCF4F4DCDC:#525267676F6F:#FCFCF4F4DCDC'
),
'Space Gray Eighties': (
'#151517171C1C:#ECEC5F5F6767:#8080A7A76363:#FDFDC2C25353:#54548585C0C0:'
'#BFBF8383C0C0:#5757C2C2C0C0:#EEEEECECE7E7:#555555555555:#FFFF69697373:'
'#9393D3D39393:#FFFFD1D15656:#4D4D8383D0D0:#FFFF5555FFFF:#8383E8E8E4E4:'
'#FFFFFFFFFFFF:#BDBDB9B9AEAE:#212121212121'
),
'Space Gray': (
'#000000000000:#AFAF4B4B5757:#8787B2B27979:#E5E5C0C07878:#7C7C8F8FA3A3:'
'#A3A379799696:#8484A6A6A4A4:#B2B2B8B8C2C2:#000000000000:#AFAF4B4B5757:'
'#8787B2B27979:#E5E5C0C07878:#7C7C8F8FA3A3:#A3A379799696:#8484A6A6A4A4:'
'#FFFFFEFEFEFE:#B2B2B8B8C2C2:#202023232C2C'
),
'Spacedust': (
'#6E6E52524646:#E3E35A5A0000:#5C5CABAB9696:#E3E3CDCD7B7B:#0E0E54548B8B:'
'#E3E35A5A0000:#0606AFAFC7C7:#F0F0F1F1CECE:#67674C4C3131:#FFFF8A8A3939:'
'#ADADCACAB8B8:#FFFFC7C77777:#6767A0A0CDCD:#FFFF8A8A3939:#8383A6A6B3B3:'
'#FEFEFFFFF0F0:#ECECEFEFC1C1:#0A0A1E1E2424'
),
'Spixel': (
'#000000000000:#A4A43E3E6363:#8A8AB5B54444:#F3F39A9A2626:#51518B8BA3A3:'
'#97977070B3B3:#5B5BA6A6A5A5:#D3D3D7D7CFCF:#707073736D6D:#E8E84A4A8484:'
'#A7A7E3E34646:#F1F1C5C58B8B:#7373B9B9D6D6:#C5E49865E6E6:#8282D9D9D8D8:'
'#EEEEEEEEECEC:#FFFFFFFFFFFF:#262626262222'
),
'Spring': (
'#000000000000:#FFFF4C4C8383:#1F1F8C8C3A3A:#1F1FC9C95A5A:#1C1CD2D2EEEE:'
'#89895959A8A8:#3E3E99999F9F:#FFFFFEFEFEFE:#000000000000:#FFFF00002121:'
'#1F1FC2C23131:#D4D4B7B70606:#1515A9A9FDFD:#89895959A8A8:#3E3E99999F9F:'
'#FFFFFEFEFEFE:#4D4D4D4D4C4C:#FFFFFFFFFFFF'
),
'Square': (
'#050505050505:#E9E989897C7C:#B6B637377D7D:#ECECEBEBBEBE:#A9A9CDCDEBEB:'
'#757550507B7B:#C9C9CACAECEC:#F2F2F2F2F2F2:#141414141414:#F9F992928686:'
'#C3C3F7F78686:#FCFCFBFBCCCC:#B6B6DEDEFBFB:#ADAD7F7FA8A8:#D7D7D9D9FCFC:'
'#E2E2E2E2E2E2:#ACACACACABAB:#1A1A1A1A1A1A'
),
'Sundried': (
'#30302B2B2A2A:#A6A646463D3D:#575776764444:#9C9C5F5F2A2A:#48485A5A9898:'
'#858545455151:#9C9C81814E4E:#C8C8C8C8C8C8:#4D4D4D4D4747:#AAAA00000C0C:'
'#12128C8C2020:#FCFC6A6A2020:#78789898F7F7:#FCFC8989A0A0:#FAFAD3D38484:'
'#FFFFFEFEFEFE:#C8C8C8C8C8C8:#1A1A18181818'
),
'Symphonic': (
'#000000000000:#DCDC32322F2F:#5656DBDB3A3A:#FFFF84840000:#00008484D4D4:'
'#B7B72929D9D9:#CCCCCCCCFFFF:#FFFFFFFFFFFF:#1B1B1D1D2121:#DCDC32322F2F:'
'#5656DBDB3A3A:#FFFF84840000:#00008484D4D4:#B7B72929D9D9:#CCCCCCCCFFFF:'
'#FFFFFFFFFFFF:#FFFFFFFFFFFF:#000000000000'
),
'Tango': (
'#000000000000:#cccc00000000:#4e4e9a9a0606:#c4c4a0a00000:#34346565a4a4:'
'#757550507b7b:#060698209a9a:#d3d3d7d7cfcf:#555557575353:#efef29292929:'
'#8a8ae2e23434:#fcfce9e94f4f:#72729f9fcfcf:#adad7f7fa8a8:#3434e2e2e2e2:'
'#eeeeeeeeecec:#ffffffffffff:#000000000000'
),
'Teerb': (
'#1C1C1C1C1C1C:#D6D686868686:#AEAED6D68686:#D7D7AFAF8787:#8686AEAED6D6:'
'#D6D6AEAED6D6:#8A8ADBDBB4B4:#D0D0D0D0D0D0:#1C1C1C1C1C1C:#D6D686868686:'
'#AEAED6D68686:#E4E4C9C9AFAF:#8686AEAED6D6:#D6D6AEAED6D6:#B1B1E7E7DDDD:'
'#EFEFEFEFEFEF:#D0D0D0D0D0D0:#262626262626'
),
'Terminal Basic': (
'#000000000000:#999900000000:#0000A6A60000:#999999990000:#00000000B2B2:'
'#B2B20000B2B2:#0000A6A6B2B2:#BFBFBFBFBFBF:#666666666666:#E5E500000000:'
'#0000D9D90000:#E5E5E5E50000:#00000000FFFF:#E5E50000E5E5:#0000E5E5E5E5:'
'#E5E5E5E5E5E5:#000000000000:#FFFFFFFFFFFF'
),
'Thayer Bright': (
'#1B1B1D1D1E1E:#F9F926267272:#4D4DF7F74040:#F3F3FDFD2121:#26265656D6D6:'
'#8C8C5454FEFE:#3737C8C8B4B4:#CCCCCCCCC6C6:#505053535454:#FFFF59599595:'
'#B6B6E3E35454:#FEFEEDED6C6C:#3F3F7878FFFF:#9E9E6F6FFEFE:#2323CECED4D4:'
'#F8F8F8F8F2F2:#F8F8F8F8F8F8:#1B1B1D1D1E1E'
),
'Tomorrow Night Blue': (
'#000000000000:#FFFF9D9DA3A3:#D1D1F1F1A9A9:#FFFFEEEEADAD:#BBBBDADAFFFF:'
'#EBEBBBBBFFFF:#9999FFFFFFFF:#FFFFFEFEFEFE:#000000000000:#FFFF9C9CA3A3:'
'#D0D0F0F0A8A8:#FFFFEDEDACAC:#BABADADAFFFF:#EBEBBABAFFFF:#9999FFFFFFFF:'
'#FFFFFEFEFEFE:#FFFFFEFEFEFE:#000024245151'
),
'Tomorrow Night Bright': (
'#000000000000:#D5D54E4E5353:#B9B9CACA4949:#E7E7C5C54747:#7979A6A6DADA:'
'#C3C39797D8D8:#7070C0C0B1B1:#FFFFFEFEFEFE:#000000000000:#D4D44D4D5353:'
'#B9B9C9C94949:#E6E6C4C44646:#7979A6A6DADA:#C3C39696D7D7:#7070C0C0B1B1:'
'#FFFFFEFEFEFE:#E9E9E9E9E9E9:#000000000000'
),
'Tomorrow Night Eighties': (
'#000000000000:#F2F277777979:#9999CCCC9999:#FFFFCCCC6666:#66669999CCCC:'
'#CCCC9999CCCC:#6666CCCCCCCC:#FFFFFEFEFEFE:#000000000000:#F1F177777979:'
'#9999CCCC9999:#FFFFCCCC6666:#66669999CCCC:#CCCC9999CCCC:#6666CCCCCCCC:'
'#FFFFFEFEFEFE:#CCCCCCCCCCCC:#2C2C2C2C2C2C'
),
'Tomorrow Night': (
'#000000000000:#CCCC66666666:#B5B5BDBD6868:#F0F0C6C67474:#8181A2A2BEBE:'
'#B2B29393BBBB:#8A8ABEBEB7B7:#FFFFFEFEFEFE:#000000000000:#CCCC66666666:'
'#B5B5BDBD6868:#F0F0C5C57474:#8080A1A1BDBD:#B2B29494BABA:#8A8ABDBDB6B6:'
'#FFFFFEFEFEFE:#C5C5C8C8C6C6:#1D1D1F1F2121'
),
'Tomorrow': (
'#000000000000:#C8C828282828:#71718C8C0000:#EAEAB7B70000:#41417171AEAE:'
'#89895959A8A8:#3E3E99999F9F:#FFFFFEFEFEFE:#000000000000:#C8C828282828:'
'#70708B8B0000:#E9E9B6B60000:#41417070AEAE:#89895858A7A7:#3D3D99999F9F:'
'#FFFFFEFEFEFE:#4D4D4D4D4C4C:#FFFFFFFFFFFF'
),
'Toy Chest': (
'#2C2C3F3F5757:#BEBE2D2D2626:#191991917171:#DADA8E8E2626:#32325D5D9696:'
'#8A8A5D5DDBDB:#3535A0A08F8F:#2323D0D08282:#323268688989:#DDDD59594343:'
'#3030CFCF7B7B:#E7E7D7D74B4B:#3333A5A5D9D9:#ADAD6B6BDCDC:#4141C3C3ADAD:'
'#D4D4D4D4D4D4:#3030CFCF7B7B:#232336364A4A'
),
'Treehouse': (
'#323212120000:#B1B127270E0E:#4444A9A90000:#A9A981810B0B:#575784849999:'
'#969636363C3C:#B2B259591D1D:#77776B6B5353:#424236362525:#EDED5C5C2020:'
'#5555F2F23737:#F1F1B7B73131:#8585CFCFECEC:#E0E04B4B5A5A:#F0F07D7D1414:'
'#FFFFC8C80000:#77776B6B5353:#191919191919'
),
'Twilight': (
'#141414141414:#C0C06C6C4343:#AFAFB9B97979:#C2C2A8A86C6C:#444446464949:'
'#B4B4BEBE7B7B:#777782828484:#FEFEFFFFD3D3:#262626262626:#DDDD7C7C4C4C:'
'#CBCBD8D88C8C:#E1E1C4C47D7D:#5A5A5D5D6161:#D0D0DBDB8E8E:#8A8A98989A9A:'
'#FEFEFFFFD3D3:#FEFEFFFFD3D3:#141414141414'
),
'Urple': (
'#000000000000:#AFAF42425B5B:#3737A3A31515:#ACAC5B5B4141:#55554D4D9A9A:'
'#6C6C3B3BA1A1:#808080808080:#878779799C9C:#5C5C31312525:#FFFF63638787:'
'#2828E5E51F1F:#F0F080806161:#85857979EDED:#A0A05D5DEEEE:#EAEAEAEAEAEA:'
'#BFBFA3A3FFFF:#868679799A9A:#1B1B1B1B2323'
),
'Vaughn': (
'#242423234F4F:#707050505050:#6060B4B48A8A:#DFDFAFAF8F8F:#55555555FFFF:'
'#F0F08C8CC3C3:#8C8CD0D0D3D3:#707090908080:#707090908080:#DCDCA3A3A3A3:'
'#6060B4B48A8A:#F0F0DFDFAFAF:#55555555FFFF:#ECEC9393D3D3:#9393E0E0E3E3:'
'#FFFFFFFFFFFF:#DCDCDCDCCCCC:#252523234E4E'
),
'Vibrant Ink': (
'#878787878787:#FFFF66660000:#CCCCFFFF0404:#FFFFCCCC0000:#4444B3B3CCCC:'
'#99993333CCCC:#4444B3B3CCCC:#F5F5F5F5F5F5:#555555555555:#FFFF00000000:'
'#0000FFFF0000:#FFFFFFFF0000:#00000000FFFF:#FFFF0000FFFF:#0000FFFFFFFF:'
'#E5E5E5E5E5E5:#FFFFFFFFFFFF:#000000000000'
),
'Warm Neon': (
'#000000000000:#E2E243434545:#3838B1B13939:#DADAE1E14545:#42426060C5C5:'
'#F8F81F1FFBFB:#2929BABAD3D3:#D0D0B8B8A3A3:#FDFDFCFCFCFC:#E8E86F6F7171:'
'#9B9BC0C08F8F:#DDDDD9D97979:#7A7A9090D5D5:#F6F67474B9B9:#5E5ED1D1E4E4:'
'#D8D8C8C8BBBB:#AFAFDADAB6B6:#3F3F3F3F3F3F'
),
'Wez': (
'#000000000000:#CCCC55555555:#5555CCCC5555:#CDCDCDCD5555:#54545555CBCB:'
'#CCCC5555CCCC:#7A7ACACACACA:#CCCCCCCCCCCC:#555555555555:#FFFF55555555:'
'#5555FFFF5555:#FFFFFFFF5555:#55555555FFFF:#FFFF5555FFFF:#5555FFFFFFFF:'
'#FFFFFFFFFFFF:#B3B3B3B3B3B3:#000000000000'
),
'Wild Cherry': (
'#000005050606:#D9D940408585:#2A2AB2B25050:#FFFFD1D16F6F:#88883C3CDCDC:'
'#ECECECECECEC:#C1C1B8B8B7B7:#FFFFF8F8DDDD:#00009C9CC9C9:#DADA6B6BABAB:'
'#F4F4DBDBA5A5:#EAEAC0C06666:#2F2F8B8BB9B9:#AEAE63636B6B:#FFFF91919D9D:'
'#E4E483838D8D:#D9D9FAFAFFFF:#1F1F16162626'
),
'Wombat': (
'#000000000000:#FFFF60605A5A:#B1B1E8E86969:#EAEAD8D89C9C:#5D5DA9A9F6F6:'
'#E8E86A6AFFFF:#8282FFFFF6F6:#DEDED9D9CECE:#313131313131:#F5F58B8B7F7F:'
'#DCDCF8F88F8F:#EEEEE5E5B2B2:#A5A5C7C7FFFF:#DDDDAAAAFFFF:#B6B6FFFFF9F9:'
'#FEFEFFFFFEFE:#DEDED9D9CECE:#171717171717'
),
'Wryan': (
'#333333333333:#8C8C46466565:#282873737373:#7C7C7C7C9999:#393955557373:'
'#5E5E46468C8C:#313165658C8C:#89899C9CA1A1:#3D3D3D3D3D3D:#BFBF4D4D8080:'
'#5353A6A6A6A6:#9E9E9E9ECBCB:#47477A7AB3B3:#7E7E6262B3B3:#60609696BFBF:'
'#C0C0C0C0C0C0:#999999999393:#101010101010'
),
'Xterm': (
'#000000000000:#cdcb00000000:#0000cdcb0000:#cdcbcdcb0000:#1e1a908fffff:'
'#cdcb0000cdcb:#0000cdcbcdcb:#e5e2e5e2e5e2:#4ccc4ccc4ccc:#ffff00000000:'
'#0000ffff0000:#ffffffff0000:#46458281b4ae:#ffff0000ffff:#0000ffffffff:'
'#ffffffffffff:#ffffffffffff:#000000000000'
),
'Zenburn': (
'#4D4D4D4D4D4D:#707050505050:#6060B4B48A8A:#F0F0DFDFAFAF:#505060607070:'
'#DCDC8C8CC3C3:#8C8CD0D0D3D3:#DCDCDCDCCCCC:#707090908080:#DCDCA3A3A3A3:'
'#C3C3BFBF9F9F:#E0E0CFCF9F9F:#9494BFBFF3F3:#ECEC9393D3D3:#9393E0E0E3E3:'
'#FFFFFFFFFFFF:#DCDCDCDCCCCC:#3F3F3F3F3F3F'
),
}
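# Each palette value above is a colon-separated string of 18 48-bit hex
# colors: the 16 ANSI terminal colors followed by what appear to be the
# default foreground and background colors used by Guake.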
|
Ozzyboshi/guake
|
guake/palettes.py
|
Python
|
gpl-2.0
| 45,484
|
[
"ESPResSo",
"Galaxy"
] |
7fe278f1698025524fd7848f18dee03ff0aad3aaf1a7ecd10ba1d74d542812f2
|
"""
Simulate interlaced spectra.
"""
import os
import glob
from pylab import cm
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import numpy as np
#import pyfits
import astropy.io.fits as pyfits
import unicorn
import unicorn.interlace_fit
import unicorn.utils_c as utils_c
import threedhst
def sim_all():
"""
Run in GRISM_HOME / SIMULATIONS, loop through all pointings and run the
spectra simulation.
"""
import glob
import unicorn
import unicorn.intersim
files = glob.glob('*G141_inter.fits')
    for inter_file in files:
        root = inter_file.split('-G141')[0]
        unicorn.intersim.simspec(root=root)
def simspec(root='COSMOS-19'):
"""
Root is the base image where the noise and direct images come from.
"""
#### Simple model of a gaussian Ha emission line
xflux = np.arange(1.e4,1.8e4)
dv = 100 # km/s
z0 = 1.0
l0 = 6564.61*(1+z0)
dlam = dv/3.e5*l0
yline = 1./np.sqrt(2*np.pi*dlam**2)*np.exp(-(xflux-l0)**2/2/dlam**2)
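    #### Hypothetical sanity check: the Gaussian profile above is unit-normalized,
    #### so integrating it over wavelength should give ~1 (uncomment to verify):
    #print(np.trapz(yline, xflux))  # expect ~1.0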
### Use template emission lines rather than a single gaussian
xflux, yline = np.loadtxt(unicorn.GRISM_HOME+'/templates/dobos11/SF0_0.emline.txt', unpack=True)
xflux *= (1+z0)
#### Add continuum, here with level 0.1*max(line)
ycont = yline.max()*0.1
yflux = ycont+yline
#### Normalize to F140W passband
x_filt, y_filt = np.loadtxt(os.getenv('iref')+'/F140W.dat', unpack=True)
y_filt_int = utils_c.interp_c(xflux, x_filt, y_filt)
filt_norm = np.trapz(y_filt_int*yflux, xflux) / np.trapz(y_filt_int, xflux)
yflux /= filt_norm
yline /= filt_norm
ycont /= filt_norm
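    #### Hypothetical sanity check: after dividing by filt_norm the synthetic
    #### spectrum has unit mean flux through F140W, so the bandpass-weighted
    #### average should come back as ~1:
    #print(np.trapz(y_filt_int*yflux, xflux) / np.trapz(y_filt_int, xflux))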
ids = [290]
model = unicorn.reduce.GrismModel(root)
ids = model.cat.id[model.cat.mag < 24]
#ids = [245]
#### Generate model where every spectrum is the line template but the mag/shape of the galaxies
#### is as observed
for i,id in enumerate(ids):
        print(unicorn.noNewLine+'%d (%d/%d)' %(id, i+1, len(ids)))
model.compute_object_model(id, lam_spec=xflux, flux_spec=yflux)
model.model += model.object
#### Get error array from the error extension
err = np.random.normal(size=model.model.shape)*model.gris[2].data
mask = (err != 0) & (model.segm[0].data == 0)
#### Compare background flux distributions
#plt.hist(model.gris[1].data[mask].flatten(), range=(-0.1,0.1), bins=100, alpha=0.5)
#plt.hist(err[mask].flatten(), range=(-0.1,0.1), bins=100, alpha=0.5)
#### Store the new model in the grism image data extension so that we can fit it with the
#### various tools (z, line strength, etc)
#old = model.gris[1].data*1.
model.gris[1].data = model.model*(err != 0) + err
model.get_corrected_wcs(verbose=True)
model.init_object_spectra()
model.model*=0
##### Try extracting a spectrum and fitting it
#id=685
#id=343
#id=ids[0]
    for id in ids:
        obj = '%s_%05d' %(root, id)
        print('%s.linefit.png' %(obj))
        if os.path.exists('%s.linefit.png' %(obj)):
            print('skip')
            continue
flam = np.sum(model.flux[model.segm[0].data == id])
fnu = np.sum(model.flux_fnu*(model.segm[0].data == id))
### *Input* line flux, should be able to get this directly from the input spectrum and the
### observed magnitude, but check units.
#plt.plot(xflux, yflux/filt_norm*flam*1.e-17)
ha = np.abs(xflux-6564*(1+z0)) < 100
ha_flux = np.trapz(yline[ha]*flam*1.e-17, xflux[ha])
ha_eqw = np.trapz(yline[ha]/ycont, xflux[ha])
s2 = np.abs(xflux-6731*(1+z0)) < 100
s2_flux = np.trapz(yline[s2]*flam*1.e-17, xflux[s2])
s2_eqw = np.trapz(yline[s2]/ycont, xflux[s2])
model.twod_spectrum(id, refine=True, verbose=True)
if not model.twod_status:
continue
model.show_2d(savePNG=True)
spec = unicorn.reduce.Interlace1D(root+'_%05d.1D.fits' %(id), PNG=True)
#### Redshift fit, set template to flat and the redshift prior to a broad gaussian centered
#### on the input value, z0
zgrid = np.arange(0,4,0.005)
pz = np.exp(-(zgrid-z0)**2/2/0.5**2)
lnprob = np.log(pz)
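        #### Hypothetical worked example: with the sigma = 0.5 prior above, a
        #### trial redshift 0.5 away from z0 is only mildly disfavored,
        #### exp(-0.5**2/2/0.5**2) ~ 0.61.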
gris = unicorn.interlace_fit.GrismSpectrumFit(root=obj, lowz_thresh=0.01, FIGURE_FORMAT='png')
if not gris.status:
continue
gris.zout.z_spec = gris.zout.z_spec*0.+z0
gris.zout.l99 = gris.zout.l99*0.+z0-0.1
gris.zout.u99 = gris.zout.l99+0.2
gris.z_peak = 1
gris.best_fit = gris.best_fit*0+1
gris.phot_zgrid = zgrid
gris.phot_lnprob = lnprob
        try:
            gris.fit_in_steps(dzfirst=0.005, dzsecond=0.0005, zrfirst=(z0-0.2,z0+0.2))
        except Exception:
            continue
if not gris.status:
continue
#### Emission line fit
        try:
            gris.fit_free_emlines(ztry=gris.z_max_spec, verbose=True, NTHREADS=1, NWALKERS=50, NSTEP=100, FIT_REDSHIFT=False, FIT_WIDTH=False, line_width0=100)
        except Exception:
            continue
status = os.system('cat %s.linefit.dat' %(obj))
        print('\n -- input --\nSII %6.2f %6.2f' %(s2_flux/1.e-17, s2_eqw))
        print(' Ha %6.2f %6.2f' %(ha_flux/1.e-17, ha_eqw))
def get_results(force_new=False):
"""
    Collate the results from the simulated spectra and the input catalogs into
    single output catalogs suitable for reading and plotting.  Example usage:
for field in ['AEGIS','COSMOS','UDS','GOODS-S']:
os.chdir(unicorn.GRISM_HOME+'%s/PREP_FLT' %(field))
unicorn.intersim.get_results()
os.chdir(unicorn.GRISM_HOME+'SIMULATIONS')
status = os.system('cat ../AEGIS/PREP_FLT/simspec.dat ../COSMOS/PREP_FLT/simspec.dat ../GOODS-S/PREP_FLT/simspec.dat ../UDS/PREP_FLT/simspec.dat > all_simspec.dat')
"""
import threedhst.catIO as catIO
files=glob.glob('*linefit.dat')
cat = None
    if (not os.path.exists('simspec.dat')) or force_new:
fp = open('simspec.dat','w')
fp.write('# object sky_avg sky_lo sky_hi mag r50 r90 z_fit continuum_sn ha_flux ha_flux_err ha_eqw ha_eq_err s2_flux s2_flux_err s2_eqw s2_eq_err\n')
fp.write('dummy 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n')
fp.close()
log = catIO.Readfile('simspec.dat')
    for ii, linefit_file in enumerate(files):
        root = linefit_file.split('.linefit')[0]
        print(unicorn.noNewLine+'%s (%d/%d)' %(root, ii+1, len(files)))
if root in log.object:
continue
#
fp = open('simspec.dat','a')
pointing = root.split('_')[0]
id = int(root.split('_')[1])
if cat is None:
cat = threedhst.sex.mySexCat(pointing+'_inter.cat')
### Get sky background
asn = threedhst.utils.ASNFile(pointing+'-G141_asn.fits')
bg = []
for exp in asn.exposures:
flt = pyfits.open(exp+'_flt.fits')
bg.append(flt[0].header['SKYSCALE'])
#
bg_avg = np.mean(bg)
bg_lo = np.min(bg)
bg_hi = np.max(bg)
else:
if not cat.filename.startswith(pointing+'-'):
cat = threedhst.sex.mySexCat(pointing+'_inter.cat')
asn = threedhst.utils.ASNFile(pointing+'-G141_asn.fits')
bg = []
for exp in asn.exposures:
flt = pyfits.open(exp+'_flt.fits')
bg.append(flt[0].header['SKYSCALE'])
#
bg_avg = np.mean(bg)
bg_lo = np.min(bg)
bg_hi = np.max(bg)
#
gris = unicorn.interlace_fit.GrismSpectrumFit(root, verbose=False)
if not gris.status:
fp.close()
continue
#
result = gris.stats()
if result is False:
fp.close()
continue
#
DIRECT_MAG, Q_Z, F_COVER, F_FLAGGED, MAX_CONTAM, INT_CONTAM, F_NEGATIVE = result
#
lwindow = (gris.oned.data.wave > 1.4e4) & (gris.oned.data.wave < 1.6e4)
        if (lwindow.sum() < 10) or (INT_CONTAM > 0.3):
fp.close()
continue
#
continuum_sn = np.median((gris.oned.data.flux/gris.oned.data.error)[lwindow])
#
lfit = catIO.Readfile(root+'.linefit.dat')
if lfit.status is None:
fp.close()
continue
#
if 'Ha' in lfit.line:
ix = np.arange(len(lfit.line))[lfit.line == 'Ha'][0]
ha_flux, ha_flux_err, ha_eqw, ha_eqw_err = lfit.flux[ix], lfit.error[ix], lfit.eqw_obs[ix], lfit.eqw_obs_err[ix]
else:
ha_flux, ha_flux_err, ha_eqw, ha_eqw_err = -1,-1,-1,-1
#
if 'SII' in lfit.line:
ix = np.arange(len(lfit.line))[lfit.line == 'SII'][0]
s2_flux, s2_flux_err, s2_eqw, s2_eqw_err = lfit.flux[ix], lfit.error[ix], lfit.eqw_obs[ix], lfit.eqw_obs_err[ix]
else:
s2_flux, s2_flux_err, s2_eqw, s2_eqw_err = -1,-1,-1,-1
#
ic = np.arange(cat.nrows)[cat.id == id][0]
fp.write(' %s %5.2f %5.2f %5.2f %6.3f %6.2f %6.2f %6.4f %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f\n' %(root, bg_avg, bg_lo, bg_hi, DIRECT_MAG, float(cat.FLUX_RADIUS[ic]), float(cat.FLUX_RADIUS2[ic]), gris.z_max_spec, continuum_sn, ha_flux, ha_flux_err, ha_eqw, ha_eqw_err, s2_flux, s2_flux_err, s2_eqw, s2_eqw_err))
#
fp.close()
def show_results(use_tex=False):
import threedhst.catIO as catIO
stats = catIO.Readfile('all_simspec.dat')
ha_model, s2_model = unicorn.intersim.get_line_fluxes(z0=1.0, mag=stats.mag)
xstar = [14.5, 24.1]
ystar = [3.00, 2.13]
yi = np.interp(stats.mag, xstar, ystar)
#plt.scatter(stats.mag, yi, s=0.1, color='black')
is_star = stats.r50 < yi
plt.scatter(stats.mag[is_star], stats.r50[is_star], alpha=0.5)
plt.scatter(stats.mag[~is_star], stats.r50[~is_star], alpha=0.2, color='red')
#### Color by r50/r90 concentration
concentration = stats.r50/stats.r90
msize = np.maximum((concentration/0.2)**4,4)
mcol = np.minimum((np.maximum(concentration,0.3)-0.3)/0.2,1)
plt.scatter(stats.mag, concentration, c=mcol, alpha=0.5)
mcol = np.minimum(np.log10(stats.r50-1.1),1)
stats.sky_avg += np.random.normal(size=stats.sky_avg.shape)*0.01
sky_col = np.minimum((stats.sky_avg - 0.8)/0.8,1)
plt.scatter(stats.mag, stats.sky_avg, c=sky_col, alpha=0.5)
#### Continuum depth
BINWIDTH=92
    bin_sn = np.sqrt(BINWIDTH/22.)
binned = stats.continuum_sn*bin_sn
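    #### For uncorrelated pixels, binning N pixels boosts the S/N by sqrt(N);
    #### with the ~22 A per pixel assumed above, a 92 A bin gives
    #### sqrt(92/22.) ~ 2.05 (the float literal avoids integer division):
    #print(np.sqrt(92/22.))  # ~2.045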
#### Get correction functions
xm, ym, ys, nn = threedhst.utils.runmed(stats.mag, binned, NBIN=80)
ymag = np.interp(stats.mag, xm, ym)
sub = (stats.mag > 19) & (stats.mag < 22.5) & (stats.continuum_sn > 0) & (stats.ha_flux > 0) #& (~is_star)
xm, ym, ys, nn = threedhst.utils.runmed(stats.r50[sub], (binned/ymag)[sub], NBIN=20)
ysize = np.interp(stats.r50, xm, ym)
xm, ym, ys, nn = threedhst.utils.runmed(stats.sky_avg[sub], (binned/ymag/ysize)[sub], NBIN=25)
ysky = np.interp(stats.sky_avg, xm, ym)
xm, ym, ys, nn = threedhst.utils.runmed(concentration[sub], (binned/ymag/ysize/ysky)[sub], NBIN=25)
ycons = np.interp(concentration, xm, ym)
fig = unicorn.catalogs.plot_init(xs=8, aspect=1./4, left=0.07, use_tex=use_tex)
#fig.subplots_adjust(wspace=0.27, hspace=0.25, left=0.12) # 2x2
fig.subplots_adjust(wspace=0.38, hspace=0.25, left=0.074, bottom=0.22)
si = 4
mark = 'o'
cmap = cm.jet
bins = [80,80]
ax = fig.add_subplot(141)
#plt.scatter(stats.mag, stats.continuum_sn*bin_sn, alpha=0.5, c=mcol)
use = np.isfinite(binned) & (binned > 0)
#plt.scatter(stats.mag[use], (binned/ysize/ysky)[use], alpha=0.5, c=mcol[use], s=si, marker=mark)
unicorn.intersim.show_hist_contour(stats.mag[use], (binned/ysize/ysky)[use], axrange=[[20,24],[0.5,100]], ylog=True, cmap=cmap, bins=bins)
xm, ym, ys, nn = threedhst.utils.runmed(stats.mag[use], (binned/ysize/ysky)[use], NBIN=80)
plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
plt.ylim(0.5,100)
plt.plot([20,24],[5,5], color='black', alpha=0.4)
plt.xlim(20,24)
plt.semilogy()
if use_tex:
plt.xlabel(r'MAG\_AUTO $m_{140}$')
else:
plt.xlabel(r'MAG_AUTO $m_{140}$')
plt.ylabel('continuum S/N')
ax.xaxis.set_major_locator(unicorn.analysis.MyLocator(6, integer=True))
ax.xaxis.set_minor_locator(MultipleLocator(0.5))
ax.set_yticks([1,10,100]); ax.set_yticklabels(['1','10','100'])
sn5_limit = np.interp(5,ym[::-1],xm[::-1])
    print('Continuum, S/N=5 @ %.3f' %(sn5_limit))
    print(threedhst.utils.biweight(stats.r50[sub], both=True))
ax = fig.add_subplot(142)
#plt.scatter(stats.r50[sub], (binned/ymag/ysky)[sub], c=mcol[sub], alpha=0.5, s=si)
unicorn.intersim.show_hist_contour(stats.r50[sub]*0.06, (binned/ymag/ysky)[sub], axrange=[[0,20*0.06],[0.3,1.7]], bins=bins, cmap=cmap)
xm, ym, ys, nn = threedhst.utils.runmed(stats.r50[sub]*0.06, (binned/ymag/ysky)[sub], NBIN=20)
plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
plt.fill_betweenx([0,10],[1.7*0.06,1.7*0.06],[2.5*0.06,2.5*0.06], alpha=0.15, color='black')
#plt.xlabel(r'$R_{50}$ [$0.\!\!^{\prime\prime}06$ pix]')
plt.xlabel(r'$R_{50}$ [arcsec]')
plt.ylabel(r'$\delta$ cont. S/N')
plt.ylim(0.3,1.7)
#plt.ylim(0.3,2.5)
plt.xlim(0,15*0.06)
majorLocator = MultipleLocator(0.2)
minorLocator = MultipleLocator(0.1)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_minor_locator(minorLocator)
# x0 = np.interp(1,ym[::-1],xm[::-1])
# plt.plot(xm,(x0/xm), color='red')
# plt.plot(xm,(x0/xm)**0.5, color='red')
x0 = np.interp(1,ym[::-1],xm[::-1])
plt.plot(xm,(x0/xm)**(0.5), color='white', alpha=0.5, linewidth=2)
plt.plot(xm,(x0/xm)**(0.5), color='red', alpha=0.8)
ysize = np.interp(stats.r50*0.06, xm, ym)
# plt.scatter(stats.r50[sub], (binned/ymag/ysize)[sub], c=sky_col[sub], alpha=0.5)
# xm, ym, ys, nn = threedhst.utils.runmed(stats.r50[sub], (binned/ymag/ysize)[sub], NBIN=10)
# plt.plot(xm, ym, linewidth=2, color='black', alpha=0.5)
ax = fig.add_subplot(143)
#plt.scatter(stats.sky_avg[sub], (binned/ymag/ysize)[sub], c=mcol[sub], alpha=0.5, s=si)
unicorn.intersim.show_hist_contour(stats.sky_avg[sub], (binned/ymag/ysize)[sub], axrange=[[0.5,3.5],[0.3,1.7]], bins=bins, cmap=cmap)
xm, ym, ys, nn = threedhst.utils.runmed(stats.sky_avg[sub], (binned/ymag/ysize)[sub], NBIN=25)
plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
plt.ylim(0.3,1.7)
plt.xlim(0.5,3.5)
plt.xlabel(r'Background [e$^-$ / s]')
plt.ylabel(r'$\delta$ cont S/N')
ax.xaxis.set_major_locator(unicorn.analysis.MyLocator(6, integer=True))
x0 = np.interp(1,ym[::-1],xm[::-1])
plt.plot(xm,(x0/xm)**(0.5), color='white', alpha=0.5, linewidth=2)
plt.plot(xm,(x0/xm)**(0.5), color='red', alpha=0.7)
ysky = np.interp(stats.sky_avg, xm, ym)
### Very little residual trend with concentration
ax = fig.add_subplot(144)
#plt.scatter(concentration[sub], (binned/ymag/ysize/ysky)[sub], c=mcol[sub], s=si, alpha=0.5)
unicorn.intersim.show_hist_contour(concentration[sub], (binned/ymag/ysize/ysky)[sub], axrange=[[0.25,0.60],[0.3,1.7]], bins=bins, cmap=cmap)
xm, ym, ys, nn = threedhst.utils.runmed(concentration[sub], (binned/ymag/ysize/ysky)[sub], NBIN=25)
plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
plt.xlim(0.25,0.60)
plt.ylim(0.3,1.7)
#plt.ylim(0.5,1.5)
plt.xlabel(r'$C = R_{50}/R_{90}$')
plt.ylabel(r'$\delta$ cont S/N')
#ax.xaxis.set_major_locator(unicorn.analysis.MyLocator(5, prune=None))
ax.xaxis.set_major_locator(MultipleLocator(0.1))
ycons = np.interp(concentration, xm, ym)
plt.savefig('grism_cont_sensitivity.pdf')
# #### Test
# plt.scatter(stats.mag, binned, alpha=0.5, c=sky_col, s=4)
# xm, ym, ys, nn = threedhst.utils.runmed(stats.mag, binned, NBIN=80)
# plt.errorbar(xm, ym, ys, linewidth=2, color='black', alpha=0.5)
# plt.ylim(0.1,2000)
# plt.plot([17,24],[5,5], color='black', alpha=0.4)
# plt.xlim(17,24)
# plt.semilogy()
#### Line fluxes
ha_sn = stats.ha_flux/stats.ha_flux_err
show = np.isfinite(ha_sn) & (ha_sn > 0) & (stats.ha_flux > 0)
xm, ym, ys, nn = threedhst.utils.runmed(stats.ha_flux[~is_star & show], ha_sn[~is_star & show], NBIN=25)
yline_flux = np.interp(stats.ha_flux, xm, ym)
#sub = (stats.ha_flux > 6) & (stats.ha_flux < 100) & (stats.mag > 18) & (np.isfinite(ha_sn)) # & (~is_star)
#sub = (stats.mag > 19) & (stats.mag < 22.5) & (stats.continuum_sn > 0) & (stats.ha_flux > 0) #& (~is_star)
xm, ym, ys, nn = threedhst.utils.runmed(stats.r50[sub], (ha_sn/yline_flux)[sub], NBIN=30)
yline_r50 = np.interp(stats.r50, xm, ym)
xm, ym, ys, nn = threedhst.utils.runmed(stats.sky_avg[sub], (ha_sn/yline_flux/yline_r50)[sub], NBIN=20)
yline_sky = np.interp(stats.sky_avg, xm, ym)
xm, ym, ys, nn = threedhst.utils.runmed(concentration[sub], (ha_sn/yline_flux/yline_r50/yline_sky)[sub], NBIN=10)
yline_con = np.interp(concentration, xm, ym)
plt.errorbar(ha_model, stats.ha_flux, stats.ha_flux_err, marker='o', markersize=0.1, linestyle='None', color='0.5')
plt.scatter(ha_model, stats.ha_flux, c=mcol, zorder=100, alpha=0.5)
#plt.scatter(stats.s2_flux, s2_model, alpha=0.8, c=mc)
plt.plot([0.1,1000],[0.1,1000], color='black', alpha=0.5)
plt.xlim(0.5,1000)
plt.ylim(0.5,1000)
plt.loglog()
# 2x2
#fig = unicorn.catalogs.plot_init(xs=5.5, aspect=1, left=0.08)
#fig.subplots_adjust(wspace=0.27, hspace=0.25, left=0.12)
fig = unicorn.catalogs.plot_init(xs=8, aspect=1./4, left=0.07, use_tex=use_tex)
fig.subplots_adjust(wspace=0.38, hspace=0.25, left=0.074, bottom=0.22)
ax = fig.add_subplot(141)
si = 4
show = np.isfinite(ha_sn) & (ha_sn > 0) & (stats.ha_flux > 0)
#plt.scatter(stats.ha_flux[show], ha_sn[show], c=mcol[show], s=si, zorder=100, alpha=0.3)
unicorn.intersim.show_hist_contour(stats.ha_flux[show], (ha_sn/yline_r50/yline_sky/yline_con)[show], axrange=[[0.5,100],[0.5,100]], bins=bins, cmap=cmap, xlog=True, ylog=True)
xm, ym, ys, nn = threedhst.utils.runmed(stats.ha_flux[~is_star & show], (ha_sn/yline_r50/yline_sky/yline_con)[~is_star & show], NBIN=25)
plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
plt.plot([0.5,100],[5,5], color='black', alpha=0.4)
plt.xlim(0.5,100)
plt.ylim(0.5,100)
plt.loglog()
plt.xlabel(r'line flux [$10^{-17}$ ergs / s / cm$^2$]')
plt.ylabel('line S/N')
ax.set_yticks([1,10,100]); ax.set_yticklabels(['1','10','100'])
ax.set_xticks([1,10,100]); ax.set_xticklabels(['1','10','100'])
sn5_limit = np.interp(5,ym,xm)
    print('Line, S/N=5 @ %.3e' %(sn5_limit))
    print(threedhst.utils.biweight(stats.r50[sub], both=True))
yline_flux = np.interp(stats.ha_flux, xm, ym)
#plt.scatter(stats.ha_flux, ha_sn/yline_flux, c=mcol, alpha=0.2)
#### Nice: line flux with respect to concentration after taking out the overall trend with
#### line strength
ax = fig.add_subplot(142)
#plt.scatter(stats.r50[sub], (ha_sn/yline_flux)[sub], c=mcol[sub], s=si, alpha=0.3)
unicorn.intersim.show_hist_contour(stats.r50[sub]*0.06, (ha_sn/yline_flux/yline_sky/yline_con)[sub], axrange=[[0,15*0.06],[0.3,2.5]], bins=bins, cmap=cmap)
xm, ym, ys, nn = threedhst.utils.runmed(stats.r50[sub]*0.06, (ha_sn/yline_flux/yline_sky/yline_con)[sub], NBIN=30)
plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
plt.plot([0,20*0.06],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
plt.fill_betweenx([0,10],[1.7*0.06,1.7*0.06],[2.5*0.06,2.5*0.06], alpha=0.15, color='black')
plt.ylim(0.3,2.5)
plt.xlim(0,15*0.06)
#plt.xlabel(r'$R_{50}$ [$0.\!\!^{\prime\prime}06$ pix]')
plt.ylabel(r'$\delta$ line S/N')
#plt.semilogy()
# x0 = np.interp(1,ym[::-1],xm[::-1])
# plt.plot(xm,(x0/xm), color='red')
# plt.plot(xm,(x0/xm)**0.5, color='red')
plt.xlabel(r'$R_{50}$ [arcsec]')
ax.xaxis.set_major_locator(MultipleLocator(0.2))
ax.xaxis.set_minor_locator(MultipleLocator(0.1))
x0 = np.interp(1,ym[::-1],xm[::-1])
plt.plot(xm,(x0/xm)**(0.5), color='red', alpha=0.7)
yline_r50 = np.interp(stats.r50*0.06, xm, ym)
ax = fig.add_subplot(143)
#plt.scatter(stats.sky_avg[sub], (ha_sn/yline_flux/yline_r50)[sub], c=mcol[sub], s=si, alpha=0.3)
unicorn.intersim.show_hist_contour(stats.sky_avg[sub], (ha_sn/yline_flux/yline_r50/yline_con)[sub], axrange=[[0.5,3.5],[0.3,1.7]], bins=bins, cmap=cmap)
xm, ym, ys, nn = threedhst.utils.runmed(stats.sky_avg[sub], (ha_sn/yline_flux/yline_r50/yline_con)[sub], NBIN=20)
plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
plt.ylim(0.3,1.7)
plt.xlim(0.5,3.5)
plt.xlabel(r'Background [e$^-$ / s]')
plt.ylabel(r'$\delta$ line S/N')
ax.xaxis.set_major_locator(unicorn.analysis.MyLocator(6, integer=True))
yline_sky = np.interp(stats.sky_avg, xm, ym)
x0 = np.interp(1,ym[::-1],xm[::-1])
plt.plot(xm,(x0/xm)**(0.5), color='red', alpha=0.7)
ax = fig.add_subplot(144)
#plt.scatter(concentration[sub], (ha_sn/yline_flux/yline_r50/yline_sky)[sub], c=mcol[sub], s=si, alpha=0.3)
unicorn.intersim.show_hist_contour(concentration[sub], (ha_sn/yline_flux/yline_r50/yline_sky)[sub], axrange=[[0.25,0.60],[0.3,1.7]], bins=bins, cmap=cmap)
xm, ym, ys, nn = threedhst.utils.runmed(concentration[sub], (ha_sn/yline_flux/yline_r50/yline_sky)[sub], NBIN=10)
plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
plt.xlim(0.25,0.60)
plt.ylim(0.3,1.7)
plt.xlabel(r'$C = R_{50}/R_{90}$')
plt.ylabel(r'$\delta$ line S/N')
ax.xaxis.set_major_locator(MultipleLocator(0.1))
yline_con = np.interp(concentration, xm, ym)
plt.savefig('grism_line_sensitivity.pdf')
# #### Test:
# show = (np.isfinite(ha_sn)) & (stats.ha_flux > 0)
# plt.scatter(stats.ha_flux[show], (ha_sn/yline_sky)[show], c=mcol[show], zorder=100, alpha=0.2)
# xm, ym, ys, nn = threedhst.utils.runmed(stats.ha_flux[show], (ha_sn/yline_sky)[show], NBIN=25)
# plt.plot(xm, ym, linewidth=2, color='black', alpha=0.5, zorder=100)
# plt.plot([0.5,1000],[5,5], color='black', alpha=0.4)
# plt.xlim(0.5,1000)
# plt.ylim(0.5,300)
# plt.loglog()
#plt.semilogy()
#
plt.scatter(stats.mag, stats.ha_flux, c=mcol, zorder=100, alpha=0.5)
plt.ylim(0.1,5000)
plt.semilogy()
#### EQW
dha = stats.ha_eqw-130.
hy, hx, hh = plt.hist(dha/stats.ha_eq_err, range=(-5,5), bins=50, alpha=0.7)
threedhst.utils.biweight(dha/stats.ha_eq_err, both=True)
#### redshift
dz = (stats.z_fit-1)/2.
plt.scatter(stats.mag, dz, c=mcol, alpha=0.5)
plt.scatter(stats.ha_flux, dz, c=mcol, alpha=0.5)
plt.xlim(0.1,5000)
plt.semilogx()
#### surface density
mu = stats.mag-2*np.log(stats.r90*0.06)
plt.scatter(stats.mag, mu, c=mcol)
def show_hist_contour(xin, yin, axrange=None, bins=[50,50], xlog=False, ylog=False, ax=None, Vbins=[2, 4, 8, 16, 32, 64, 128, 256, 512, 4096], cmap=cm.jet, fill=True, *args, **kwargs):
import matplotlib.colors as co
if xlog:
xdata = np.log10(xin)
else:
xdata = xin
if ylog:
ydata = np.log10(yin)
else:
ydata = yin
if axrange is None:
axrange = [[np.min(xdata),np.max(xdata)],[np.min(ydata),np.max(ydata)]]
if xlog:
for i in range(2):
axrange[0][i] = np.log10(axrange[0][i])
if ylog:
for i in range(2):
axrange[1][i] = np.log10(axrange[1][i])
hist, xedge, yedge = np.histogram2d(xdata, ydata, bins=bins, range=axrange)
#Vbins = [2, 4, 8, 16, 32, 64, 128, 256, 512, 4096]
values = 1.-np.arange(len(Vbins))*1./len(Vbins)
Vcolors = []
for i in range(len(Vbins)):
Vcolors.append('%f' %(values[i]))
if xlog:
xx = 10**((xedge[:-1]+xedge[1:])/2.)
else:
xx = (xedge[:-1]+xedge[1:])/2.
if ylog:
yy = 10**((yedge[:-1]+yedge[1:])/2.)
else:
yy = (yedge[:-1]+yedge[1:])/2.
norml = co.BoundaryNorm(Vbins, 312)
    if ax is None:
        if fill:
            plt.contourf(xx, yy, hist.transpose(), Vbins, linewidths=2, norm=norml, cmap=cmap, *args, **kwargs)
        else:
            plt.contour(xx, yy, hist.transpose(), Vbins, linewidths=2, norm=norml, cmap=cmap, *args, **kwargs)
    else:
        if fill:
            ax.contourf(xx, yy, hist.transpose(), Vbins, linewidths=2, norm=norml, cmap=cmap, *args, **kwargs)
        else:
            ax.contour(xx, yy, hist.transpose(), Vbins, linewidths=2, norm=norml, cmap=cmap, *args, **kwargs)
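# Minimal usage sketch for show_hist_contour (hypothetical data), to run
# interactively after importing this module:
# x = 10**np.random.uniform(0, 2, 10000)
# y = x*np.random.lognormal(0., 0.3, 10000)
# show_hist_contour(x, y, xlog=True, ylog=True, bins=[40, 40])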
def get_line_fluxes(z0=1.0, mag=21):
"""
Get emission line fluxes for a given continuum magnitude.
"""
    print(z0)
xflux, yline = np.loadtxt(unicorn.GRISM_HOME+'/templates/dobos11/SF0_0.emline.txt', unpack=True)
xflux *= (1+z0)
#### Add continuum, here with level 0.1*max(line)
ycont = yline.max()*0.1
yflux = ycont+yline
#### Normalize to F140W passband
x_filt, y_filt = np.loadtxt(os.getenv('iref')+'/F140W.dat', unpack=True)
y_filt_int = utils_c.interp_c(xflux, x_filt, y_filt)
filt_norm = np.trapz(y_filt_int*yflux, xflux) / np.trapz(y_filt_int, xflux)
yflux /= filt_norm
yline /= filt_norm
ycont /= filt_norm
fnu = 10**(-0.4*(mag+48.6))
flam = fnu*3.e18/(6564.*(1+z0))**2/1.e-17
ha = np.abs(xflux-6564*(1+z0)) < 100
ha_flux = np.trapz(yline[ha], xflux[ha])
s2 = np.abs(xflux-6731*(1+z0)) < 100
s2_flux = np.trapz(yline[s2], xflux[s2])
return ha_flux*flam, s2_flux*flam
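# The conversion above assumes an AB zeropoint: fnu = 10**(-0.4*(mag+48.6)) in
# erg/s/cm2/Hz, then flam = fnu*c/lambda**2 with c = 3e18 A/s, quoted in units
# of 1e-17 erg/s/cm2/A at the observed wavelength of Ha. Hypothetical example:
# ha, s2 = get_line_fluxes(z0=1.0, mag=23.)  # line fluxes in 1e-17 erg/s/cm2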
# #### Trying to figure out units
# plt.plot(gris.twod.im['WAVE'].data, gris.twod.im['SENS'].data)
# plt.plot(unicorn.reduce.sens_files['A'].field('WAVELENGTH'), unicorn.reduce.sens_files['A'].field('SENSITIVITY')*1.e-17*np.median(np.diff(gris.twod.im['WAVE'].data))/2**2)
#
# # test, FLT errors
# flt = pyfits.open('ibhm47gwq_flt.fits')
# err_flt = np.random.normal(size=flt[1].data.shape)*flt[2].data
# mask_flt = (flt[1].data < 0.1) & (err_flt != 0)
# threedhst.utils.biweight(flt[1].data[mask_flt].flatten())
# threedhst.utils.biweight(err_flt[mask_flt].flatten())
|
gbrammer/unicorn
|
intersim.py
|
Python
|
mit
| 28,215
|
[
"Gaussian"
] |
81f14e2dd08ba1cb074e6ebe0a9687db3c5b9a2af76cbbbfd83c683ee166b6e1
|
import sys
from ase.atoms import string2symbols
from asap3 import EMT
from asap3.Tools.ParameterOptimization import ParameterPerformance
from asap3.Tools.ParameterOptimization.EMT import *
from asap3.Tools.MaterialProperties import MaterialPropertiesData
def get_parameters(filename, number=2):
    fp = open(filename)
    text = fp.read()
    fp.close()
# Find elements
s = -1
for i in range(number):
s = text.find('Optimization', s + 1)
s = text.find('\n', s) + 1
e = text.find('parameters', s)
elements = tuple(string2symbols(text[s:e].strip()))
# Find parameters
s = text.find('\n', e) + 1
e = text.find('Fitting', s) - 4
parameters = []
for line in text[s:e].split('\n'):
rows = line.split(' ')
parameters.append(float(rows[2]))
return elements, parameters
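# Hypothetical usage, with a path following the pattern assembled below:
# elements, parameters = get_parameters('Ag_151113/fit-3.dat')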
param_files = {'Ag': (3, '151113'),
'Al': (5, '151113'),
'Au': (2, '151113'),
'Cu': (5, '151113'),
'Ni': (0, '151113'),
'Pd': (6, '151113'),
'Pt_1': (1, '181113'),
#'Pt_2': (3, '181113'),
#'Pt_3': (6, '151113'),
}
temp_metal_prop = [('lattice_constant_a', 'fcc', 'a', 0.001),
('bulk_modulus', 'fcc', 'B', 0.01),
('elastic_anisotropy', 'fcc', 'A', 0.03),
('elastic_constant_C11', 'fcc', 'C11', 0.03),
('elastic_constant_C12', 'fcc', 'C12', 0.03),
('elastic_constant_C44', 'fcc', 'C44', 0.01),
('cohesive_energy', 'fcc', 'Ecoh', 0.001),
('surface_energy', 'fcc111', 'E111', 0.02),
('surface_energy', 'fcc100', 'E100', 0.02),
('surface_ratio', 'fcc111-fcc100', 'E111_100', 0.01),
('stacking_fault', 'fcc', 'Esf', 0.01),
]
mp = MaterialPropertiesData(['properties_metals.dat', 'properties_alloys.dat'])
for i, (m, (id, folder)) in enumerate(param_files.items()):
m = m.split('_')[0]
paramfile = '%s_%s/fit-%i.dat' % (m, folder, id)
e, p = get_parameters(paramfile)
parameters = {e: p}
#parameters = {e: EMTStdParameters(m, 'delta')}
    print(paramfile, parameters)
latticeconstants = [('fcc', m, mp.get(m, 'a'))]
quantities = []
for j, (name, struct, id, weight) in enumerate(temp_metal_prop):
if id == 'E111_100':
value = mp.get(m, 'E111') / mp.get(m, 'E100')
else:
value = mp.get(m, id)
quantities.append((name, struct, m, value, weight))
#calculator = EMT()
calculator = EMT2011Fit([m], parameters, 'delta')
ParameterPerformance(calculator, quantities, latticeconstants, debug=False)
|
auag92/n2dm
|
Asap-3.8.4/Projects/ParameterOptimization/performance.py
|
Python
|
mit
| 2,770
|
[
"ASE"
] |
917ab44796d548e63c68ad4b0c835ab6301fdf881d5adfe5919a121a2bb56808
|
import unittest
from rdkit import Chem
from rdkit.Geometry.rdGeometry import Point3D
from rdkit.Chem.Features.FeatDirUtilsRD import GetDonor2FeatVects
class TestCase(unittest.TestCase):
def assertListAlmostEqual(self, list1, list2, msg, tol=7):
self.assertEqual(len(list1), len(list2), msg)
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, tol, msg)
def setUp(self):
    #Define the molecule used in the tests of GetDonor2FeatVects
self.mol = Chem.MolFromSmiles('C=CCONC')
emol = Chem.RWMol(self.mol)
emol = Chem.AddHs(emol)
emol.AddConformer(Chem.Conformer(15))
emol.GetConformer().SetAtomPosition(0, [-2.8272, -0.2716, 0.4130]) #C
emol.GetConformer().SetAtomPosition(1, [-1.7908, -0.1146, -0.4177]) #C
emol.GetConformer().SetAtomPosition(2, [-0.5452, 0.6287, -0.0653]) #C
emol.GetConformer().SetAtomPosition(3, [0.5603, -0.2584, -0.1671]) #O
emol.GetConformer().SetAtomPosition(4, [1.7601, 0.4902, 0.1811]) #N
emol.GetConformer().SetAtomPosition(5, [2.8427, -0.4743, 0.0559]) #C
emol.GetConformer().SetAtomPosition(6, [-3.7065, -0.8216, 0.0959]) #H
emol.GetConformer().SetAtomPosition(7, [-2.8190, 0.1408, 1.4159]) #H
emol.GetConformer().SetAtomPosition(8, [-1.8508, -0.5462, -1.4133]) #H
emol.GetConformer().SetAtomPosition(9, [-0.6006, 1.0355, 0.9521]) #H
emol.GetConformer().SetAtomPosition(10, [-0.4226, 1.4609, -0.7693]) #H
emol.GetConformer().SetAtomPosition(11, [1.8283, 1.1371, -0.6054]) #H
emol.GetConformer().SetAtomPosition(12, [2.8648, -0.9271, -0.9408]) #H
emol.GetConformer().SetAtomPosition(13, [2.7437, -1.2668, 0.8044]) #H
emol.GetConformer().SetAtomPosition(14, [3.8013, 0.0259, 0.2227]) #H
self.mol = Chem.Mol(emol)
def test1_GetDonor2FeatVects(self):
'''Case 1: two hydrogens'''
conf = self.mol.GetConformer(-1)
case1 = GetDonor2FeatVects(conf, [2], scale=1.5)
pos_heavy_atom = conf.GetAtomPosition(2)
#Check if there are two vectors
self.assertEqual(len(case1[0]), 2, 'Incorrect number of vectors')
#Check initial points of the vectors
self.assertListAlmostEqual(case1[0][0][0], pos_heavy_atom,
'Incorrect starting point of vector 1')
self.assertListAlmostEqual(case1[0][1][0], pos_heavy_atom,
'Incorrect starting point of vector 2')
#Check directions of the vectors
vec_h1 = conf.GetAtomPosition(9) - pos_heavy_atom
vec_h2 = conf.GetAtomPosition(10) - pos_heavy_atom
vec_1 = case1[0][0][1] - case1[0][0][0]
vec_2 = case1[0][1][1] - case1[0][1][0]
self.assertListAlmostEqual(vec_1.CrossProduct(vec_h1), Point3D(0,0,0),
'Incorrect direction of vector 1')
self.assertTrue(vec_1.DotProduct(vec_h1) > 0,
'Incorrect direction of vector 1')
self.assertListAlmostEqual(vec_2.CrossProduct(vec_h2), Point3D(0,0,0),
'Incorrect direction of vector 2')
self.assertTrue(vec_2.DotProduct(vec_h2) > 0,
'Incorrect direction of vector 2')
#Check length of the vectors
self.assertAlmostEqual(vec_1.Length(), 1.5,
msg='Incorrect length of vector 1')
self.assertAlmostEqual(vec_2.Length(), 1.5,
msg='Incorrect length of vector 2')
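  # The direction checks in these tests rely on two identities: vectors are
  # parallel iff their cross product is the zero vector, and point the same
  # way iff their dot product is positive. A small sketch (hypothetical values):
  #   v = Point3D(1., 2., 3.); w = Point3D(2., 4., 6.)
  #   v.CrossProduct(w)  # -> Point3D(0, 0, 0)
  #   v.DotProduct(w)    # -> 28.0 (> 0)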
def test2_1_GetDonor2FeatVects(self):
'''Case 2.1: one hydrogen with sp2 arrangement'''
conf = self.mol.GetConformer(-1)
case21 = GetDonor2FeatVects(conf, [1], scale=1.5)
pos_heavy_atom = conf.GetAtomPosition(1)
#Check if there is one vector
self.assertEqual(len(case21[0]), 1, 'Incorrect number of vectors')
#Check initial point of the vector
self.assertListAlmostEqual(case21[0][0][0], pos_heavy_atom,
'Incorrect starting point of vector')
#Check direction of the vector
vec_h = conf.GetAtomPosition(8) - (pos_heavy_atom)
vec = case21[0][0][1] - case21[0][0][0]
self.assertListAlmostEqual(vec.CrossProduct(vec_h), Point3D(0,0,0),
'Incorrect direction of vector')
self.assertTrue(vec.DotProduct(vec_h) > 0,
'Incorrect direction of vector')
#Check length of the vector
self.assertAlmostEqual(vec.Length(), 1.5,
msg='Incorrect length of vector')
def test2_2_GetDonor2FeatVects(self):
    '''Case 2.2: one hydrogen with sp3 arrangement'''
conf = self.mol.GetConformer(-1)
case22 = GetDonor2FeatVects(conf, [4], scale=1.5)
pos_heavy_atom = conf.GetAtomPosition(4)
#Check if there are two vectors
self.assertEqual(len(case22[0]), 2, 'Incorrect number of vectors')
#Check initial points of the vectors
self.assertListAlmostEqual(case22[0][0][0], pos_heavy_atom,
'Incorrect starting point of vector 1')
self.assertListAlmostEqual(case22[0][1][0], pos_heavy_atom,
'Incorrect starting point of vector 2')
#Check directions of the vectors
vec_h = conf.GetAtomPosition(11) - pos_heavy_atom
vec_nbr1 = conf.GetAtomPosition(3) - pos_heavy_atom
vec_nbr1.Normalize()
vec_nbr2 = conf.GetAtomPosition(5) - pos_heavy_atom
vec_nbr2.Normalize()
avg_vec = (vec_nbr1 + vec_nbr2)
vec_1 = case22[0][0][1] - case22[0][0][0]
vec_2 = case22[0][1][1] - case22[0][1][0]
self.assertListAlmostEqual(vec_1.CrossProduct(vec_h), Point3D(0,0,0),
'Incorrect direction of vector 1')
self.assertTrue(vec_1.DotProduct(vec_h) > 0,
'Incorrect direction of vector 1')
self.assertListAlmostEqual(vec_2.CrossProduct(avg_vec), Point3D(0,0,0),
'Incorrect direction of vector 2')
self.assertTrue(vec_2.DotProduct(avg_vec) < 0,
'Incorrect direction of vector 2')
#Check length of the vectors
self.assertAlmostEqual(vec_1.Length(), 1.5,
msg='Incorrect length of vector 1')
self.assertAlmostEqual(vec_2.Length(), 1.5,
msg='Incorrect length of vector 2')
def test3_GetDonor2FeatVects(self):
'''Case 3: no hydrogens'''
conf = self.mol.GetConformer(-1)
case3 = GetDonor2FeatVects(conf, [3], scale=1.5)
pos_heavy_atom = conf.GetAtomPosition(3)
#Check if there is one vector
self.assertEqual(len(case3[0]), 1, 'Incorrect number of vectors')
#Check initial point of the vector
self.assertListAlmostEqual(case3[0][0][0], pos_heavy_atom,
'Incorrect starting point of vector')
#Check direction of the vector
vec_nbr1 = conf.GetAtomPosition(2) - pos_heavy_atom
vec_nbr1.Normalize()
vec_nbr2 = conf.GetAtomPosition(4) - pos_heavy_atom
vec_nbr2.Normalize()
avg_vec = (vec_nbr1 + vec_nbr2)
vec = case3[0][0][1] - case3[0][0][0]
self.assertListAlmostEqual(vec.CrossProduct(avg_vec), Point3D(0,0,0),
'Incorrect direction of vector')
self.assertTrue(vec.DotProduct(avg_vec) < 0,
'Incorrect direction of vector')
#Check length of the vector
self.assertAlmostEqual(vec.Length(), 1.5,
msg='Incorrect length of vector')
if __name__ == '__main__':
unittest.main()
|
rdkit/rdkit
|
rdkit/Chem/Features/UnitTestFeatDirUtilsRD.py
|
Python
|
bsd-3-clause
| 7,519
|
[
"RDKit"
] |
276e6055ca17e7388ed212abba0b76c20bb08949e20b037d539e44a42778649a
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Created on Mar 18, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 18, 2012"
import unittest
import os
import warnings
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone, \
SimpleVaspToComputedEntryDrone, GaussianToComputedEntryDrone
from pymatgen.entries.computed_entries import ComputedStructureEntry
class VaspToComputedEntryDroneTest(unittest.TestCase):
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__), "..", "..",
"..", "..", 'test_files')
self.drone = VaspToComputedEntryDrone(data=["efermi"])
self.structure_drone = VaspToComputedEntryDrone(True)
def test_get_valid_paths(self):
for path in os.walk(self.test_dir):
if path[0] == self.test_dir:
self.assertTrue(len(self.drone.get_valid_paths(path)) > 0)
def test_assimilate(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
entry = self.drone.assimilate(self.test_dir)
for p in ["hubbards", "is_hubbard", "potcar_spec", "run_type"]:
self.assertIn(p, entry.parameters)
self.assertAlmostEqual(entry.data["efermi"], -6.62148548)
self.assertEqual(entry.composition.reduced_formula, "Xe")
self.assertAlmostEqual(entry.energy, 0.5559329)
entry = self.structure_drone.assimilate(self.test_dir)
self.assertEqual(entry.composition.reduced_formula, "Xe")
self.assertAlmostEqual(entry.energy, 0.5559329)
self.assertIsInstance(entry, ComputedStructureEntry)
self.assertIsNotNone(entry.structure)
# self.assertEqual(len(entry.parameters["history"]), 2)
def test_to_from_dict(self):
d = self.structure_drone.as_dict()
drone = VaspToComputedEntryDrone.from_dict(d)
self.assertEqual(type(drone), VaspToComputedEntryDrone)
class SimpleVaspToComputedEntryDroneTest(unittest.TestCase):
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__), "..", "..",
"..", "..", 'test_files')
self.drone = SimpleVaspToComputedEntryDrone()
self.structure_drone = SimpleVaspToComputedEntryDrone(True)
def test_get_valid_paths(self):
for path in os.walk(self.test_dir):
if path[0] == self.test_dir:
self.assertTrue(len(self.drone.get_valid_paths(path)) > 0)
def test_to_from_dict(self):
d = self.structure_drone.as_dict()
drone = SimpleVaspToComputedEntryDrone.from_dict(d)
self.assertEqual(type(drone), SimpleVaspToComputedEntryDrone)
class GaussianToComputedEntryDroneTest(unittest.TestCase):
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__), "..", "..",
"..", "..", 'test_files', "molecules")
self.drone = GaussianToComputedEntryDrone(data=["corrections"])
self.structure_drone = GaussianToComputedEntryDrone(True)
def test_get_valid_paths(self):
for path in os.walk(self.test_dir):
if path[0] == self.test_dir:
self.assertTrue(len(self.drone.get_valid_paths(path)) > 0)
def test_assimilate(self):
test_file = os.path.join(self.test_dir, "methane.log")
entry = self.drone.assimilate(test_file)
for p in ["functional", "basis_set", "charge",
"spin_multiplicity", "route_parameters"]:
self.assertIn(p, entry.parameters)
for p in ["corrections"]:
self.assertIn(p, entry.data)
self.assertEqual(entry.composition.reduced_formula, "H4C")
self.assertAlmostEqual(entry.energy, -39.9768775602)
entry = self.structure_drone.assimilate(test_file)
self.assertEqual(entry.composition.reduced_formula, "H4C")
self.assertAlmostEqual(entry.energy, -39.9768775602)
self.assertIsInstance(entry, ComputedStructureEntry)
self.assertIsNotNone(entry.structure)
for p in ["properly_terminated", "stationary_type"]:
self.assertIn(p, entry.data)
def test_to_from_dict(self):
d = self.structure_drone.as_dict()
drone = GaussianToComputedEntryDrone.from_dict(d)
self.assertEqual(type(drone), GaussianToComputedEntryDrone)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
dongsenfo/pymatgen
|
pymatgen/apps/borg/tests/test_hive.py
|
Python
|
mit
| 4,722
|
[
"pymatgen"
] |
2c178db24cbb230532361bd202942567d06a615ebfac464f66ffc806afc9163b
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License
"""
Module for reading Lobster output files. For more information
on LOBSTER see www.cohp.de.
"""
import collections
import fnmatch
import itertools
import os
import re
import warnings
from collections import defaultdict
from typing import Dict, Any, Optional, List
import numpy as np
import spglib
from monty.io import zopen
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.bandstructure import LobsterBandStructureSymmLine
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.electronic_structure.dos import Dos, LobsterCompleteDos
from pymatgen.io.vasp.inputs import Incar, Kpoints, Potcar
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.symmetry.bandstructure import HighSymmKpath
__author__ = "Janine George, Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.2"
__maintainer__ = "Janine George, Marco Esters "
__email__ = "janine.george@uclouvain.be, esters@uoregon.edu"
__date__ = "Dec 13, 2017"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class Cohpcar:
"""
Class to read COHPCAR/COOPCAR files generated by LOBSTER.
.. attribute: cohp_data
Dict that contains the COHP data of the form:
{bond: {"COHP": {Spin.up: cohps, Spin.down:cohps},
"ICOHP": {Spin.up: icohps, Spin.down: icohps},
"length": bond length,
"sites": sites corresponding to the bond}
Also contains an entry for the average, which does not have
a "length" key.
.. attribute: efermi
The Fermi energy in eV.
.. attribute: energies
Sequence of energies in eV. Note that LOBSTER shifts the energies
so that the Fermi energy is at zero.
.. attribute: is_spin_polarized
Boolean to indicate if the calculation is spin polarized.
.. attribute: orb_res_cohp
orb_cohp[label] = {bond_data["orb_label"]: {"COHP": {Spin.up: cohps, Spin.down:cohps},
"ICOHP": {Spin.up: icohps, Spin.down: icohps},
"orbitals": orbitals,
"length": bond lengths,
"sites": sites corresponding to the bond}}
"""
def __init__(self, are_coops: bool = False, filename: str = None):
"""
Args:
are_coops: Determines if the file is a list of COHPs or COOPs.
Default is False for COHPs.
filename: Name of the COHPCAR file. If it is None, the default
file name will be chosen, depending on the value of are_coops.
"""
self.are_coops = are_coops
if filename is None:
filename = "COOPCAR.lobster" if are_coops \
else "COHPCAR.lobster"
with zopen(filename, "rt") as f:
contents = f.read().split("\n")
# The parameters line is the second line in a COHPCAR file. It
# contains all parameters that are needed to map the file.
parameters = contents[1].split()
# Subtract 1 to skip the average
num_bonds = int(parameters[0]) - 1
self.efermi = float(parameters[-1])
if int(parameters[1]) == 2:
spins = [Spin.up, Spin.down]
self.is_spin_polarized = True
else:
spins = [Spin.up]
self.is_spin_polarized = False
# The COHP data start in row num_bonds + 3
data = np.array([np.array(row.split(), dtype=float) for row in contents[num_bonds + 3:]]).transpose()
self.energies = data[0]
cohp_data = {"average": {"COHP": {spin: data[1 + 2 * s * (num_bonds + 1)]
for s, spin in enumerate(spins)},
"ICOHP": {spin: data[2 + 2 * s * (num_bonds + 1)]
for s, spin in enumerate(spins)}}} # type: Dict[Any, Any]
orb_cohp = {} # type: Dict[str, Any]
# present for Lobster versions older than Lobster 2.2.0
veryold = False
# the labeling had to be changed: there can be more than one COHP for each atom combination
# this is done to make the labeling consistent with ICOHPLIST.lobster
bondnumber = 0
for bond in range(num_bonds):
bond_data = self._get_bond_data(contents[3 + bond])
label = str(bondnumber)
orbs = bond_data["orbitals"]
cohp = {spin: data[2 * (bond + s * (num_bonds + 1)) + 3]
for s, spin in enumerate(spins)}
icohp = {spin: data[2 * (bond + s * (num_bonds + 1)) + 4]
for s, spin in enumerate(spins)}
if orbs is None:
bondnumber = bondnumber + 1
label = str(bondnumber)
cohp_data[label] = {"COHP": cohp, "ICOHP": icohp,
"length": bond_data["length"],
"sites": bond_data["sites"]}
elif label in orb_cohp:
orb_cohp[label].update(
{bond_data["orb_label"]: {"COHP": cohp, "ICOHP": icohp, "orbitals": orbs,
"length": bond_data["length"], "sites": bond_data["sites"]}})
else:
# present for Lobster versions older than Lobster 2.2.0
if bondnumber == 0:
veryold = True
if veryold:
bondnumber += 1
label = str(bondnumber)
orb_cohp[label] = {bond_data["orb_label"]: {"COHP": cohp,
"ICOHP": icohp,
"orbitals": orbs,
"length": bond_data["length"],
"sites": bond_data["sites"]}}
# present for lobster older than 2.2.0
if veryold:
for bond_str in orb_cohp:
cohp_data[bond_str] = {"COHP": None, "ICOHP": None,
"length": bond_data["length"],
"sites": bond_data["sites"]}
self.orb_res_cohp = orb_cohp if orb_cohp else None
self.cohp_data = cohp_data
@staticmethod
def _get_bond_data(line: str) -> dict:
"""
Subroutine to extract bond label, site indices, and length from
a LOBSTER header line. The site indices are zero-based, so they
can be easily used with a Structure object.
Example header line: No.4:Fe1->Fe9(2.4524893531900283)
Example header line for orbital-resolved COHP:
No.1:Fe1[3p_x]->Fe2[3d_x^2-y^2](2.456180552772262)
Args:
line: line in the COHPCAR header describing the bond.
Returns:
Dict with the bond label, the bond length, a tuple of the site
indices, a tuple containing the orbitals (if orbital-resolved),
and a label for the orbitals (if orbital-resolved).
"""
orb_labs = ["s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2",
"d_xz", "d_x^2-y^2", "f_y(3x^2-y^2)", "f_xyz",
"f_yz^2", "f_z^3", "f_xz^2", "f_z(x^2-y^2)", "f_x(x^2-3y^2)"]
line_new = line.rsplit("(", 1)
# bondnumber = line[0].replace("->", ":").replace(".", ":").split(':')[1]
length = float(line_new[-1][:-1])
sites = line_new[0].replace("->", ":").split(":")[1:3]
site_indices = tuple(int(re.split(r"\D+", site)[1]) - 1
for site in sites)
# species = tuple(re.split(r"\d+", site)[0] for site in sites)
if "[" in sites[0]:
orbs = [re.findall(r"\[(.*)\]", site)[0] for site in sites]
orbitals = [tuple((int(orb[0]), Orbital(orb_labs.index(orb[1:])))) for orb in
orbs] # type: Any
orb_label = "%d%s-%d%s" % (orbitals[0][0], orbitals[0][1].name,
orbitals[1][0], orbitals[1][1].name) # type: Any
else:
orbitals = None
orb_label = None
# a label based on the species alone is not feasible, there can be more than one bond for each atom combination
# label = "%s" % (bondnumber)
bond_data = {"length": length, "sites": site_indices,
"orbitals": orbitals, "orb_label": orb_label}
return bond_data
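# Usage sketch (not part of the original module): reading a COHPCAR produced by
# LOBSTER and pulling out the averaged COHP curve. The file name and the
# "average" key follow from the class above; the run directory is an assumption.
#
#     cohpcar = Cohpcar(are_coops=False, filename="COHPCAR.lobster")
#     energies = cohpcar.energies  # eV, shifted so that E_F = 0
#     avg_cohp_up = cohpcar.cohp_data["average"]["COHP"][Spin.up]
#     first_bond = cohpcar.cohp_data["1"]  # per-bond entries also carry "length" and "sites"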
class Icohplist:
"""
Class to read ICOHPLIST/ICOOPLIST files generated by LOBSTER.
.. attribute: are_coops
Boolean to indicate if the populations are COOPs or COHPs.
.. attribute: is_spin_polarized
Boolean to indicate if the calculation is spin polarized.
.. attribute: Icohplist
Dict containing the listfile data of the form:
{bond: "length": bond length,
"number_of_bonds": number of bonds
"icohp": {Spin.up: ICOHP(Ef) spin up, Spin.down: ...}}
.. attribute: IcohpCollection
IcohpCollection Object
"""
def __init__(self, are_coops: bool = False, filename: str = None):
"""
Args:
are_coops: Determines if the file is a list of ICOHPs or ICOOPs.
Defaults to False for ICOHPs.
filename: Name of the ICOHPLIST file. If it is None, the default
file name will be chosen, depending on the value of are_coops.
"""
self.are_coops = are_coops
if filename is None:
filename = "ICOOPLIST.lobster" if are_coops \
else "ICOHPLIST.lobster"
# LOBSTER list files have an extra trailing blank line
# and we don't need the header.
with zopen(filename, 'rt') as f:
data = f.read().split("\n")[1:-1]
if len(data) == 0:
raise IOError("ICOHPLIST file contains no data.")
# Which Lobster version?
if len(data[0].split()) == 8:
version = '3.1.1'
elif len(data[0].split()) == 6:
version = '2.2.1'
warnings.warn('Please consider using the new Lobster version. See www.cohp.de.')
else:
raise ValueError("Unexpected number of columns in ICOHPLIST; cannot determine the Lobster version.")
# If the calculation is spin polarized, the line in the middle
# of the file will be another header line.
if "distance" in data[len(data) // 2]:
num_bonds = len(data) // 2
if num_bonds == 0:
raise IOError("ICOHPLIST file contains no data.")
self.is_spin_polarized = True
else:
num_bonds = len(data)
self.is_spin_polarized = False
list_labels = []
list_atom1 = []
list_atom2 = []
list_length = []
list_translation = []
list_num = []
list_icohp = []
for bond in range(num_bonds):
line = data[bond].split()
icohp = {}
if version == '2.2.1':
label = "%s" % (line[0])
atom1 = str(line[1])
atom2 = str(line[2])
length = float(line[3])
icohp[Spin.up] = float(line[4])
num = int(line[5])
translation = [0, 0, 0]
if self.is_spin_polarized:
icohp[Spin.down] = float(data[bond + num_bonds + 1].split()[4])
elif version == '3.1.1':
label = "%s" % (line[0])
atom1 = str(line[1])
atom2 = str(line[2])
length = float(line[3])
translation = [int(line[4]), int(line[5]), int(line[6])]
icohp[Spin.up] = float(line[7])
num = 1
if self.is_spin_polarized:
icohp[Spin.down] = float(data[bond + num_bonds + 1].split()[7])
list_labels.append(label)
list_atom1.append(atom1)
list_atom2.append(atom2)
list_length.append(length)
list_translation.append(translation)
list_num.append(num)
list_icohp.append(icohp)
# to avoid circular dependencies
from pymatgen.electronic_structure.cohp import IcohpCollection
self._icohpcollection = IcohpCollection(are_coops=are_coops, list_labels=list_labels, list_atom1=list_atom1,
list_atom2=list_atom2, list_length=list_length,
list_translation=list_translation, list_num=list_num,
list_icohp=list_icohp, is_spin_polarized=self.is_spin_polarized)
@property
def icohplist(self) -> Dict[Any, Dict[str, Any]]:
"""
Returns: icohplist compatible with older version of this class
"""
icohplist_new = {}
for key, value in self._icohpcollection._icohplist.items():
icohplist_new[key] = {"length": value._length, "number_of_bonds": value._num,
"icohp": value._icohp, "translation": value._translation}
return icohplist_new
@property
def icohpcollection(self):
"""
Returns: IcohpCollection object
"""
return self._icohpcollection
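# Usage sketch (not part of the original module): reading an ICOHPLIST and
# inspecting one bond label. The keys follow the icohplist property above;
# the label "1" is only an illustration.
#
#     icohplist = Icohplist(are_coops=False, filename="ICOHPLIST.lobster")
#     bond = icohplist.icohplist["1"]
#     print(bond["length"], bond["icohp"][Spin.up])
#     collection = icohplist.icohpcollection  # IcohpCollection for further analysis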
class Doscar:
"""
Class to deal with Lobster's projected DOS and local projected DOS.
The preceding quantum-chemical calculation must have been performed with VASP.
.. attribute:: completedos
LobsterCompleteDos Object
.. attribute:: pdos
List of Dict including numpy arrays with pdos. Access as pdos[atomindex]['orbitalstring']['Spin.up/Spin.down']
.. attribute:: tdos
Dos Object of the total density of states
.. attribute:: energies
numpy array of the energies at which the DOS was calculated (in eV, relative to Efermi)
.. attribute:: tdensities
tdensities[Spin.up]: numpy array of the total density of states for the Spin.up contribution at each of the
energies
tdensities[Spin.down]: numpy array of the total density of states for the Spin.down contribution at each of the
energies
if is_spin_polarized=False:
tdensities[Spin.up]: numpy array of the total density of states
.. attribute:: itdensities:
itdensities[Spin.up]: numpy array of the total density of states for the Spin.up contribution at each of the
energies
itdensities[Spin.down]: numpy array of the total density of states for the Spin.down contribution at each of the
energies
if is_spin_polarized=False:
itdensities[Spin.up]: numpy array of the total density of states
.. attribute:: is_spin_polarized
Boolean. Tells if the system is spin polarized
"""
def __init__(self, doscar: str = "DOSCAR.lobster", structure_file: str = "POSCAR", dftprogram: str = "Vasp"):
"""
Args:
doscar: DOSCAR filename, typically "DOSCAR.lobster"
structure_file: for vasp, this is typically "POSCAR"
dftprogram: so far only "vasp" is implemented
"""
self._doscar = doscar
if dftprogram == "Vasp":
self._final_structure = Structure.from_file(structure_file)
self._parse_doscar()
def _parse_doscar(self):
doscar = self._doscar
tdensities = {}
itdensities = {}
f = open(doscar)
natoms = int(f.readline().split()[0])
# the Fermi energy is read from the last of the next four header lines
efermi = float([f.readline() for nn in range(4)][3].split()[17])
dos = []
orbitals = []
for atom in range(natoms + 1):
line = f.readline()
ndos = int(line.split()[2])
orbitals.append(line.split(';')[-1].split())
line = f.readline().split()
cdos = np.zeros((ndos, len(line)))
cdos[0] = np.array(line)
for nd in range(1, ndos):
line = f.readline().split()
cdos[nd] = np.array(line)
dos.append(cdos)
f.close()
doshere = np.array(dos[0])
if len(doshere[0, :]) == 5:
self._is_spin_polarized = True
elif len(doshere[0, :]) == 3:
self._is_spin_polarized = False
else:
raise ValueError("There is something wrong with the DOSCAR. Can't extract spin polarization.")
energies = doshere[:, 0]
if not self._is_spin_polarized:
tdensities[Spin.up] = doshere[:, 1]
itdensities[Spin.up] = doshere[:, 2]
pdoss = []
spin = Spin.up
for atom in range(natoms):
pdos = defaultdict(dict)
data = dos[atom + 1]
_, ncol = data.shape
orbnumber = 0
for j in range(1, ncol):
orb = orbitals[atom + 1][orbnumber]
pdos[orb][spin] = data[:, j]
orbnumber = orbnumber + 1
pdoss.append(pdos)
else:
tdensities[Spin.up] = doshere[:, 1]
tdensities[Spin.down] = doshere[:, 2]
itdensities[Spin.up] = doshere[:, 3]
itdensities[Spin.down] = doshere[:, 4]
pdoss = []
for atom in range(natoms):
pdos = defaultdict(dict)
data = dos[atom + 1]
_, ncol = data.shape
orbnumber = 0
for j in range(1, ncol):
if j % 2 == 0:
spin = Spin.down
else:
spin = Spin.up
orb = orbitals[atom + 1][orbnumber]
pdos[orb][spin] = data[:, j]
if j % 2 == 0:
orbnumber = orbnumber + 1
pdoss.append(pdos)
self._efermi = efermi
self._pdos = pdoss
self._tdos = Dos(efermi, energies, tdensities)
self._energies = energies
self._tdensities = tdensities
self._itdensities = itdensities
final_struct = self._final_structure
pdossneu = {final_struct[i]: pdos for i, pdos in enumerate(self._pdos)}
self._completedos = LobsterCompleteDos(final_struct, self._tdos, pdossneu)
@property
def completedos(self) -> LobsterCompleteDos:
"""
:return: CompleteDos
"""
return self._completedos
@property
def pdos(self) -> list:
"""
:return: Projected DOS
"""
return self._pdos
@property
def tdos(self) -> Dos:
"""
:return: Total DOS
"""
return self._tdos
@property
def energies(self) -> np.array:
"""
:return: Energies
"""
return self._energies
@property
def tdensities(self) -> np.array:
"""
:return: total densities as a np.array
"""
return self._tdensities
@property
def itdensities(self) -> np.array:
"""
:return: integrated total densities as a np.array
"""
return self._itdensities
@property
def is_spin_polarized(self) -> bool:
"""
:return: Whether run is spin polarized.
"""
return self._is_spin_polarized
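# Usage sketch (not part of the original module): parsing a LOBSTER DOSCAR
# together with the POSCAR of the underlying VASP run. The orbital string
# "2s" is an illustrative key; the actual keys come from the DOSCAR header.
#
#     doscar = Doscar(doscar="DOSCAR.lobster", structure_file="POSCAR")
#     total_dos = doscar.tdos                    # pymatgen Dos object
#     site0_2s_up = doscar.pdos[0]["2s"][Spin.up]
#     complete_dos = doscar.completedos          # LobsterCompleteDos for plotting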
class Charge:
"""
Class to read CHARGE files generated by LOBSTER
.. attribute: atomlist
List of atoms in CHARGE.lobster
.. attribute: types
List of types of atoms in CHARGE.lobster
.. attribute: Mulliken
List of Mulliken charges of atoms in CHARGE.lobster
.. attribute: Loewdin
List of Loewdin charges of atoms in CHARGE.Loewdin
.. attribute: num_atoms
Number of atoms in CHARGE.lobster
"""
def __init__(self, filename: str = "CHARGE.lobster"):
"""
Args:
filename: filename for the CHARGE file, typically "CHARGE.lobster"
"""
with zopen(filename, 'rt') as f:
data = f.read().split("\n")[3:-3]
if len(data) == 0:
raise IOError("CHARGES file contains no data.")
self.num_atoms = len(data)
self.atomlist = [] # type: List[str]
self.types = [] # type: List[str]
self.Mulliken = [] # type: List[float]
self.Loewdin = [] # type: List[float]
for atom in range(0, self.num_atoms):
line = data[atom].split()
self.atomlist.append(line[1] + line[0])
self.types.append(line[1])
self.Mulliken.append(float(line[2]))
self.Loewdin.append(float(line[3]))
def get_structure_with_charges(self, structure_filename):
"""
get a Structure with Mulliken and Loewdin charges as site properties
Args:
structure_filename: filename of POSCAR
Returns:
Structure Object with Mulliken and Loewdin charges as site properties
"""
struct = Structure.from_file(structure_filename)
Mulliken = self.Mulliken
Loewdin = self.Loewdin
site_properties = {"Mulliken Charges": Mulliken, "Loewdin Charges": Loewdin}
new_struct = struct.copy(site_properties=site_properties)
return new_struct
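# Usage sketch (not part of the original module): reading CHARGE.lobster and
# attaching the charges to a structure. The file names are the defaults
# assumed by the class above.
#
#     charge = Charge(filename="CHARGE.lobster")
#     print(charge.atomlist[0], charge.Mulliken[0], charge.Loewdin[0])
#     struct = charge.get_structure_with_charges("POSCAR")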
class Lobsterout:
"""
Class to read in the lobsterout file and evaluate the spilling; stores the basis, warnings, and info lines
.. attribute: basis_functions
list of basis functions that were used in lobster run as strings
.. attribute: basis_type
list of basis type that were used in lobster run as strings
.. attribute: chargespilling
list of charge spilling (first entry: result for spin 1, second entry: result for spin 2 or not present)
.. attribute: dftprogram
string representing the dft program used for the calculation of the wave function
.. attribute: elements
list of strings of elements that were present in lobster calculation
.. attribute: has_CHARGE
Boolean, indicates that CHARGE.lobster is present
.. attribute: has_COHPCAR
Boolean, indicates that COHPCAR.lobster and ICOHPLIST.lobster are present
.. attribute: has_COOPCAR
Boolean, indicates that COOPCAR.lobster and ICOOPLIST.lobster are present
.. attribute: has_DOSCAR
Boolean, indicates that DOSCAR.lobster is present
.. attribute: has_Projection
Boolean, indicates that projectionData.lobster is present
.. attribute: has_bandoverlaps
Boolean, indicates that bandOverlaps.lobster is present
.. attribute: has_density_of_energies
Boolean, indicates that DensityOfEnergy.lobster is present
.. attribute: has_fatbands
Boolean, indicates that fatband calculation was performed
.. attribute: has_grosspopulation
Boolean, indicates that GROSSPOP.lobster is present
.. attribute: info_lines
string with additional infos on the run
.. attribute: info_orthonormalization
string with infos on orthonormalization
.. attribute: is_restart_from_projection
Boolean that indicates that the calculation was restarted from an existing projection file
.. attribute: lobster_version
string that indicates Lobster version
.. attribute: number_of_spins
Integer indicating the number of spins
.. attribute: number_of_threads
integer that indicates how many threads were used
.. attribute: timing
dict with infos on timing
.. attribute: totalspilling
list of values indicating the total spilling for spin channel 1 (and spin channel 2)
.. attribute: warninglines
string with all warnings
"""
def __init__(self, filename="lobsterout"):
"""
Args:
filename: filename of lobsterout
"""
warnings.warn("Make sure the lobsterout is read in correctly. This is a brand new class.")
# read in file
with zopen(filename, 'rt') as f:
data = f.read().split("\n") # [3:-3]
if len(data) == 0:
raise IOError("lobsterout does not contain any data")
# check if Lobster starts from a projection
self.is_restart_from_projection = self._starts_from_projection(data=data)
self.lobster_version = self._get_lobster_version(data=data)
self.number_of_threads = int(self._get_threads(data=data))
self.dftprogram = self._get_dft_program(data=data)
self.number_of_spins = self._get_number_of_spins(data=data)
chargespilling, totalspilling = self._get_spillings(data=data, number_of_spins=self.number_of_spins)
self.chargespilling = chargespilling
self.totalspilling = totalspilling
elements, basistype, basisfunctions = self._get_elements_basistype_basisfunctions(data=data)
self.elements = elements
self.basis_type = basistype
self.basis_functions = basisfunctions
wall_time, user_time, sys_time = self._get_timing(data=data)
timing = {}
timing['walltime'] = wall_time
timing['usertime'] = user_time
timing['sys_time'] = sys_time
self.timing = timing
warninglines = self._get_all_warning_lines(data=data)
self.warninglines = warninglines
orthowarning = self._get_warning_orthonormalization(data=data)
self.info_orthonormalization = orthowarning
infos = self._get_all_info_lines(data=data)
self.info_lines = infos
self.has_DOSCAR = self._has_DOSCAR(data=data)
self.has_COHPCAR = self._has_COHPCAR(data=data)
self.has_COOPCAR = self._has_COOPCAR(data=data)
self.has_CHARGE = self._has_CHARGE(data=data)
self.has_Projection = self._has_projection(data=data)
self.has_bandoverlaps = self._has_bandoverlaps(data=data)
self.has_fatbands = self._has_fatband(data=data)
self.has_grosspopulation = self._has_grosspopulation(data=data)
self.has_density_of_energies = self._has_density_of_energies(data=data)
def get_doc(self):
"""
Returns: LobsterDict with all the information stored in lobsterout
"""
LobsterDict = {}
# check if Lobster starts from a projection
LobsterDict['restart_from_projection'] = self.is_restart_from_projection
LobsterDict['lobster_version'] = self.lobster_version
LobsterDict['threads'] = self.number_of_threads
LobsterDict['Dftprogram'] = self.dftprogram
LobsterDict['chargespilling'] = self.chargespilling
LobsterDict['totalspilling'] = self.totalspilling
LobsterDict['elements'] = self.elements
LobsterDict['basistype'] = self.basis_type
LobsterDict['basisfunctions'] = self.basis_functions
LobsterDict['timing'] = self.timing
LobsterDict['warnings'] = self.warninglines
LobsterDict['orthonormalization'] = self.info_orthonormalization
LobsterDict['infos'] = self.info_lines
LobsterDict['hasDOSCAR'] = self.has_DOSCAR
LobsterDict['hasCOHPCAR'] = self.has_COHPCAR
LobsterDict['hasCOOPCAR'] = self.has_COOPCAR
LobsterDict['hasCHARGE'] = self.has_CHARGE
LobsterDict['hasProjection'] = self.has_Projection
LobsterDict['hasbandoverlaps'] = self.has_bandoverlaps
LobsterDict['hasfatband'] = self.has_fatbands
LobsterDict['hasGrossPopulation'] = self.has_grosspopulation
LobsterDict['hasDensityOfEnergies'] = self.has_density_of_energies
return LobsterDict
def _get_lobster_version(self, data):
for row in data:
splitrow = row.split()
if len(splitrow) > 1:
if splitrow[0] == "LOBSTER":
return splitrow[1]
def _has_bandoverlaps(self, data):
if 'WARNING: I dumped the band overlap matrices to the file bandOverlaps.lobster.' in data:
return True
else:
return False
def _starts_from_projection(self, data):
if 'loading projection from projectionData.lobster...' in data:
return True
else:
return False
def _has_DOSCAR(self, data):
if 'writing DOSCAR.lobster...' in data and 'SKIPPING writing DOSCAR.lobster...' not in data:
return True
else:
return False
def _has_COOPCAR(self, data):
if 'writing COOPCAR.lobster and ICOOPLIST.lobster...' in data and \
'SKIPPING writing COOPCAR.lobster and ICOOPLIST.lobster...' not in data:
return True
else:
return False
def _has_COHPCAR(self, data):
if 'writing COHPCAR.lobster and ICOHPLIST.lobster...' in data and \
'SKIPPING writing COHPCAR.lobster and ICOHPLIST.lobster...' not in data:
return True
else:
return False
def _has_CHARGE(self, data):
# TODO: test further options -> writing can be skipped here as well
if 'SKIPPING writing CHARGE.lobster...' not in data:
return True
else:
return False
def _has_grosspopulation(self, data):
if 'writing CHARGE.lobster and GROSSPOP.lobster...' in data:
return True
else:
return False
def _has_projection(self, data):
if 'saving projection to projectionData.lobster...' in data:
return True
else:
return False
def _has_fatband(self, data):
for row in data:
splitrow = row.split()
if len(splitrow) > 1:
if splitrow[1] == 'FatBand':
return True
return False
def _has_density_of_energies(self, data):
if "writing DensityOfEnergy.lobster..." in data:
return True
else:
return False
def _get_dft_program(self, data):
for row in data:
splitrow = row.split()
if len(splitrow) > 4:
if splitrow[3] == "program...":
return splitrow[4]
def _get_number_of_spins(self, data):
if "spillings for spin channel 2" in data:
return 2
else:
return 1
def _get_threads(self, data):
for row in data:
splitrow = row.split()
if len(splitrow) > 11:
if (splitrow[11]) == "threads" or (splitrow[11] == "thread"):
return splitrow[10]
def _get_spillings(self, data, number_of_spins):
charge_spilling = []
total_spilling = []
for row in data:
splitrow = row.split()
if len(splitrow) > 2:
if splitrow[2] == 'spilling:':
if splitrow[1] == 'charge':
charge_spilling.append(float(splitrow[3].replace('%', '')) / 100.0)
if splitrow[1] == 'total':
total_spilling.append(float(splitrow[3].replace('%', '')) / 100.0)
if len(charge_spilling) == number_of_spins and len(total_spilling) == number_of_spins:
break
return charge_spilling, total_spilling
def _get_elements_basistype_basisfunctions(self, data):
begin = False
end = False
elements = []
basistype = []
basisfunctions = []
for row in data:
if begin and not end:
splitrow = row.split()
if splitrow[0] not in ['INFO:', 'WARNING:', 'setting', 'calculating', 'post-processing', 'saving',
'spillings', 'writing']:
elements.append(splitrow[0])
basistype.append(splitrow[1].replace('(', '').replace(')', ''))
# the last element is an empty string
basisfunctions.append(splitrow[2:])
else:
end = True
if "setting up local basis functions..." in row:
begin = True
return elements, basistype, basisfunctions
def _get_timing(self, data):
# will give back wall, user and sys time
begin = False
# end=False
# time=[]
for row in data:
splitrow = row.split()
if 'finished' in splitrow:
begin = True
if begin:
if 'wall' in splitrow:
wall_time = splitrow[2:10]
if 'user' in splitrow:
user_time = splitrow[0:8]
if 'sys' in splitrow:
sys_time = splitrow[0:8]
wall_time_dict = {"h": wall_time[0], "min": wall_time[2], "s": wall_time[4], "ms": wall_time[6]}
user_time_dict = {"h": user_time[0], "min": user_time[2], "s": user_time[4], "ms": user_time[6]}
sys_time_dict = {"h": sys_time[0], "min": sys_time[2], "s": sys_time[4], "ms": sys_time[6]}
return wall_time_dict, user_time_dict, sys_time_dict
def _get_warning_orthonormalization(self, data):
orthowarning = []
for row in data:
splitrow = row.split()
if 'orthonormalized' in splitrow:
orthowarning.append(" ".join(splitrow[1:]))
return orthowarning
def _get_all_warning_lines(self, data):
warnings = []
for row in data:
splitrow = row.split()
if len(splitrow) > 0:
if splitrow[0] == 'WARNING:':
warnings.append(" ".join(splitrow[1:]))
return warnings
def _get_all_info_lines(self, data):
infos = []
for row in data:
splitrow = row.split()
if len(splitrow) > 0:
if splitrow[0] == 'INFO:':
infos.append(" ".join(splitrow[1:]))
return infos
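# Usage sketch (not part of the original module): reading a lobsterout file
# and checking the quality of the projection via the charge spilling.
#
#     out = Lobsterout(filename="lobsterout")
#     print(out.lobster_version, out.chargespilling)  # spilling per spin channel
#     summary = out.get_doc()                         # dict with all parsed fields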
class Fatband:
"""
Reads in FATBAND_x_y.lobster files
.. attribute: efermi
efermi that was read in from vasprun.xml
.. attribute: eigenvals
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. If the band structure is not spin polarized, we
only store one data set under Spin.up.
.. attribute: is_spinpolarized
Boolean that tells you whether this was a spin-polarized calculation
.. attribute: kpoints_array
list of kpoint as numpy arrays, in frac_coords of the given lattice by default
.. attribute: label_dict
(dict) of {} this link a kpoint (in frac coords or cartesian coordinates depending on the coords).
.. attribute: lattice
lattice object of reciprocal lattice as read in from vasprun.xml
.. attribute: nbands
number of bands used in the calculation
.. attribute: p_eigenvals
dict of orbital projections as {spin: array of dict}.
The indices of the array are [band_index, kpoint_index].
The dict is then built the following way:
{"string of element": "string of orbital as read in from FATBAND file"}
If the band structure is not spin polarized, we only store one data set under Spin.up.
.. attribute: structure
structure read in from vasprun.xml
"""
def __init__(self, filenames=".", vasprun='vasprun.xml', Kpointsfile='KPOINTS'):
"""
Args:
filenames (list or string): can be a list of file names or a path to a folder from which all
"FATBAND_*" files will be read
vasprun: corresponding vasprun file
Kpointsfile: KPOINTS file for bandstructure calculation, typically "KPOINTS"
"""
warnings.warn('Make sure all relevant FATBAND files were generated and read in!')
warnings.warn('Use Lobster 3.2.0 or newer for fatband calculations!')
VASPRUN = Vasprun(filename=vasprun, ionic_step_skip=None,
ionic_step_offset=0, parse_dos=True,
parse_eigen=False, parse_projected_eigen=False,
parse_potcar_file=False, occu_tol=1e-8,
exception_on_bad_xml=True)
self.structure = VASPRUN.final_structure
self.lattice = self.structure.lattice.reciprocal_lattice
self.efermi = VASPRUN.efermi
kpoints_object = Kpoints.from_file(Kpointsfile)
atomtype = []
atomnames = []
orbital_names = []
if not isinstance(filenames, list) or filenames is None:
filenames_new = []
if filenames is None:
filenames = '.'
for file in os.listdir(filenames):
if fnmatch.fnmatch(file, 'FATBAND_*.lobster'):
filenames_new.append(os.path.join(filenames, file))
filenames = filenames_new
if len(filenames) == 0:
raise ValueError("No FATBAND files in folder or given")
for ifilename, filename in enumerate(filenames):
with zopen(filename, "rt") as f:
contents = f.read().split("\n")
# TODO: could be replaced for future versions of Lobster, get atomname from filename
atomnames.append(os.path.split(filename)[1].split('_')[1].capitalize())
parameters = contents[0].split()
atomtype.append(re.split(r"[0-9]+", parameters[3])[0].capitalize())
orbital_names.append(parameters[4])
# get atomtype orbital dict
atom_orbital_dict = {}
for iatom, atom in enumerate(atomnames):
if atom not in atom_orbital_dict:
atom_orbital_dict[atom] = []
atom_orbital_dict[atom].append(orbital_names[iatom])
# test if there are the same orbitals twice or if two different formats were used or if all necessary orbitals
# are there
for key, items in atom_orbital_dict.items():
if len(set(items)) != len(items):
raise (ValueError("The are two FATBAND files for the same atom and orbital. The program will stop."))
split = []
for item in items:
split.append(item.split("_")[0])
for orb, number in collections.Counter(split).items():
if number != 1 and number != 3 and number != 5 and number != 7:
raise ValueError(
"Make sure all relevant orbitals were generated and that no duplicates (2p and 2p_x) are "
"present")
kpoints_array = []
for ifilename, filename in enumerate(filenames):
with zopen(filename, "rt") as f:
contents = f.read().split("\n")
if ifilename == 0:
self.nbands = int(parameters[6])
self.number_kpts = kpoints_object.num_kpts - int(contents[1].split()[2]) + 1
if len(contents[1:]) == self.nbands + 2:
self.is_spinpolarized = False
elif len(contents[1:]) == self.nbands * 2 + 2:
self.is_spinpolarized = True
else:
linenumbers = []
for iline, line in enumerate(contents[1:self.nbands * 2 + 4]):
if line.split()[0] == '#':
linenumbers.append(iline)
if ifilename == 0:
if len(linenumbers) == 2:
self.is_spinpolarized = True
else:
self.is_spinpolarized = False
if ifilename == 0:
eigenvals = {}
eigenvals[Spin.up] = [[collections.defaultdict(float)
for i in range(self.number_kpts)]
for j in range(self.nbands)]
if self.is_spinpolarized:
eigenvals[Spin.down] = [[collections.defaultdict(float)
for i in range(self.number_kpts)]
for j in range(self.nbands)]
p_eigenvals = {}
p_eigenvals[Spin.up] = [
[{str(e): {str(orb): collections.defaultdict(float) for orb in atom_orbital_dict[e]}
for e in atomnames}
for i in range(self.number_kpts)]
for j in range(self.nbands)]
if self.is_spinpolarized:
p_eigenvals[Spin.down] = [
[{str(e): {str(orb): collections.defaultdict(float) for orb in atom_orbital_dict[e]}
for e in atomnames}
for i in range(self.number_kpts)]
for j in range(self.nbands)]
ikpoint = -1
for iline, line in enumerate(contents[1:-1]):
if line.split()[0] == '#':
KPOINT = np.array([float(line.split()[4]), float(line.split()[5]), float(line.split()[6])])
if ifilename == 0:
kpoints_array.append(KPOINT)
linenumber = 0
iband = 0
ikpoint += 1
if linenumber == self.nbands:
iband = 0
if line.split()[0] != '#':
if linenumber < self.nbands:
if ifilename == 0:
eigenvals[Spin.up][iband][ikpoint] = float(line.split()[1]) + self.efermi
p_eigenvals[Spin.up][iband][ikpoint][atomnames[ifilename]][orbital_names[ifilename]] = float(
line.split()[2])
if linenumber >= self.nbands and self.is_spinpolarized:
if ifilename == 0:
eigenvals[Spin.down][iband][ikpoint] = float(line.split()[1]) + self.efermi
p_eigenvals[Spin.down][iband][ikpoint][atomnames[ifilename]][
orbital_names[ifilename]] = float(line.split()[2])
linenumber += 1
iband += 1
self.kpoints_array = kpoints_array
self.eigenvals = eigenvals
self.p_eigenvals = p_eigenvals
label_dict = {}
for ilabel, label in enumerate(kpoints_object.labels[-self.number_kpts:], start=0):
if label is not None:
label_dict[label] = kpoints_array[ilabel]
self.label_dict = label_dict
def get_bandstructure(self):
"""
returns a LobsterBandStructureSymmLine object which can be plotted with a normal BSPlotter
"""
return LobsterBandStructureSymmLine(kpoints=self.kpoints_array, eigenvals=self.eigenvals, lattice=self.lattice,
efermi=self.efermi, labels_dict=self.label_dict,
structure=self.structure,
projections=self.p_eigenvals)
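# Usage sketch (not part of the original module): reading all FATBAND files in
# the current folder and plotting the band structure. BSPlotter lives in
# pymatgen.electronic_structure.plotter; importing it here is an assumption.
#
#     fatband = Fatband(filenames=".", vasprun="vasprun.xml", Kpointsfile="KPOINTS")
#     bs = fatband.get_bandstructure()
#     # from pymatgen.electronic_structure.plotter import BSPlotter
#     # BSPlotter(bs).show()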
class Lobsterin(dict, MSONable):
"""
This class can handle and generate lobsterin files
Furthermore, it can also modify INCAR files for lobster, generate KPOINT files for fatband calculations in Lobster,
and generate the standard primitive cells in a POSCAR file that are needed for the fatband calculations.
There are also several standard lobsterin files that can be easily generated.
"""
# all keywords known to this class so far
# reminder: lobster is not case sensitive
AVAILABLEKEYWORDS = ['COHPstartEnergy', 'COHPendEnergy', 'basisSet', 'cohpGenerator',
'gaussianSmearingWidth', 'saveProjectionToFile', 'basisfunctions', 'skipdos',
'skipcohp', 'skipcoop', 'skipPopulationAnalysis', 'skipGrossPopulation',
'userecommendedbasisfunctions', 'loadProjectionFromFile', 'forceEnergyRange',
'DensityOfEnergy', 'BWDF', 'BWDFCOHP', 'skipProjection', 'createFatband',
'writeBasisFunctions', 'writeMatricesToFile', 'realspaceHamiltonian',
'realspaceOverlap', 'printPAWRealSpaceWavefunction', 'printLCAORealSpaceWavefunction',
'noFFTforVisualization', 'RMSp', 'onlyReadVasprun.xml', 'noMemoryMappedFiles',
'skipPAWOrthonormalityTest', 'doNotIgnoreExcessiveBands', 'doNotUseAbsoluteSpilling',
'skipReOrthonormalization', 'forceV1HMatrix', 'useOriginalTetrahedronMethod',
'useDecimalPlaces', 'kSpaceCOHP']
# keyword + one float can be used in file
FLOATKEYWORDS = ['COHPstartEnergy', 'COHPendEnergy', 'gaussianSmearingWidth', 'useDecimalPlaces', 'COHPSteps']
# one of these keywords +endstring can be used in file
STRINGKEYWORDS = ['basisSet', 'cohpGenerator', 'realspaceHamiltonian', 'realspaceOverlap',
'printPAWRealSpaceWavefunction', 'printLCAORealSpaceWavefunction', 'kSpaceCOHP']
# the keyword alone will turn on or off a function
BOOLEANKEYWORDS = ['saveProjectionToFile', 'skipdos', 'skipcohp', 'skipcoop', 'loadProjectionFromFile',
'forceEnergyRange', 'DensityOfEnergy', 'BWDF', 'BWDFCOHP', 'skipPopulationAnalysis',
'skipGrossPopulation', 'userecommendedbasisfunctions', 'skipProjection',
'writeBasisFunctions', 'writeMatricesToFile', 'noFFTforVisualization', 'RMSp',
'onlyReadVasprun.xml', 'noMemoryMappedFiles', 'skipPAWOrthonormalityTest',
'doNotIgnoreExcessiveBands', 'doNotUseAbsoluteSpilling', 'skipReOrthonormalization',
'forceV1HMatrix', 'useOriginalTetrahedronMethod', 'bandwiseSpilling',
'kpointwiseSpilling']
# several of these keywords + ending can be used in a lobsterin file:
LISTKEYWORDS = ['basisfunctions', 'cohpbetween', 'createFatband']
def __init__(self, settingsdict: dict):
"""
Args:
settingsdict: dict to initialize Lobsterin
"""
super().__init__()
# check for duplicates
listkey = [key.lower() for key in settingsdict.keys()]
if len(listkey) != len(list(set(listkey))):
raise IOError("There are duplicates for the keywords! The program will stop here.")
self.update(settingsdict)
def __setitem__(self, key, val):
"""
Add parameter-val pair to Lobsterin. Warns if parameter is not in list of
valid lobsterintags. Also cleans the parameter and val by stripping
leading and trailing white spaces. Similar to INCAR class.
"""
# because lobster keywords are not case sensitive, the following code is necessary
found = False
for key_here in self.keys():
if key.strip().lower() == key_here.lower():
new_key = key_here
found = True
if not found:
new_key = key
if new_key.lower() not in [element.lower() for element in Lobsterin.AVAILABLEKEYWORDS]:
raise (ValueError("Key is currently not available"))
super().__setitem__(new_key, val.strip() if isinstance(val, str) else val)
def __getitem__(self, item):
"""
implements getitem from dict to avoid problems with cases
"""
found = False
for key_here in self.keys():
if item.strip().lower() == key_here.lower():
new_key = key_here
found = True
if not found:
new_key = item
val = dict.__getitem__(self, new_key)
return val
def diff(self, other):
"""
Diff function for lobsterin. Compares two lobsterin and indicates which parameters are the same.
Similar to the diff in INCAR.
Args:
other (Lobsterin): Lobsterin object to compare to
Returns:
dict with differences and similarities
"""
similar_param = {}
different_param = {}
key_list_others = [element.lower() for element in other.keys()]
for k1, v1 in self.items():
k1lower = k1.lower()
if k1lower not in key_list_others:
different_param[k1.upper()] = {"lobsterin1": v1, "lobsterin2": None}
else:
for key_here in other.keys():
if k1.lower() == key_here.lower():
new_key = key_here
if isinstance(v1, str):
if v1.strip().lower() != other[new_key].strip().lower():
different_param[k1.upper()] = {"lobsterin1": v1, "lobsterin2": other[new_key]}
else:
similar_param[k1.upper()] = v1
elif isinstance(v1, list):
new_set1 = set([element.strip().lower() for element in v1])
new_set2 = set([element.strip().lower() for element in other[new_key]])
if new_set1 != new_set2:
different_param[k1.upper()] = {"lobsterin1": v1, "lobsterin2": other[new_key]}
else:
if v1 != other[new_key]:
different_param[k1.upper()] = {"lobsterin1": v1, "lobsterin2": other[new_key]}
else:
similar_param[k1.upper()] = v1
for k2, v2 in other.items():
if k2.upper() not in similar_param and k2.upper() not in different_param:
for key_here in self.keys():
if k2.lower() == key_here.lower():
new_key = key_here
else:
new_key = k2
if new_key not in self:
different_param[k2.upper()] = {"lobsterin1": None, "lobsterin2": v2}
return {"Same": similar_param, "Different": different_param}
def _get_nbands(self, structure: Structure):
"""
get number of nbands
"""
if self.get("basisfunctions") is None:
raise IOError("No basis functions are provided. The program cannot calculate nbands.")
else:
basis_functions = [] # type: List[str]
for string_basis in self["basisfunctions"]:
# string_basis.lstrip()
string_basis_raw = string_basis.strip().split(" ")
while "" in string_basis_raw:
string_basis_raw.remove("")
for i in range(0, int(structure.composition.element_composition[string_basis_raw[0]])):
basis_functions.extend(string_basis_raw[1:])
no_basis_functions = 0
for basis in basis_functions:
if "s" in basis:
no_basis_functions = no_basis_functions + 1
elif "p" in basis:
no_basis_functions = no_basis_functions + 3
elif "d" in basis:
no_basis_functions = no_basis_functions + 5
elif "f" in basis:
no_basis_functions = no_basis_functions + 7
return int(no_basis_functions)
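# Worked example for the counting above (illustration only): with
# basisfunctions ["Fe 3d 4s", "O 2s 2p"] and an Fe2O3 cell, each Fe
# contributes 5 + 1 = 6 functions and each O contributes 1 + 3 = 4,
# so NBANDS = 2 * 6 + 3 * 4 = 24.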
def write_lobsterin(self, path="lobsterin", overwritedict=None):
"""
writes a lobsterin file
Args:
path (str): filename of the lobsterin file that will be written
overwritedict (dict): dict that can be used to overwrite lobsterin, e.g. {"skipdos": True}
"""
# will overwrite previous entries
# has to search first if entry is already in Lobsterindict (due to case insensitivity)
if overwritedict is not None:
for key, entry in overwritedict.items():
found = False
for key2 in self.keys():
if key.lower() == key2.lower():
self[key2] = entry
found = True
if not found:
self[key] = entry
filename = path
with open(filename, 'w') as f:
for key in Lobsterin.AVAILABLEKEYWORDS:
if key.lower() in [element.lower() for element in self.keys()]:
if key.lower() in [element.lower() for element in Lobsterin.FLOATKEYWORDS]:
f.write(key + ' ' + str(self.get(key)) + '\n')
elif key.lower() in [element.lower() for element in Lobsterin.BOOLEANKEYWORDS]:
# checks if entry is True or False
for key_here in self.keys():
if key.lower() == key_here.lower():
new_key = key_here
if self.get(new_key):
f.write(key + '\n')
elif key.lower() in [element.lower() for element in Lobsterin.STRINGKEYWORDS]:
f.write(key + ' ' + str(self.get(key)) + '\n')
elif key.lower() in [element.lower() for element in Lobsterin.LISTKEYWORDS]:
for entry in self.get(key):
f.write(key + ' ' + str(entry) + '\n')
def as_dict(self):
"""
:return: MSONable dict
"""
d = dict(self)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: Lobsterin
"""
return Lobsterin({k: v for k, v in d.items() if k not in ["@module",
"@class"]})
def write_INCAR(self, incar_input: str = "INCAR", incar_output: str = "INCAR.lobster", poscar_input: str = "POSCAR",
isym: int = -1,
further_settings: dict = None):
"""
Will only make the run static, insert nbands, make ISYM=-1, set LWAVE=True and write a new INCAR.
You have to check for the rest.
Args:
incar_input (str): path to input INCAR
incar_output (str): path to output INCAR
poscar_input (str): path to input POSCAR
isym (int): isym equal to -1 or 0 are possible. Current Lobster version only allow -1.
further_settings (dict): A dict can be used to include further settings, e.g. {"ISMEAR":-5}
"""
# reads old incar from file, this one will be modified
incar = Incar.from_file(incar_input)
warnings.warn("Please check your incar_input before using it. This method only changes three settings!")
if isym == -1:
incar["ISYM"] = -1
elif isym == 0:
incar["ISYM"] = 0
else:
ValueError("isym has to be -1 or 0.")
incar["NSW"] = 0
incar["LWAVE"] = True
# get nbands from _get_nbands (use basis set that is inserted)
incar["NBANDS"] = self._get_nbands(Structure.from_file(poscar_input))
if further_settings is not None:
for key, item in further_settings.items():
incar[key] = further_settings[key]
# print it to file
incar.write_file(incar_output)
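# Usage sketch (not part of the original module): preparing an INCAR for a
# static LOBSTER-compatible VASP run. Requires that "basisfunctions" is set,
# since NBANDS is derived from it; file names are illustrative defaults.
#
#     lobsterin = Lobsterin.from_file("lobsterin")
#     lobsterin.write_INCAR(incar_input="INCAR", incar_output="INCAR.lobster",
#                           poscar_input="POSCAR", isym=-1,
#                           further_settings={"ISMEAR": 0, "SIGMA": 0.05})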
@staticmethod
def _get_basis(structure: Structure, potcar_symbols: list,
address_basis_file: str = os.path.join(MODULE_DIR, "BASIS_PBE_54.yaml")):
"""
will get the basis from given potcar_symbols (e.g., ["Fe_pv","Si"]
#include this in lobsterin class
Args:
structure (Structure): Structure object
potcar_symbols: list of potcar symbols
Returns:
returns basis
"""
Potcar_names = [name for name in potcar_symbols]
AtomTypes_Potcar = [name.split('_')[0] for name in Potcar_names]
AtomTypes = structure.symbol_set
if set(AtomTypes) != set(AtomTypes_Potcar):
raise IOError("Your POSCAR does not correspond to your POTCAR!")
BASIS = loadfn(address_basis_file)['BASIS']
basis_functions = []
list_forin = []
for itype, type in enumerate(Potcar_names):
if type not in BASIS:
raise ValueError("You have to provide the basis for" + str(
type) + "manually. We don't have any information on this POTCAR.")
basis_functions.append(BASIS[type].split())
tojoin = str(AtomTypes_Potcar[itype]) + " "
tojoin2 = "".join(str(str(e) + " ") for e in BASIS[type].split())
list_forin.append(str(tojoin + tojoin2))
return list_forin
@staticmethod
def write_POSCAR_with_standard_primitive(POSCAR_input="POSCAR", POSCAR_output="POSCAR.lobster", symprec=0.01):
"""
writes a POSCAR with the standard primitive cell. This is needed to arrive at the correct kpath
Args:
POSCAR_input (str): filename of input POSCAR
POSCAR_output (str): filename of output POSCAR
symprec (float): precision to find symmetry
"""
structure = Structure.from_file(POSCAR_input)
kpath = HighSymmKpath(structure, symprec=symprec)
new_structure = kpath.prim
new_structure.to(fmt='POSCAR', filename=POSCAR_output)
@staticmethod
def write_KPOINTS(POSCAR_input: str = "POSCAR", KPOINTS_output="KPOINTS.lobster", reciprocal_density: int = 100,
isym: int = -1, from_grid: bool = False, input_grid: list = [5, 5, 5], line_mode: bool = True,
kpoints_line_density: int = 20, symprec: float = 0.01):
"""
writes a KPOINT file for lobster (only ISYM=-1 and ISYM=0 are possible), grids are gamma centered
Args:
POSCAR_input (str): path to POSCAR
KPOINTS_output (str): path to output KPOINTS
reciprocal_density (int): Grid density
isym (int): either -1 or 0. Current Lobster versions only allow -1.
from_grid (bool): If True KPOINTS will be generated with the help of a grid given in input_grid. Otherwise,
they will be generated from the reciprocal_density
input_grid (list): grid to generate the KPOINTS file
line_mode (bool): If True, band structure will be generated
kpoints_line_density (int): density of the lines in the band structure
symprec (float): precision to determine symmetry
"""
structure = Structure.from_file(POSCAR_input)
# should this really be static? -> make it similar to INCAR?
if not from_grid:
kpointgrid = Kpoints.automatic_density_by_vol(structure, reciprocal_density).kpts
mesh = kpointgrid[0]
else:
mesh = input_grid
# The following code is taken from: SpacegroupAnalyzer
# we need to switch off symmetry here
latt = structure.lattice.matrix
positions = structure.frac_coords
unique_species = [] # type: List[Any]
zs = []
magmoms = []
for species, g in itertools.groupby(structure,
key=lambda s: s.species):
if species in unique_species:
ind = unique_species.index(species)
zs.extend([ind + 1] * len(tuple(g)))
else:
unique_species.append(species)
zs.extend([len(unique_species)] * len(tuple(g)))
for site in structure:
if hasattr(site, 'magmom'):
magmoms.append(site.magmom)
elif site.is_ordered and hasattr(site.specie, 'spin'):
magmoms.append(site.specie.spin)
else:
magmoms.append(0)
# For now, we are setting magmom to zero. (Taken from INCAR class)
cell = latt, positions, zs, magmoms
# TODO: what about this shift?
mapping, grid = spglib.get_ir_reciprocal_mesh(mesh, cell, is_shift=[0, 0, 0])
# get the kpoints for the grid
if isym == -1:
kpts = []
weights = []
all_labels = []
for gp in grid:
kpts.append(gp.astype(float) / mesh)
weights.append(float(1))
all_labels.append("")
elif isym == 0:
# time reversal symmetry: k and -k are equivalent
kpts = []
weights = []
all_labels = []
newlist = [list(gp) for gp in list(grid)]
mapping = []
for gp in newlist:
minusgp = [-k for k in gp]
if minusgp in newlist and minusgp not in [[0, 0, 0]]:
mapping.append(newlist.index(minusgp))
else:
mapping.append(newlist.index(gp))
for igp, gp in enumerate(newlist):
if mapping[igp] > igp:
kpts.append(np.array(gp).astype(float) / mesh)
weights.append(float(2))
all_labels.append("")
elif mapping[igp] == igp:
kpts.append(np.array(gp).astype(float) / mesh)
weights.append(float(1))
all_labels.append("")
else:
ValueError("Only isym=-1 and isym=0 are allowed.")
# line mode
if line_mode:
kpath = HighSymmKpath(structure, symprec=symprec)
if not np.allclose(kpath.prim.lattice.matrix, structure.lattice.matrix):
raise ValueError(
"You are not using the standard primitive cell. The k-path is not correct. Please generate a "
"standard primitive cell first.")
frac_k_points, labels = kpath.get_kpoints(
line_density=kpoints_line_density,
coords_are_cartesian=False)
for k in range(len(frac_k_points)):
kpts.append(frac_k_points[k])
weights.append(0.0)
all_labels.append(labels[k])
if isym == -1:
comment = (
"ISYM=-1, grid: " + str(mesh) if not line_mode else "ISYM=-1, grid: " + str(mesh) + " plus kpoint path")
elif isym == 0:
comment = (
"ISYM=0, grid: " + str(mesh) if not line_mode else "ISYM=0, grid: " + str(mesh) + " plus kpoint path")
KpointObject = Kpoints(comment=comment,
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(kpts), kpts=kpts, kpts_weights=weights,
labels=all_labels)
KpointObject.write_file(filename=KPOINTS_output)
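# Usage sketch (not part of the original module): generating a gamma-centered
# grid plus a k-path for a fatband run. The line-mode branch above requires
# the standard primitive cell, hence the first call.
#
#     Lobsterin.write_POSCAR_with_standard_primitive("POSCAR", "POSCAR.lobster")
#     Lobsterin.write_KPOINTS(POSCAR_input="POSCAR.lobster",
#                             KPOINTS_output="KPOINTS.lobster",
#                             reciprocal_density=100, isym=-1, line_mode=True)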
@classmethod
def from_file(cls, lobsterin: str):
"""
Args:
lobsterin (str): path to lobsterin
Returns:
Lobsterin object
"""
with zopen(lobsterin, 'rt') as f:
data = f.read().split("\n")
if len(data) == 0:
raise IOError("lobsterin file contains no data.")
Lobsterindict = {} # type: Dict
for datum in data:
# will remove all comments to avoid complications
raw_datum = datum.split('!')[0]
raw_datum = raw_datum.split('//')[0]
raw_datum = raw_datum.split('#')[0]
raw_datum = raw_datum.split(' ')
while "" in raw_datum:
raw_datum.remove("")
if len(raw_datum) > 1:
# check which type of keyword this is, handle accordingly
if raw_datum[0].lower() not in [datum2.lower() for datum2 in Lobsterin.LISTKEYWORDS]:
if raw_datum[0].lower() not in [datum2.lower() for datum2 in Lobsterin.FLOATKEYWORDS]:
if raw_datum[0].lower() not in Lobsterindict:
Lobsterindict[raw_datum[0].lower()] = " ".join(raw_datum[1:])
else:
raise ValueError("Same keyword " + str(raw_datum[0].lower()) + "twice!")
else:
if raw_datum[0].lower() not in Lobsterindict:
Lobsterindict[raw_datum[0].lower()] = float(raw_datum[1])
else:
raise ValueError("Same keyword " + str(raw_datum[0].lower()) + "twice!")
else:
if raw_datum[0].lower() not in Lobsterindict:
Lobsterindict[raw_datum[0].lower()] = [" ".join(raw_datum[1:])]
else:
Lobsterindict[raw_datum[0].lower()].append(" ".join(raw_datum[1:]))
elif len(raw_datum) > 0:
Lobsterindict[raw_datum[0].lower()] = True
return cls(Lobsterindict)
@staticmethod
def _get_potcar_symbols(POTCAR_input: str) -> list:
"""
will return the name of the species in the POTCAR
Args:
POTCAR_input(str): string to potcar file
Returns:
list of the names of the species in string format
"""
potcar = Potcar.from_file(POTCAR_input)
for pot in potcar:
if pot.potential_type != "PAW":
raise IOError("Lobster only works with PAW! Use different POTCARs")
if potcar.functional != "PBE":
raise IOError("We only have BASIS options for PBE so far")
Potcar_names = [name["symbol"] for name in potcar.spec]
return Potcar_names
@classmethod
def standard_calculations_from_vasp_files(cls, POSCAR_input: str = "POSCAR", INCAR_input: str = "INCAR",
POTCAR_input: Optional[str] = None,
dict_for_basis: Optional[dict] = None,
option: str = 'standard'):
"""
will generate Lobsterin with standard settings
Args:
POSCAR_input(str): path to POSCAR
INCAR_input(str): path to INCAR
POTCAR_input (str): path to POTCAR
dict_for_basis (dict): can be provided: it should look the following:
dict_for_basis={"Fe":'3p 3d 4s 4f', "C": '2s 2p'} and will overwrite all settings from POTCAR_input
option (str): 'standard' will start a normal lobster run where COHPs, COOPs, DOS, CHARGE etc. will be
calculated
'standard_from_projection' will start a normal lobster run from a projection
'standard_with_fatband' will do a fatband calculation, run over all orbitals
'onlyprojection' will only do a projection
'onlydos' will only calculate a projected dos
'onlycohp' will only calculate cohp
'onlycoop' will only calculate coop
'onlycohpcoop' will only calculate cohp and coop
Returns:
Lobsterin Object with standard settings
"""
warnings.warn(
"Always check and test the provided basis functions. The spilling of your Lobster calculation might help")
# warn that fatband calc cannot be done with tetrahedron method at the moment
if option not in ['standard', 'standard_from_projection', 'standard_with_fatband', 'onlyprojection', 'onlydos',
'onlycohp', 'onlycoop', 'onlycohpcoop']:
raise ValueError("The option is not valid!")
Lobsterindict = {} # type: Dict[Any,Any]
# this basis set covers most elements
Lobsterindict['basisSet'] = 'pbeVaspFit2015'
# energies around e-fermi
Lobsterindict['COHPstartEnergy'] = -15.0
Lobsterindict['COHPendEnergy'] = 5.0
if option in ['standard', 'onlycohp', 'onlycoop', 'onlycohpcoop', 'standard_with_fatband']:
            # every interaction within a distance of 6.0 Å is checked
Lobsterindict['cohpGenerator'] = "from 0.1 to 6.0 orbitalwise"
# the projection is saved
Lobsterindict['saveProjectionToFile'] = True
if option == 'standard_from_projection':
Lobsterindict['cohpGenerator'] = "from 0.1 to 6.0 orbitalwise"
Lobsterindict['loadProjectionFromFile'] = True
if option == 'onlycohp':
Lobsterindict['skipdos'] = True
Lobsterindict['skipcoop'] = True
Lobsterindict['skipPopulationAnalysis'] = True
Lobsterindict['skipGrossPopulation'] = True
if option == 'onlycoop':
Lobsterindict['skipdos'] = True
Lobsterindict['skipcohp'] = True
Lobsterindict['skipPopulationAnalysis'] = True
Lobsterindict['skipGrossPopulation'] = True
if option == 'onlycohpcoop':
Lobsterindict['skipdos'] = True
Lobsterindict['skipPopulationAnalysis'] = True
Lobsterindict['skipGrossPopulation'] = True
if option == 'onlydos':
Lobsterindict['skipcohp'] = True
Lobsterindict['skipcoop'] = True
Lobsterindict['skipPopulationAnalysis'] = True
Lobsterindict['skipGrossPopulation'] = True
if option == 'onlyprojection':
Lobsterindict['skipdos'] = True
Lobsterindict['skipcohp'] = True
Lobsterindict['skipcoop'] = True
Lobsterindict['skipPopulationAnalysis'] = True
Lobsterindict['skipGrossPopulation'] = True
Lobsterindict['saveProjectionToFile'] = True
incar = Incar.from_file(INCAR_input)
if incar["ISMEAR"] == 0:
Lobsterindict['gaussianSmearingWidth'] = incar["SIGMA"]
if incar["ISMEAR"] != 0 and option == "standard_with_fatband":
raise ValueError("ISMEAR has to be 0 for a fatband calculation with Lobster")
if dict_for_basis is not None:
# dict_for_basis={"Fe":'3p 3d 4s 4f', "C": '2s 2p'}
# will just insert this basis and not check with poscar
basis = [key + ' ' + value for key, value in dict_for_basis.items()]
elif POTCAR_input is not None:
# get basis from POTCAR
potcar_names = Lobsterin._get_potcar_symbols(POTCAR_input=POTCAR_input)
basis = Lobsterin._get_basis(structure=Structure.from_file(POSCAR_input),
potcar_symbols=potcar_names)
else:
raise ValueError("basis cannot be generated")
Lobsterindict["basisfunctions"] = basis
if option == 'standard_with_fatband':
Lobsterindict['createFatband'] = basis
return cls(Lobsterindict)
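# A usage sketch (hypothetical paths; assumes the usual VASP input files are
# present in the working directory, and that Lobsterin provides a
# write_lobsterin method earlier in this module):
#
#     lobsterin = Lobsterin.standard_calculations_from_vasp_files(
#         POSCAR_input="POSCAR", INCAR_input="INCAR",
#         POTCAR_input="POTCAR", option="standard")
#     lobsterin.write_lobsterin("lobsterin")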
class Bandoverlaps:
"""
Class to read in bandOverlaps.lobster files. These files are not created during every Lobster run.
.. attribute: bandoverlapsdict is a dict of the following form:
{spin:{"kpoint as string": {"maxDeviation": float that describes the max deviation, "matrix": 2D
array of the size number of bands times number of bands including the overlap matrices with } }}
.. attribute: maxDeviation is a list of floats describing the maximal Deviation for each problematic kpoint
"""
def __init__(self, filename: str = "bandOverlaps.lobster"):
"""
Args:
filename: filename of the "bandOverlaps.lobster" file
"""
with zopen(filename, "rt") as f:
contents = f.read().split("\n")
self._read(contents)
def _read(self, contents: list):
"""
will read in all contents of the file
Args:
contents: list of strings
"""
self.bandoverlapsdict = {} # type: Dict
self.max_deviation = [] # type: List
# This has to be done like this because there can be different numbers of problematic k-points per spin
for line in contents:
if "Overlap Matrix (abs) of the orthonormalized projected bands for spin 0" in line:
spin = Spin.up
elif "Overlap Matrix (abs) of the orthonormalized projected bands for spin 1" in line:
spin = Spin.down
elif "k-point" in line:
kpoint = line.split(" ")
kpoint_array = []
for kpointel in kpoint:
if kpointel not in ["at", "k-point", ""]:
kpoint_array.append(str(kpointel))
elif "maxDeviation" in line:
if spin not in self.bandoverlapsdict:
self.bandoverlapsdict[spin] = {}
if not " ".join(kpoint_array) in self.bandoverlapsdict[spin]:
self.bandoverlapsdict[spin][" ".join(kpoint_array)] = {}
maxdev = line.split(" ")[2]
self.bandoverlapsdict[spin][" ".join(kpoint_array)]["maxDeviation"] = float(maxdev)
self.max_deviation.append(float(maxdev))
self.bandoverlapsdict[spin][" ".join(kpoint_array)]["matrix"] = []
else:
overlaps = []
for el in (line.split(" ")):
if el not in [""]:
overlaps.append(float(el))
self.bandoverlapsdict[spin][" ".join(kpoint_array)]["matrix"].append(overlaps)
def has_good_quality_maxDeviation(self, limit_maxDeviation: float = 0.1) -> bool:
"""
will check if the maxDeviation from the ideal bandoverlap is smaller or equal to limit_maxDeviation
Args:
limit_maxDeviation: limit of the maxDeviation
Returns:
Boolean that will give you information about the quality of the projection
"""
for deviation in self.max_deviation:
if deviation > limit_maxDeviation:
return False
return True
def has_good_quality_check_occupied_bands(self, number_occ_bands_spin_up: int,
number_occ_bands_spin_down: Optional[int] = None,
spin_polarized: bool = False, limit_deviation: float = 0.1) -> bool:
"""
will check if the deviation from the ideal bandoverlap of all occupied bands is smaller or equal to
limit_deviation
Args:
number_occ_bands_spin_up (int): number of occupied bands of spin up
number_occ_bands_spin_down (int): number of occupied bands of spin down
spin_polarized (bool): If True, then it was a spin polarized calculation
limit_deviation (float): limit of the maxDeviation
Returns:
Boolean that will give you information about the quality of the projection
"""
for matrix in self.bandoverlapsdict[Spin.up].values():
for iband1, band1 in enumerate(matrix["matrix"]):
for iband2, band2 in enumerate(band1):
if iband1 < number_occ_bands_spin_up and iband2 < number_occ_bands_spin_up:
if iband1 == iband2:
if abs(band2 - 1.0) > limit_deviation:
return False
else:
if band2 > limit_deviation:
return False
if spin_polarized:
for matrix in self.bandoverlapsdict[Spin.down].values():
for iband1, band1 in enumerate(matrix["matrix"]):
for iband2, band2 in enumerate(band1):
if number_occ_bands_spin_down is not None:
if iband1 < number_occ_bands_spin_down and iband2 < number_occ_bands_spin_down:
if iband1 == iband2:
if abs(band2 - 1.0) > limit_deviation:
return False
else:
if band2 > limit_deviation:
return False
else:
ValueError("number_occ_bands_spin_down has to be specified")
return True
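# A quick quality-check sketch (the file name is the Lobster default; the
# band count is illustrative):
#
#     bo = Bandoverlaps("bandOverlaps.lobster")
#     if not bo.has_good_quality_maxDeviation(limit_maxDeviation=0.1):
#         print("projection deviates noticeably from identity")
#     ok = bo.has_good_quality_check_occupied_bands(
#         number_occ_bands_spin_up=12, spin_polarized=False)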
class Grosspop:
"""
Class to read in GROSSPOP.lobster files.
.. attribute: list_dict_grosspop
which is a list of dicts including all information about the grosspopulations, one sample dict looks like this:
    {'element': 'O', 'Mulliken GP': {'2s': 1.80, '2p_y': 1.83, '2p_z': 1.79, '2p_x': 1.75, 'total': 7.18},
    'Loewdin GP': {'2s': 1.60, '2p_y': 1.82, '2p_z': 1.77, '2p_x': 1.73, 'total': 6.92}}
    The 0th entry of the list refers to the first atom in GROSSPOP.lobster and so on.
"""
def __init__(self, filename: str = "GROSSPOP.lobster"):
"""
Args:
filename: filename of the "GROSSPOP.lobster" file
"""
# opens file
with zopen(filename, "rt") as f:
contents = f.read().split("\n")
self.list_dict_grosspop = [] # type: List[Any]
# transfers content of file to list of dict
for line in contents[3:]:
cleanline = [i for i in line.split(" ") if not i == '']
if len(cleanline) == 5:
smalldict = {}
smalldict["element"] = cleanline[1]
smalldict["Mulliken GP"] = {}
smalldict["Loewdin GP"] = {}
smalldict["Mulliken GP"][cleanline[2]] = float(cleanline[3])
smalldict["Loewdin GP"][cleanline[2]] = float(cleanline[4])
elif len(cleanline) > 0:
smalldict["Mulliken GP"][cleanline[0]] = float(cleanline[1])
smalldict["Loewdin GP"][cleanline[0]] = float(cleanline[2])
if 'total' in cleanline[0]:
self.list_dict_grosspop.append(smalldict)
def get_structure_with_total_grosspop(self, structure_filename: str) -> Structure:
"""
get a Structure with Mulliken and Loewdin total grosspopulations as site properties
Args:
structure_filename (str): filename of POSCAR
Returns:
Structure Object with Mulliken and Loewdin total grosspopulations as site properties
"""
struct = Structure.from_file(structure_filename)
site_properties = {} # type: Dict[str, Any]
mullikengp = []
loewdingp = []
for grosspop in self.list_dict_grosspop:
mullikengp.append(grosspop["Mulliken GP"]["total"])
loewdingp.append(grosspop["Loewdin GP"]["total"])
site_properties = {"Total Mulliken GP": mullikengp, "Total Loewdin GP": loewdingp}
new_struct = struct.copy(site_properties=site_properties)
return new_struct
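# Usage sketch (hypothetical file names):
#
#     gp = Grosspop("GROSSPOP.lobster")
#     struct = gp.get_structure_with_total_grosspop("POSCAR")
#     # every site now carries "Total Mulliken GP" and "Total Loewdin GP"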
|
gVallverdu/pymatgen
|
pymatgen/io/lobster.py
|
Python
|
mit
| 79,306
|
[
"VASP",
"pymatgen"
] |
cdb0a42abc2cb9938c5a2fe7b7fb4ec2f5495286cf7ead7f04ddc022a24c3f7f
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
*******************************************
**DumpGRO** - IO Object
*******************************************
* `dump()`
write configuration to trajectory GRO file. By default filename is "out.gro",
coordinates are folded.
Properties
* `filename`
Name of trajectory file. By default trajectory file name is "out.gro"
* `unfolded`
False if coordinates are folded, True if unfolded. By default - False
* `append`
True if new trajectory data is appended to existing trajectory file. By default - True
* `length_factor`
  If the length unit of the current system corresponds to, for example, 0.23 nm, then
  length_factor should be set to 0.23
* `length_unit`
It is length unit. Can be LJ, nm or A. By default - LJ
usage:
writing down trajectory
>>> dump_conf_gro = espresso.io.DumpGRO(system, integrator, filename='trajectory.gro')
>>> for i in range (200):
>>> integrator.run(10)
>>> dump_conf_gro.dump()
writing down trajectory using ExtAnalyze extension
>>> dump_conf_gro = espresso.io.DumpGRO(system, integrator, filename='trajectory.gro')
>>> ext_analyze = espresso.integrator.ExtAnalyze(dump_conf_gro, 10)
>>> integrator.addExtension(ext_analyze)
>>> integrator.run(2000)
Both examples will give the same result: 200 configurations in the trajectory .gro file.
setting up length scale
For example, the Lennard-Jones model for liquid argon with :math:`\sigma=0.34 [nm]`
>>> dump_conf_gro = espresso.io.DumpGRO(system, integrator, filename='trj.gro', unfolded=False, length_factor=0.34, length_unit='nm', append=True)
will produce trj.gro with coordinates in nanometers
"""
from espresso.esutil import cxxinit
from espresso import pmi
from espresso.ParticleAccess import *
from _espresso import io_DumpGRO
class DumpGROLocal(ParticleAccessLocal, io_DumpGRO):
'The (local) storage of configurations.'
def __init__(self, system, integrator, filename='out.gro', unfolded=False, length_factor=1.0, length_unit='LJ', append=True):
cxxinit(self, io_DumpGRO, system, integrator, filename, unfolded, length_factor, length_unit, append)
def dump(self):
if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.dump(self)
if pmi.isController :
class DumpGRO(ParticleAccess):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.io.DumpGROLocal',
pmicall = [ 'dump' ],
pmiproperty = ['filename', 'unfolded', 'length_factor', 'length_unit', 'append']
)
|
BackupTheBerlios/espressopp
|
src/io/DumpGRO.py
|
Python
|
gpl-3.0
| 3,344
|
[
"ESPResSo"
] |
63dc128beb24ceac653f28e92c702be4bbd244c10203633a3d4127d9c99ff787
|
#!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2013 Evangelos Anagnostopoulos
#
def parse_arguments():
import argparse
parser = argparse.ArgumentParser(description=
"Solve binary classification problems stored in libsvm format, "
"using Random Fourier features and SVMOcas")
parser.add_argument('--dataset', required=True, type=str,
help='Path to training dataset in LibSVM format.')
parser.add_argument('--testset', type=str,
help='Path to test dataset in LibSVM format.')
parser.add_argument('-D', default=300, type=int,
help='The number of samples to use')
parser.add_argument('-C', default=0.1, type=float,
help='SVMOcas regularization constant')
parser.add_argument('--epsilon', default=0.01, type=float,
help='SVMOcas epsilon parameter')
parser.add_argument('--width', default=8, type=float,
help='Width of the Gaussian Kernel to approximate')
parser.add_argument('--dimension', type=int,
help='Dimension of input dataset')
return parser.parse_args()
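# Example invocation (the file paths are hypothetical; any binary
# classification dataset in LibSVM format should work):
#
#     python random_fourier_classification.py --dataset train.libsvm \
#         --testset test.libsvm -D 500 -C 0.1 --width 8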
def evaluate(predicted_labels, labels, prefix="Results"):
from modshogun import PRCEvaluation, ROCEvaluation, AccuracyMeasure
prc_evaluator = PRCEvaluation()
roc_evaluator = ROCEvaluation()
acc_evaluator = AccuracyMeasure()
auPRC = prc_evaluator.evaluate(predicted_labels, labels)
auROC = roc_evaluator.evaluate(predicted_labels, labels)
acc = acc_evaluator.evaluate(predicted_labels, labels)
print ('{0}: auPRC = {1:.5f}, auROC = {2:.5f}, acc = {3:.5f} '+
'({4}% incorrectly classified)').format(
prefix, auPRC, auROC, acc, (1-acc)*100)
def load_sparse_data(filename, dimension=None):
    # use the filename argument rather than the global args, so the test set
    # is actually loaded when this is called a second time
    input_file = LibSVMFile(filename)
    sparse_feats = SparseRealFeatures()
    label_array = sparse_feats.load_with_labels(input_file)
    labels = BinaryLabels(label_array)
    if dimension is not None:
        sparse_feats.set_num_features(dimension)
    return {'data': sparse_feats, 'labels': labels}
if __name__=='__main__':
from modshogun import SparseRealFeatures, RandomFourierDotFeatures, GAUSSIAN
from modshogun import LibSVMFile, BinaryLabels, SVMOcas
from modshogun import Time
from numpy import array
args = parse_arguments()
print 'Loading training data...'
sparse_data = load_sparse_data(args.dataset,args.dimension)
kernel_params = array([args.width], dtype=float)
rf_feats = RandomFourierDotFeatures(sparse_data['data'], args.D, GAUSSIAN,
kernel_params)
svm = SVMOcas(args.C, rf_feats, sparse_data['labels'])
svm.set_epsilon(args.epsilon)
print 'Starting training.'
timer = Time()
svm.train()
timer.stop()
print 'Training completed, took {0:.2f}s.'.format(timer.time_diff_sec())
predicted_labels = svm.apply()
evaluate(predicted_labels, sparse_data['labels'], 'Training results')
    if args.testset is not None:
random_coef = rf_feats.get_random_coefficients()
# removing current dataset from memory in order to load the test dataset,
# to avoid running out of memory
rf_feats = None
svm.set_features(None)
svm.set_labels(None)
sparse_data = None
print 'Loading test data...'
sparse_data = load_sparse_data(args.testset, args.dimension)
rf_feats = RandomFourierDotFeatures(sparse_data['data'], args.D, GAUSSIAN,
kernel_params, random_coef)
predicted_labels = svm.apply(rf_feats)
evaluate(predicted_labels, sparse_data['labels'], 'Test results')
|
AzamYahya/shogun
|
applications/classification/random_fourier_classification.py
|
Python
|
gpl-3.0
| 3,552
|
[
"Gaussian"
] |
751015d28c8988972ed337901949d75bd61f7b30812d892d1aa3cce94ead5404
|
import pytest
from uwg import SimParam, Weather, Forcing
import os
EPW_PATH = \
os.path.join(os.path.dirname(__file__), 'epw', 'SGP_Singapore.486980_IWEC.epw')
def test_forcing():
"""Test for forcing.py"""
# setup_forcing
dtSim = 300 # Sim time step
dtWeather = 3600 # Weather data time-step
MONTH = 1 # Begin month
DAY = 1 # Begin day of the month
NUM_DAYS = 31 # Number of days of simulation
simTime = SimParam(dtSim, dtWeather, MONTH, DAY, NUM_DAYS)
print(EPW_PATH)
weather = Weather(EPW_PATH, simTime.timeInitial, simTime.timeFinal)
# initialized Forcing class
forcIP = Forcing(weather.staTemp, weather)
# Forcing tests
assert forcIP.deepTemp == pytest.approx(299.8392473118278, abs=1e-12)
assert forcIP.waterTemp == pytest.approx(299.8392473118278, abs=1e-12)
assert forcIP.wind[0] == pytest.approx(3.2, abs=1e-10)
|
chriswmackey/UWG_Python
|
tests/test_forcing.py
|
Python
|
gpl-3.0
| 954
|
[
"EPW"
] |
2116d3005ed2c6c5991444f7db833314c6a160eddac0342c77d16327d7eeb781
|
"""
.. py:module:: mappers
:platform: Unix
Various mapper implementations. Mappers are functions that map possible feature
value's to the interval [-1, 1]. In Creamas, they are used by individual
agent's to represent agent's preferences over features values.
"""
from creamas.math import gaus_pdf, logistic
from creamas.rules.mapper import Mapper
__all__ = ['BooleanMapper', 'LinearMapper', 'DoubleLinearMapper',
'GaussianMapper', 'LogisticMapper']
class BooleanMapper(Mapper):
"""Boolean mapper that has four different modes.
Depending on the mode, True and False are mapped either to 1, 0, or -1.
======= ======= =======
mode True False
======= ======= =======
'10' 1.0 0.0
'01' 0.0 1.0
'1-1' 1.0 -1.0
'-11' -1.0 1.0
======= ======= =======
"""
modes = ['10', '01', '1-1', '-11']
def __init__(self, mode='10'):
self._value_set = {bool}
self.mode = mode
self._mode_maps = {'10': self._map10, '01': self._map01,
'1-1': self._map1_1, '-11': self._map_11}
def __str__(self):
return "{}({})".format(self.__class__.__name__, self._mode)
@property
def mode(self):
"""Mode of the mapper."""
return self._mode
@mode.setter
def mode(self, value):
if value not in self.modes:
raise ValueError('Value ({}) not found from modes.'.format(value))
self._mode = value
def map(self, value):
return self._mode_maps[self._mode](value)
def _map10(self, value):
return 1.0 if value else 0.0
def _map01(self, value):
return 0.0 if value else 1.0
def _map1_1(self, value):
return 1.0 if value else -1.0
def _map_11(self, value):
return -1.0 if value else 1.0
class LinearMapper(Mapper):
"""Mapper that maps values in given interval linearly.
Can be used for features that return either 'int' or 'float' values.
Based on its mode, maps *lo* and *hi* to different end points and values
between them to a straight line. Depending on the mode, *lo* and *hi* have
following end points:
======= ===== =====
mode lo hi
======= ===== =====
'10' 1.0 0.0
'01' 0.0 1.0
'1-1' 1.0 -1.0
'-11' -1.0 1.0
======= ===== =====
"""
_value_set = {int, float}
modes = ['10', '01', '1-1', '-11']
def __init__(self, lo, hi, mode='01'):
if lo > hi:
raise ValueError('lo ({}) must be smaller than hi ({}).'
.format(lo, hi))
self._lo = lo
self._hi = hi
self._mode_maps = {'10': self._map10, '01': self._map01,
'1-1': self._map1_1, '-11': self._map_11}
self.mode = mode
def __str__(self):
return "{}({}-{},{})".format(self.__class__.__name__, self._lo,
self._hi, self._mode)
@property
def mode(self):
"""Mode of the mapper."""
return self._mode
@mode.setter
def mode(self, value):
if value not in self.modes:
raise ValueError('Value ({}) not found from modes.'.format(value))
self._mode = value
@property
def value_set(self):
"""Accepted value types, i.e. this mapper can be used for the features
that return these types of values."""
return self._value_set
def map(self, value):
return self._mode_maps[self._mode](self._lo, self._hi, value)
def _map10(self, lo, hi, value):
if value < lo:
return 1.0
if value > hi:
return 0.0
diff = hi - lo
val_diff = value - lo
return 1.0 - (float(val_diff) / diff)
def _map01(self, lo, hi, value):
if value < lo:
return 0.0
if value > hi:
return 1.0
diff = hi - lo
val_diff = value - lo
return 0.0 + (float(val_diff) / diff)
def _map1_1(self, lo, hi, value):
if value < lo:
return 1.0
if value > hi:
return -1.0
diff = hi - lo
val_diff = value - lo
return 1.0 - (2 * (float(val_diff) / diff))
def _map_11(self, lo, hi, value):
if value < lo:
return -1.0
if value > hi:
return 1.0
diff = hi - lo
val_diff = value - lo
return -1.0 + (2 * (float(val_diff) / diff))
class DoubleLinearMapper(LinearMapper):
"""Mapper that concatenates two linear mappers.
Can be used for features that return either 'int' or 'float' values.
First line is created from *lo* to *mid* and second line from *mid* to
*hi*. Depending on the mode, *lo*, *mid* and *hi* are mapped to following
end points.
======= ===== ====== ======
mode lo mid hi
======= ===== ====== ======
'10' 1.0 0.0 1.0
'01' 0.0 1.0 0.0
'1-1' 1.0 -1.0 1.0
'-11' -1.0 1.0 -1.0
======= ===== ====== ======
"""
# Reverse modes (modes for second line) for the modes described in the
# LinearMapper.
reverse_modes = ['01', '10', '-11', '1-1']
def __init__(self, lo, mid, hi, mode='01'):
if lo >= mid:
raise ValueError('lo ({}) must be smaller than mid ({}).'
.format(lo, mid))
if mid >= hi:
raise ValueError('mid ({}) must be smaller than hi ({}).'
.format(mid, hi))
self._lo = lo
self._mid = mid
self._hi = hi
self._mode_maps = {'10': self._map10, '01': self._map01,
'1-1': self._map1_1, '-11': self._map_11}
self.mode = mode
self._rmode = self._get_reverse_mode(mode)
def __str__(self):
return "{}({}-{}-{},{})".format(self.__class__.__name__, self._lo,
self._mid, self._hi, self._mode)
def _get_reverse_mode(self, mode):
return self.reverse_modes[self.modes.index(mode)]
@property
def mode(self):
"""Mode of the mapper.
"""
return self._mode
@mode.setter
def mode(self, value):
if value not in self.modes:
raise ValueError('Value ({}) not found from modes.'.format(value))
self._mode = value
self._rmode = self._get_reverse_mode(self._mode)
def map(self, value):
if value <= self._mid:
return self._mode_maps[self._mode](self._lo, self._mid, value)
return self._mode_maps[self._rmode](self._mid, self._hi, value)
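# Sketch: in '01' mode the mapped value rises from *lo* to *mid* and falls
# again towards *hi* (illustrative values):
#
#     >>> dm = DoubleLinearMapper(0, 5, 10, mode='01')
#     >>> dm.map(5)
#     1.0
#     >>> dm.map(7.5)
#     0.5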
class GaussianMapper(Mapper):
"""Gaussian distribution mapper.
The mapped value is relative to given Gaussian distribution's
maximum point (*pmax*, evaluated at point *loc*) and the probability
density function's value at given evaluation point (*pval*).
The actual value calculation changes with the mode of the mapper:
======= =======================
mode mapped value
======= =======================
'10' :math:`1.0 - (pval / pmax)`
'01' :math:`pval / pmax`
'1-1' :math:`1.0 - 2(pval / pmax)`
'-11' :math:`-1.0 + 2(pval / pmax)`
======= =======================
"""
_value_set = {int, float}
modes = ['10', '01', '1-1', '-11']
def __init__(self, mean, std, mode='01'):
"""
:param float mean: mean of the mapping distribution
:param float std: standard deviation of the mapping distribution
:param mode: mode of the mapper: '10', '01', '1-1' or '-11'.
"""
self._mean = mean
self._std = std
self.mode = mode
self._mode_maps = {'10': self._map10, '01': self._map01,
'1-1': self._map1_1, '-11': self._map_11}
def __str__(self):
return "{}({}-{},{})".format(self.__class__.__name__, self._mean,
self._std, self._mode)
@property
def mode(self):
"""Mode of the mapper."""
return self._mode
@mode.setter
def mode(self, value):
if value not in self.modes:
raise ValueError('Value ({}) not found from modes.'.format(value))
self._mode = value
def map(self, value):
return self._mode_maps[self._mode](self._mean, self._std, value)
def _map10(self, mean, std, value):
lmax = gaus_pdf(mean, mean, std)
pdf = gaus_pdf(value, mean, std)
return 1.0 - (pdf / lmax)
def _map01(self, mean, std, value):
lmax = gaus_pdf(mean, mean, std)
pdf = gaus_pdf(value, mean, std)
return pdf / lmax
def _map1_1(self, mean, std, value):
lmax = gaus_pdf(mean, mean, std)
pdf = gaus_pdf(value, mean, std)
return 1.0 - 2 * (pdf / lmax)
def _map_11(self, mean, std, value):
lmax = gaus_pdf(mean, mean, std)
pdf = gaus_pdf(value, mean, std)
return -1.0 + 2 * (pdf / lmax)
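# Sketch: in '01' mode the Gaussian mapper peaks at the mean
# (illustrative values):
#
#     >>> gm = GaussianMapper(0.5, 0.1, mode='01')
#     >>> gm.map(0.5)
#     1.0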
class LogisticMapper(Mapper):
"""Logistic function mapper.
    The mapped value is relative to the logistic function's value at the
    mapping point. Depending on the mode, some transformations (mirroring,
    shifting) might be applied to the mapped value.
"""
_value_set = {int, float}
modes = ['10', '01', '1-1', '-11']
def __init__(self, x0, k, mode='01'):
"""
:param float x0: sigmoid's midpoint
:param float k: steepness of the curve
:param mode: mode of the mapper: '10', '01', '1-1' or '-11'.
"""
self._x0 = x0
self._k = k
self.mode = mode
self._mode_maps = {'10': self._map10, '01': self._map01,
'1-1': self._map1_1, '-11': self._map_11}
@property
def mode(self):
"""Mode of the mapper."""
return self._mode
@mode.setter
def mode(self, value):
if value not in self.modes:
raise ValueError('Value ({}) not found from modes.'.format(value))
self._mode = value
def __str__(self):
return "{}({}-{},{})".format(self.__class__.__name__, self._x0,
self._k, self._mode)
def map(self, value):
return self._mode_maps[self._mode](self._x0, self._k, value)
def _map10(self, x0, k, value):
diff = value - x0
mir_value = x0 - diff
return logistic(mir_value, x0, k, 1.0)
def _map01(self, x0, k, value):
return logistic(value, x0, k, 1.0)
def _map1_1(self, x0, k, value):
diff = value - x0
mir_value = x0 - diff
return logistic(mir_value, x0, k, 2.0) - 1.0
def _map_11(self, x0, k, value):
return logistic(value, x0, k, 2.0) - 1.0
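# Sketch: in '01' mode the logistic mapper evaluates to 0.5 at the
# sigmoid's midpoint, assuming the standard logistic
# L / (1 + exp(-k * (x - x0))) from creamas.math:
#
#     >>> lm = LogisticMapper(0.0, 1.0, mode='01')
#     >>> lm.map(0.0)
#     0.5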
|
assamite/creamas
|
creamas/mappers.py
|
Python
|
gpl-2.0
| 10,909
|
[
"Gaussian"
] |
883a1f52bbeebdd28b06591be49a6948ed4fbb29eb5d655fd325d1c3fcee6b41
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
def weights_and_distributions():
htable = h2o.upload_file(h2o.locate("smalldata/gbm_test/moppe.csv"))
htable["premiekl"] = htable["premiekl"].asfactor()
htable["moptva"] = htable["moptva"].asfactor()
htable["zon"] = htable["zon"]
# gamma
dl = h2o.deeplearning(x=htable[0:3],y=htable["medskad"],training_frame=htable,distribution="gamma",weights_column="antskad")
predictions = dl.predict(htable)
# gaussian
dl = h2o.deeplearning(x=htable[0:3],y=htable["medskad"],training_frame=htable,distribution="gaussian",weights_column="antskad")
predictions = dl.predict(htable)
# poisson
dl = h2o.deeplearning(x=htable[0:3],y=htable["medskad"],training_frame=htable,distribution="poisson",weights_column="antskad")
predictions = dl.predict(htable)
# tweedie
dl = h2o.deeplearning(x=htable[0:3],y=htable["medskad"],training_frame=htable,distribution="tweedie",weights_column="antskad")
predictions = dl.predict(htable)
if __name__ == "__main__":
tests.run_test(sys.argv, weights_and_distributions)
|
brightchen/h2o-3
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_weights_and_distributionsDeeplearning.py
|
Python
|
apache-2.0
| 1,124
|
[
"Gaussian"
] |
06058c56ab86c719444461da398bc6c0d67bfc508f8495a8d15851b7c8bcdcde
|
# -*- coding: utf-8 -*-
# This script is modified version of GraupnerBrunel2012 model by Aditya Gilra.
# Modification is following:
# - Added global seed.
# - Removed some messages.
# - Added assertion.
#
# NOTE: This script is used for testing random number generators on various
# platform. This should not be used in any tutorial or scientific demo.
import moose
print( 'Using moose from %s' % moose.__file__ )
import numpy as np
moose.seed( 10 )
def test_GB2012_STDP():
"""
Simulate a pseudo-STDP protocol and plot the STDP kernel
that emerges from Ca plasticity of Graupner and Brunel 2012.
Author: Aditya Gilra, NCBS, Bangalore, October, 2014.
"""
# ###########################################
# Neuron models
# ###########################################
## Leaky integrate and fire neuron
Vrest = -65e-3 # V # resting potential
Vt_base = -45e-3 # V # threshold
Vreset = -55e-3 # V # in current steps, Vreset is same as pedestal
R = 1e8 # Ohm
tau = 10e-3 # s
refrT = 2e-3 # s
# ###########################################
# Initialize neuron group
# ###########################################
## two neurons: index 0 will be presynaptic, 1 will be postsynaptic
network = moose.LIF( 'network', 2 );
moose.le( '/network' )
network.vec.Em = Vrest
assert np.allclose(network.vec.Em, Vrest), (network.vec.Em, Vrest)
network.vec.thresh = Vt_base
network.vec.refractoryPeriod = refrT
network.vec.Rm = R
network.vec.vReset = Vreset
network.vec.Cm = tau/R
network.vec.inject = 0.
network.vec.initVm = Vrest
tauCa = 20e-3
tauSyn = 150.0
CaPre = 1.0
CaPost = 2.0
delayD = 13.7e-3
thetaD = 1.0
thetaP = 1.3
gammaD = 200.0
gammaP = 321.808
J = 5e-3 # V
weight = 0.5
bistable = True
syn = moose.GraupnerBrunel2012CaPlasticitySynHandler( '/network/syn' )
syn.numSynapses = 1
moose.connect( syn, 'activationOut', network.vec[1], 'activation' )
# synapse from presynaptic neuron
moose.connect( network.vec[0],'spikeOut', syn.synapse[0], 'addSpike')
# post-synaptic spikes also needed for STDP
moose.connect( network.vec[1], 'spikeOut', syn, 'addPostSpike')
syn.synapse[0].delay = 0.0
syn.synapse[0].weight = weight
syn.CaInit = 0.0
syn.tauCa = tauCa
syn.tauSyn = tauSyn
syn.CaPre = CaPre
syn.CaPost = CaPost
syn.delayD = delayD
syn.thetaD = thetaD
syn.thetaP = thetaP
syn.gammaD = gammaD
syn.gammaP = gammaP
syn.weightScale = J
syn.weightMax = 1.0
syn.weightMin = 0.
syn.noisy = True
syn.noiseSD = 1.3333
syn.bistable = bistable
# ###########################################
# Setting up tables
# ###########################################
Vms = moose.Table( '/plotVms', 2 )
moose.connect( network, 'VmOut', Vms, 'input', 'OneToOne')
spikes = moose.Table( '/plotSpikes', 2 )
moose.connect( network, 'spikeOut', spikes, 'input', 'OneToOne')
CaTable = moose.Table( '/plotCa', 1 )
moose.connect( CaTable, 'requestOut', syn, 'getCa')
WtTable = moose.Table( '/plotWeight', 1 )
moose.connect( WtTable, 'requestOut', syn.synapse[0], 'getWeight')
dt = 1e-3
moose.useClock( 0, '/network/syn', 'process' )
moose.useClock( 1, '/network', 'process' )
moose.useClock( 2, '/plotSpikes', 'process' )
moose.useClock( 3, '/plotVms', 'process' )
moose.useClock( 3, '/plotCa', 'process' )
moose.useClock( 3, '/plotWeight', 'process' )
moose.setClock( 0, dt )
moose.setClock( 1, dt )
moose.setClock( 2, dt )
moose.setClock( 3, dt )
moose.setClock( 9, dt )
moose.reinit()
# function to make the aPlus and aMinus settle to equilibrium values
settletime = 10e-3 # s
def reset_settle():
""" Call this between every pre-post pair
to reset the neurons and make them settle to rest.
"""
syn.synapse[0].weight = weight
syn.Ca = 0.0
moose.start(settletime)
# Ca gets a jump at pre-spike+delayD
# So this event can occur during settletime
# So set Ca and weight once more after settletime
syn.synapse[0].weight = weight
syn.Ca = 0.0
# function to inject a sharp current pulse to make neuron spike
# immediately at a given time step
def make_neuron_spike(nrnidx,I=1e-7,duration=1e-3):
""" Inject a brief current pulse to
make a neuron spike
"""
network.vec[nrnidx].inject = I
moose.start(duration)
network.vec[nrnidx].inject = 0.
dwlist_neg = []
ddt = 10e-3 # s
# since CaPlasticitySynHandler is event based
# multiple pairs are needed for Ca to be registered above threshold
# Values from Fig 2, last line of legend
    numpairs = 60 # number of spike pairs per deltat
t_between_pairs = 1.0 # time between each spike pair
t_extent = 100e-3 # s # STDP kernel extent,
# t_extent > t_between_pairs/2 inverts pre-post pairing!
# dt = tpost - tpre
# negative dt corresponds to post before pre
print('-----------------------------------------------')
for deltat in np.arange(t_extent,0.0,-ddt):
reset_settle()
for i in range(numpairs):
# post neuron spike
make_neuron_spike(1)
moose.start(deltat)
# pre neuron spike after deltat
make_neuron_spike(0)
moose.start(t_between_pairs) # weight changes after pre-spike+delayD
# must run for at least delayD after pre-spike
dw = ( syn.synapse[0].weight - weight ) / weight
print(('post before pre, dt = %1.3f s, dw/w = %1.3f'%(-deltat,dw)))
dwlist_neg.append(dw)
print('-----------------------------------------------')
# positive dt corresponds to pre before post
dwlist_pos = []
for deltat in np.arange(ddt,t_extent+ddt,ddt):
reset_settle()
for i in range(numpairs):
# pre neuron spike
make_neuron_spike(0)
moose.start(deltat)
# post neuron spike after deltat
make_neuron_spike(1)
moose.start(t_between_pairs)
dw = ( syn.synapse[0].weight - weight ) / weight
print(('pre before post, dt = %1.3f s, dw/w = %1.3f'%(deltat,dw)))
dwlist_pos.append(dw)
Vmseries0 = Vms.vec[0].vector
numsteps = len(Vmseries0)
for t in spikes.vec[0].vector:
Vmseries0[int(t/dt)-1] = 30e-3 # V
Vmseries1 = Vms.vec[1].vector
for t in spikes.vec[1].vector:
Vmseries1[int(t/dt)-1] = 30e-3 # V
timeseries = np.linspace(0.,200*numsteps*dt,numsteps)
# STDP curve
up, sp = np.mean( dwlist_pos ), np.std( dwlist_pos )
un, sn = np.mean( dwlist_neg ), np.std( dwlist_neg )
expected = [0.32476025611655324, 0.22658173497286094,
0.02706212384326734, -0.2176119329016457, -0.17349820098625146,
-0.049000627347906, 0.10942145078777199, 0.015381955378225953,
0.004742824127517586, -0.12298343312253879]
assert np.isclose(dwlist_pos[1:], expected[1:]).all(), "Got %s \nexpected %s" % (dwlist_pos, expected)
expected = [-0.07871282492831622, 0.11915009122888964,
-0.028510348966579557, 0.11812233585111875, 0.05098143255634335,
-0.2303047508248669, 0.18033418630802123, -0.019377885225611347,
-0.06038610826728241, 0.06575882890278106]
assert np.isclose(dwlist_neg[1:], expected[1:]).all(), "Got %s\nexpected %s" % (dwlist_neg,
expected)
got = (up, sp)
expNew = (0.014485615086785508, 0.16206703949072981)
assert np.isclose(got, expNew).all(), 'Expected: %s, Got: %s' % (str(expNew), str(got))
def main():
test_GB2012_STDP()
if __name__ == '__main__':
main()
|
BhallaLab/moose-core
|
tests/core/test_GraupnerBrunel2012_STDPfromCaPlasticity.py
|
Python
|
gpl-3.0
| 8,008
|
[
"MOOSE",
"NEURON"
] |
f79f1602f209b49a5105e24f267c941aa19be807b6968c3d5fb980562865c58c
|
from __future__ import absolute_import
from django.utils.functional import cached_property
from parsimonious.exceptions import IncompleteParseError
from sentry.api.event_search import (
event_search_grammar,
InvalidSearchQuery,
SearchFilter,
SearchKey,
SearchValue,
SearchVisitor,
)
from sentry.constants import STATUS_CHOICES
from sentry.search.utils import (
parse_actor_value,
parse_user_value,
parse_release,
parse_status_value,
)
class IssueSearchVisitor(SearchVisitor):
key_mappings = {
"assigned_to": ["assigned"],
"bookmarked_by": ["bookmarks"],
"subscribed_by": ["subscribed"],
"first_release": ["first-release", "firstRelease"],
"first_seen": ["age", "firstSeen"],
"last_seen": ["lastSeen"],
"active_at": ["activeSince"],
# TODO: Special case this in the backends, since they currently rely
# on date_from and date_to explicitly
"date": ["event.timestamp"],
"times_seen": ["timesSeen"],
"sentry:dist": ["dist"],
}
numeric_keys = SearchVisitor.numeric_keys.union(["times_seen"])
date_keys = SearchVisitor.date_keys.union(["active_at", "date"])
@cached_property
def is_filter_translators(self):
is_filter_translators = {
"assigned": (SearchKey("unassigned"), SearchValue(False)),
"unassigned": (SearchKey("unassigned"), SearchValue(True)),
}
for status_key, status_value in STATUS_CHOICES.items():
is_filter_translators[status_key] = (SearchKey("status"), SearchValue(status_value))
return is_filter_translators
def visit_is_filter(self, node, children):
# the key is "is" here, which we don't need
negation, _, _, search_value = children
if search_value.raw_value not in self.is_filter_translators:
raise InvalidSearchQuery(
'Invalid value for "is" search, valid values are {}'.format(
sorted(self.is_filter_translators.keys())
)
)
search_key, search_value = self.is_filter_translators[search_value.raw_value]
operator = "!=" if self.is_negated(negation) else "="
return SearchFilter(search_key, operator, search_value)
def visit_boolean_operator(self, node, children):
raise InvalidSearchQuery(
'Boolean statements containing "OR" or "AND" are not supported in this search'
)
def parse_search_query(query):
try:
tree = event_search_grammar.parse(query)
except IncompleteParseError as e:
raise InvalidSearchQuery(
"%s %s"
% (
u"Parse error: %r (column %d)." % (e.expr.name, e.column()),
"This is commonly caused by unmatched-parentheses. Enclose any text in double quotes.",
)
)
return IssueSearchVisitor().visit(tree)
def convert_actor_value(value, projects, user, environments):
return parse_actor_value(projects, value, user)
def convert_user_value(value, projects, user, environments):
return parse_user_value(value, user)
def convert_release_value(value, projects, user, environments):
return parse_release(value, projects, environments)
def convert_status_value(value, projects, user, environments):
try:
return parse_status_value(value)
except ValueError:
raise InvalidSearchQuery(u"invalid status value of '{}'".format(value))
value_converters = {
"assigned_to": convert_actor_value,
"bookmarked_by": convert_user_value,
"subscribed_by": convert_user_value,
"first_release": convert_release_value,
"release": convert_release_value,
"status": convert_status_value,
}
def convert_query_values(search_filters, projects, user, environments):
"""
Accepts a collection of SearchFilter objects and converts their values into
a specific format, based on converters specified in `value_converters`.
:param search_filters: Collection of `SearchFilter` objects.
:param projects: List of projects being searched across
:param user: The user making the search
:return: New collection of `SearchFilters`, which may have converted values.
"""
def convert_search_filter(search_filter):
if search_filter.key.name in value_converters:
converter = value_converters[search_filter.key.name]
new_value = converter(search_filter.value.raw_value, projects, user, environments)
search_filter = search_filter._replace(value=SearchValue(new_value))
return search_filter
return map(convert_search_filter, search_filters)
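# Illustrative usage (the query string and the surrounding objects are
# hypothetical):
#
#     filters = parse_search_query("is:unresolved")
#     filters = convert_query_values(filters, projects, user, environments)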
|
mvaled/sentry
|
src/sentry/api/issue_search.py
|
Python
|
bsd-3-clause
| 4,681
|
[
"VisIt"
] |
3076dd529fee2a5b6f33724e4e72e54426e5b13e6aee845acf392757a8de7e7b
|
"""rbf - Radial basis functions for interpolation/smoothing scattered N-D data.
Written by John Travers <jtravs@gmail.com>, February 2007
Based closely on Matlab code by Alex Chirokov
Additional, large, improvements by Robert Hetland
Some additional alterations by Travis Oliphant
Interpolation with multi-dimensional target domain by Josua Sassen
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu>
Copyright (c) 2007, John Travers <jtravs@gmail.com>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Robert Hetland nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
from scipy import linalg
from scipy.special import xlogy
from scipy.spatial.distance import cdist, pdist, squareform
__all__ = ['Rbf']
class Rbf(object):
"""
Rbf(*args)
A class for radial basis function interpolation of functions from
N-D scattered data to an M-D domain.
Parameters
----------
*args : arrays
x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
and d is the array of values at the nodes
function : str or callable, optional
The radial basis function, based on the radius, r, given by the norm
(default is Euclidean distance); the default is 'multiquadric'::
'multiquadric': sqrt((r/self.epsilon)**2 + 1)
'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
'gaussian': exp(-(r/self.epsilon)**2)
'linear': r
'cubic': r**3
'quintic': r**5
'thin_plate': r**2 * log(r)
If callable, then it must take 2 arguments (self, r). The epsilon
parameter will be available as self.epsilon. Other keyword
arguments passed in will be available as well.
epsilon : float, optional
Adjustable constant for gaussian or multiquadrics functions
- defaults to approximate average distance between nodes (which is
a good start).
smooth : float, optional
Values greater than zero increase the smoothness of the
approximation. 0 is for interpolation (default), the function will
always go through the nodal points in this case.
norm : str, callable, optional
A function that returns the 'distance' between two points, with
inputs as arrays of positions (x, y, z, ...), and an output as an
array of distance. E.g., the default: 'euclidean', such that the result
is a matrix of the distances from each point in ``x1`` to each point in
``x2``. For more options, see documentation of
`scipy.spatial.distances.cdist`.
mode : str, optional
Mode of the interpolation, can be '1-D' (default) or 'N-D'. When it is
'1-D' the data `d` will be considered as 1-D and flattened
internally. When it is 'N-D' the data `d` is assumed to be an array of
shape (n_samples, m), where m is the dimension of the target domain.
Attributes
----------
N : int
The number of data points (as determined by the input arrays).
di : ndarray
The 1-D array of data values at each of the data coordinates `xi`.
xi : ndarray
The 2-D array of data coordinates.
function : str or callable
The radial basis function. See description under Parameters.
epsilon : float
Parameter used by gaussian or multiquadrics functions. See Parameters.
smooth : float
Smoothing parameter. See description under Parameters.
norm : str or callable
The distance function. See description under Parameters.
mode : str
Mode of the interpolation. See description under Parameters.
nodes : ndarray
A 1-D array of node values for the interpolation.
A : internal property, do not use
Examples
--------
>>> from scipy.interpolate import Rbf
>>> x, y, z, d = np.random.rand(4, 50)
>>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance
>>> xi = yi = zi = np.linspace(0, 1, 20)
>>> di = rbfi(xi, yi, zi) # interpolated values
>>> di.shape
(20,)
"""
# Available radial basis functions that can be selected as strings;
# they all start with _h_ (self._init_function relies on that)
def _h_multiquadric(self, r):
return np.sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_inverse_multiquadric(self, r):
return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_gaussian(self, r):
return np.exp(-(1.0/self.epsilon*r)**2)
def _h_linear(self, r):
return r
def _h_cubic(self, r):
return r**3
def _h_quintic(self, r):
return r**5
def _h_thin_plate(self, r):
return xlogy(r**2, r)
# Setup self._function and do smoke test on initial r
def _init_function(self, r):
if isinstance(self.function, str):
self.function = self.function.lower()
_mapped = {'inverse': 'inverse_multiquadric',
'inverse multiquadric': 'inverse_multiquadric',
'thin-plate': 'thin_plate'}
if self.function in _mapped:
self.function = _mapped[self.function]
func_name = "_h_" + self.function
if hasattr(self, func_name):
self._function = getattr(self, func_name)
else:
functionlist = [x[3:] for x in dir(self)
if x.startswith('_h_')]
raise ValueError("function must be a callable or one of " +
", ".join(functionlist))
self._function = getattr(self, "_h_"+self.function)
elif callable(self.function):
allow_one = False
if hasattr(self.function, 'func_code') or \
hasattr(self.function, '__code__'):
val = self.function
allow_one = True
elif hasattr(self.function, "__call__"):
val = self.function.__call__.__func__
else:
raise ValueError("Cannot determine number of arguments to "
"function")
argcount = val.__code__.co_argcount
if allow_one and argcount == 1:
self._function = self.function
elif argcount == 2:
self._function = self.function.__get__(self, Rbf)
else:
raise ValueError("Function argument must take 1 or 2 "
"arguments.")
a0 = self._function(r)
if a0.shape != r.shape:
raise ValueError("Callable must take array and return array of "
"the same shape")
return a0
def __init__(self, *args, **kwargs):
# `args` can be a variable number of arrays; we flatten them and store
# them as a single 2-D array `xi` of shape (n_args-1, array_size),
# plus a 1-D array `di` for the values.
# All arrays must have the same number of elements
self.xi = np.asarray([np.asarray(a, dtype=np.float_).flatten()
for a in args[:-1]])
self.N = self.xi.shape[-1]
self.mode = kwargs.pop('mode', '1-D')
if self.mode == '1-D':
self.di = np.asarray(args[-1]).flatten()
self._target_dim = 1
elif self.mode == 'N-D':
self.di = np.asarray(args[-1])
self._target_dim = self.di.shape[-1]
else:
raise ValueError("Mode has to be 1-D or N-D.")
if not all([x.size == self.di.shape[0] for x in self.xi]):
raise ValueError("All arrays must be equal length.")
self.norm = kwargs.pop('norm', 'euclidean')
self.epsilon = kwargs.pop('epsilon', None)
if self.epsilon is None:
            # default epsilon is "the average distance between nodes" based
# on a bounding hypercube
ximax = np.amax(self.xi, axis=1)
ximin = np.amin(self.xi, axis=1)
edges = ximax - ximin
edges = edges[np.nonzero(edges)]
self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)
self.smooth = kwargs.pop('smooth', 0.0)
self.function = kwargs.pop('function', 'multiquadric')
# attach anything left in kwargs to self for use by any user-callable
# function or to save on the object returned.
for item, value in kwargs.items():
setattr(self, item, value)
# Compute weights
if self._target_dim > 1: # If we have more than one target dimension,
# we first factorize the matrix
self.nodes = np.zeros((self.N, self._target_dim), dtype=self.di.dtype)
lu, piv = linalg.lu_factor(self.A)
for i in range(self._target_dim):
self.nodes[:, i] = linalg.lu_solve((lu, piv), self.di[:, i])
else:
self.nodes = linalg.solve(self.A, self.di)
@property
def A(self):
# this only exists for backwards compatibility: self.A was available
# and, at least technically, public.
r = squareform(pdist(self.xi.T, self.norm)) # Pairwise norm
return self._init_function(r) - np.eye(self.N)*self.smooth
def _call_norm(self, x1, x2):
return cdist(x1.T, x2.T, self.norm)
def __call__(self, *args):
args = [np.asarray(x) for x in args]
if not all([x.shape == y.shape for x in args for y in args]):
raise ValueError("Array lengths must be equal")
if self._target_dim > 1:
shp = args[0].shape + (self._target_dim,)
else:
shp = args[0].shape
xa = np.asarray([a.flatten() for a in args], dtype=np.float_)
r = self._call_norm(xa, self.xi)
return np.dot(self._function(r), self.nodes).reshape(shp)
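# A sketch of the 'N-D' mode described above (shapes are illustrative):
#
#     x, y = np.random.rand(2, 50)
#     d = np.random.rand(50, 3)      # target domain of dimension 3
#     rbfi = Rbf(x, y, d, mode='N-D')
#     di = rbfi(x, y)                # di.shape == (50, 3)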
|
pizzathief/scipy
|
scipy/interpolate/rbf.py
|
Python
|
bsd-3-clause
| 11,446
|
[
"Gaussian"
] |
28715070a840ecb477da18325bc6b5dedf41de3f678e6ead3df5a04d1d398878
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-classes-have-attributes
"""Keras layers that implement explicit (approximate) kernel feature maps."""
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
_SUPPORTED_RBF_KERNEL_TYPES = ['gaussian', 'laplacian']
@keras_export('keras.layers.experimental.RandomFourierFeatures')
class RandomFourierFeatures(base_layer.Layer):
r"""Layer that projects its inputs into a random feature space.
This layer implements a mapping from input space to a space with `output_dim`
dimensions, which approximates shift-invariant kernels. A kernel function
`K(x, y)` is shift-invariant if `K(x, y) == k(x - y)` for some function `k`.
Many popular Radial Basis Functions (RBF), including Gaussian and
Laplacian kernels, are shift-invariant.
The implementation of this layer is based on the following paper:
["Random Features for Large-Scale Kernel Machines"](
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
by Ali Rahimi and Ben Recht.
The distribution from which the parameters of the random features map (layer)
are sampled determines which shift-invariant kernel the layer approximates
(see paper for more details). You can use the distribution of your
choice. The layer supports out-of-the-box
  approximations of the following two RBF kernels:
- Gaussian: `K(x, y) == exp(- square(x - y) / (2 * square(scale)))`
- Laplacian: `K(x, y) = exp(-abs(x - y) / scale))`
**Note:** Unlike what is described in the paper and unlike what is used in
the Scikit-Learn implementation, the output of this layer does not apply
the `sqrt(2 / D)` normalization factor.
**Usage:** Typically, this layer is used to "kernelize" linear models by
applying a non-linear transformation (this layer) to the input features and
then training a linear model on top of the transformed features. Depending on
the loss function of the linear model, the composition of this layer and the
  linear model results in models that are equivalent (up to approximation) to
kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss),
kernel linear regression (for squared loss), etc.
Examples:
A kernel multinomial logistic regression model with Gaussian kernel for MNIST:
```python
model = keras.Sequential([
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096,
scale=10.,
kernel_initializer='gaussian'),
layers.Dense(units=10, activation='softmax'),
])
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['categorical_accuracy']
)
```
A quasi-SVM classifier for MNIST:
```python
model = keras.Sequential([
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096,
scale=10.,
kernel_initializer='gaussian'),
layers.Dense(units=10),
])
model.compile(
optimizer='adam',
loss='hinge',
metrics=['categorical_accuracy']
)
```
To use another kernel, just replace the layer creation line with:
```python
random_features_layer = RandomFourierFeatures(
output_dim=500,
kernel_initializer=<my_initializer>,
scale=...,
...)
```
Args:
output_dim: Positive integer, the dimension of the layer's output, i.e., the
number of random features used to approximate the kernel.
kernel_initializer: Determines the distribution of the parameters of the
random features map (and therefore the kernel approximated by the layer).
It can be either a string identifier or a Keras `Initializer` instance.
Currently only 'gaussian' and 'laplacian' are supported string
identifiers (case insensitive). Note that the kernel matrix is not
trainable.
scale: For Gaussian and Laplacian kernels, this corresponds to a scaling
factor of the corresponding kernel approximated by the layer (see concrete
definitions above). When provided, it should be a positive float. If None,
a default value is used: if the kernel initializer is set to "gaussian",
`scale` defaults to `sqrt(input_dim / 2)`, otherwise, it defaults to 1.0.
Both the approximation error of the kernel and the classification quality
are sensitive to this parameter. If `trainable` is set to `True`, this
parameter is learned end-to-end during training and the provided value
serves as the initial value.
**Note:** When features from this layer are fed to a linear model,
by making `scale` trainable, the resulting optimization problem is
no longer convex (even if the loss function used by the linear model
is convex).
trainable: Whether the scaling parameter of the layer should be trainable.
Defaults to `False`.
name: String, name to use for this layer.
"""
def __init__(self,
output_dim,
kernel_initializer='gaussian',
scale=None,
trainable=False,
name=None,
**kwargs):
if output_dim <= 0:
raise ValueError(
'`output_dim` should be a positive integer. Given: {}.'.format(
output_dim))
if isinstance(kernel_initializer, str):
if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES:
raise ValueError(
'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'
.format(kernel_initializer, _SUPPORTED_RBF_KERNEL_TYPES))
if scale is not None and scale <= 0.0:
raise ValueError('When provided, `scale` should be a positive float. '
'Given: {}.'.format(scale))
super(RandomFourierFeatures, self).__init__(
trainable=trainable, name=name, **kwargs)
self.output_dim = output_dim
self.kernel_initializer = kernel_initializer
self.scale = scale
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
# TODO(sibyl-vie3Poto): Allow higher dimension inputs. Currently the input is expected
# to have shape [batch_size, dimension].
if input_shape.rank != 2:
raise ValueError(
'The rank of the input tensor should be 2. Got {} instead.'.format(
input_shape.ndims))
if input_shape.dims[1].value is None:
raise ValueError(
'The last dimension of the inputs to `RandomFourierFeatures` '
'should be defined. Found `None`.')
self.input_spec = input_spec.InputSpec(
ndim=2, axes={1: input_shape.dims[1].value})
input_dim = input_shape.dims[1].value
kernel_initializer = _get_random_features_initializer(
self.kernel_initializer, shape=(input_dim, self.output_dim))
self.unscaled_kernel = self.add_weight(
name='unscaled_kernel',
shape=(input_dim, self.output_dim),
dtype=dtypes.float32,
initializer=kernel_initializer,
trainable=False)
self.bias = self.add_weight(
name='bias',
shape=(self.output_dim,),
dtype=dtypes.float32,
initializer=init_ops.random_uniform_initializer(
minval=0.0, maxval=2 * np.pi, dtype=dtypes.float32),
trainable=False)
if self.scale is None:
self.scale = _get_default_scale(self.kernel_initializer, input_dim)
self.kernel_scale = self.add_weight(
name='kernel_scale',
shape=(1,),
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(self.scale),
trainable=True,
constraint='NonNeg')
super(RandomFourierFeatures, self).build(input_shape)
def call(self, inputs):
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs, dtype=self.dtype)
inputs = math_ops.cast(inputs, dtypes.float32)
kernel = (1.0 / self.kernel_scale) * self.unscaled_kernel
outputs = gen_math_ops.MatMul(a=inputs, b=kernel)
outputs = nn.bias_add(outputs, self.bias)
return gen_math_ops.cos(outputs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank(2)
if input_shape.dims[-1].value is None:
raise ValueError(
'The innermost dimension of input shape must be defined. Given: %s' %
input_shape)
return input_shape[:-1].concatenate(self.output_dim)
def get_config(self):
kernel_initializer = self.kernel_initializer
if not isinstance(kernel_initializer, str):
kernel_initializer = initializers.serialize(kernel_initializer)
config = {
'output_dim': self.output_dim,
'kernel_initializer': kernel_initializer,
'scale': self.scale,
}
base_config = super(RandomFourierFeatures, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _get_random_features_initializer(initializer, shape):
"""Returns Initializer object for random features."""
def _get_cauchy_samples(loc, scale, shape):
probs = np.random.uniform(low=0., high=1., size=shape)
return loc + scale * np.tan(np.pi * (probs - 0.5))
random_features_initializer = initializer
if isinstance(initializer, str):
if initializer.lower() == 'gaussian':
random_features_initializer = init_ops.random_normal_initializer(
stddev=1.0)
elif initializer.lower() == 'laplacian':
random_features_initializer = init_ops.constant_initializer(
_get_cauchy_samples(loc=0.0, scale=1.0, shape=shape))
else:
raise ValueError(
'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'.format(
random_features_initializer, _SUPPORTED_RBF_KERNEL_TYPES))
return random_features_initializer
def _get_default_scale(initializer, input_dim):
if (isinstance(initializer, str) and
initializer.lower() == 'gaussian'):
return np.sqrt(input_dim / 2.0)
return 1.0
|
tensorflow/tensorflow
|
tensorflow/python/keras/layers/kernelized.py
|
Python
|
apache-2.0
| 11,017
|
[
"Gaussian"
] |
76e75d282851a1ee995964cdf8e4777800435e70d362921f32f78f15c9b5c243
|
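The record above ends TensorFlow's RandomFourierFeatures layer: inputs are mapped through cos(x W / kernel_scale + b), so that inner products of the outputs approximate a shift-invariant kernel (Gaussian weights give an RBF kernel, Cauchy weights a Laplacian one). A minimal NumPy sketch of the underlying Rahimi-Recht construction, independent of TensorFlow and with all names illustrative:

import numpy as np

def rff_map(x, w, b):
    """Random Fourier feature map: sqrt(2/D) * cos(x @ w + b)."""
    d = w.shape[1]
    return np.sqrt(2.0 / d) * np.cos(x @ w + b)

rng = np.random.default_rng(0)
dim, n_features, scale = 4, 20000, 1.5          # `scale` plays the role of kernel_scale
w = rng.normal(size=(dim, n_features)) / scale  # w ~ N(0, scale**-2 I) approximates an RBF kernel
b = rng.uniform(0.0, 2.0 * np.pi, size=n_features)
x = rng.normal(size=(1, dim))
y = rng.normal(size=(1, dim))
approx = (rff_map(x, w, b) @ rff_map(y, w, b).T).item()
exact = np.exp(-((x - y) ** 2).sum() / (2.0 * scale ** 2))
print(approx, exact)  # the two values agree increasingly well as n_features grows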
## Copyright 2016 Kurt Cutajar, Edwin V. Bonilla, Pietro Michiardi, Maurizio Filippone
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from .likelihood import Likelihood
from .gaussian import Gaussian
from .softmax import Softmax
|
mauriziofilippone/deep_gp_random_features
|
code/likelihoods/__init__.py
|
Python
|
apache-2.0
| 741
|
[
"Gaussian"
] |
d89c0000246279422494da9c0a5049b7c9b7b071e0cf39cbe6a9af3f6e739194
|
'''<b>Smooth</b> smooths (i.e., blurs) images.
<hr>
This module allows you to smooth (blur) images, which can be helpful to
remove artifacts of a particular size.
Note that smoothing can be a time-consuming process.
'''
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import numpy as np
import scipy.ndimage as scind
import cellprofiler.cpmodule as cpm
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.cpimage as cpi
from cellprofiler.cpmath.smooth import smooth_with_function_and_mask
from cellprofiler.cpmath.smooth import circular_gaussian_kernel
from cellprofiler.cpmath.smooth import fit_polynomial
from cellprofiler.cpmath.filter import median_filter, bilateral_filter, circular_average_filter
from cellprofiler.gui.help import HELP_ON_MEASURING_DISTANCES, HELP_ON_PIXEL_INTENSITIES
FIT_POLYNOMIAL = 'Fit Polynomial'
MEDIAN_FILTER = 'Median Filter'
GAUSSIAN_FILTER = 'Gaussian Filter'
SMOOTH_KEEPING_EDGES = 'Smooth Keeping Edges'
CIRCULAR_AVERAGE_FILTER = 'Circular Average Filter'
SM_TO_AVERAGE = "Smooth to Average"
class Smooth(cpm.CPModule):
module_name = 'Smooth'
category = "Image Processing"
variable_revision_number = 2
def create_settings(self):
self.image_name = cps.ImageNameSubscriber('Select the input image',cps.NONE)
self.filtered_image_name = cps.ImageNameProvider('Name the output image','FilteredImage')
self.smoothing_method = cps.Choice(
'Select smoothing method',
[FIT_POLYNOMIAL, GAUSSIAN_FILTER,MEDIAN_FILTER, SMOOTH_KEEPING_EDGES,CIRCULAR_AVERAGE_FILTER, SM_TO_AVERAGE],doc="""
This module smooths images using one of several filters.
Fitting a polynomial
is fastest but does not allow a very tight fit compared to the other methods:
<ul>
<li><i>%(FIT_POLYNOMIAL)s:</i> This method treats the intensity of the image pixels
as a polynomial function of the x and y position of
each pixel. It fits the intensity to the polynomial,
<i>A x<sup>2</sup> + B y<sup>2</sup> + C xy + D x + E y + F</i>.
This will produce a smoothed image with a single peak or trough of intensity
that tapers off elsewhere in the image. For many microscopy images (where
the illumination of the lamp is brightest in the center of field of view),
this method will produce an image with a bright central region and dimmer
edges. But, in some cases the peak/trough of the polynomial may actually
occur outside of the image itself.</li>
<li><i>%(GAUSSIAN_FILTER)s:</i> This method convolves the image with a Gaussian whose
full width at half maximum is the artifact diameter entered.
Its effect is to blur and obscure features
smaller than the artifact diameter and spread bright or
dim features larger than the artifact diameter.</li>
<li><i>%(MEDIAN_FILTER)s:</i> This method finds the median pixel value within the
artifact diameter you specify. It removes bright or dim features that are much smaller
than the artifact diameter.</li>
<li><i>%(SMOOTH_KEEPING_EDGES)s:</i> This method uses a bilateral filter which
limits Gaussian smoothing across an edge while
applying smoothing perpendicular to an edge. The effect
is to respect edges in an image while smoothing other
features. <i>%(SMOOTH_KEEPING_EDGES)s</i> will filter an image with reasonable
speed for artifact diameters greater than 10 and for
intensity differences greater than 0.1. The algorithm
will consume more memory and operate more slowly as
you lower these numbers.</li>
<li><i>%(CIRCULAR_AVERAGE_FILTER)s:</i> This method convolves the image with
a uniform circular averaging filter whose size is the artifact diameter entered. This filter is
useful for re-creating an out-of-focus blur to an image.</li>
<li><i>%(SM_TO_AVERAGE)s:</i> Creates a flat, smooth image where every pixel
of the image equals the average value of the original image.</li>
</ul>"""%globals())
self.wants_automatic_object_size = cps.Binary(
'Calculate artifact diameter automatically?',True,doc="""
<i>(Used only if "%(GAUSSIAN_FILTER)s", "%(MEDIAN_FILTER)s", "%(SMOOTH_KEEPING_EDGES)s" or "%(CIRCULAR_AVERAGE_FILTER)s" is selected)</i><br>
        Select <i>%(YES)s</i> to choose an artifact diameter based on
        the size of the image: the diameter is set to 1/40 of the mean
        image dimension, capped at 30 pixels.
        <p>Select <i>%(NO)s</i> to manually enter an artifact diameter.</p>"""%globals())
self.object_size = cps.Float(
'Typical artifact diameter',16.0,doc="""
<i>(Used only if choosing the artifact diameter automatically is set to "%(NO)s")</i><br>
Enter the approximate diameter (in pixels) of the features to be blurred by
the smoothing algorithm. This value is used to calculate the size of
the spatial filter. %(HELP_ON_MEASURING_DISTANCES)s
For most smoothing methods, selecting a
diameter over ~50 will take substantial amounts of time to process."""%globals())
self.sigma_range = cps.Float(
'Edge intensity difference', 0.1,doc="""
<i>(Used only if "%(SMOOTH_KEEPING_EDGES)s" is selected)</i><br>
Enter the intensity step (which indicates an edge in an image) that you want to preserve.
Edges are locations where the intensity changes precipitously, so this
setting is used to adjust the rough magnitude of these changes. A lower
number will preserve weaker edges. A higher number will preserve only stronger edges.
Values should be between zero and one. %(HELP_ON_PIXEL_INTENSITIES)s"""%globals())
self.clip = cps.Binary(
'Clip intensities to 0 and 1?', True,doc="""
<i>(Used only if %(FIT_POLYNOMIAL)s is selected)</i><br>
The <i>%(FIT_POLYNOMIAL)s</i> method is the only smoothing option that can yield
an output image whose values are outside of the values of the
input image. This setting controls whether to limit the image
intensity to the 0 - 1 range used by CellProfiler.
<p>Select <i>%(YES)s</i> to set all output image pixels less than zero to zero
and all pixels greater than one to one. </p>
<p>Select <i>%(NO)s</i> to
allow values less than zero and greater than one in the output
image.</p>"""%globals())
def settings(self):
return [self.image_name, self.filtered_image_name,
self.smoothing_method, self.wants_automatic_object_size,
self.object_size, self.sigma_range, self.clip]
def visible_settings(self):
result = [self.image_name, self.filtered_image_name,
self.smoothing_method]
if self.smoothing_method.value not in [FIT_POLYNOMIAL,SM_TO_AVERAGE]:
result.append(self.wants_automatic_object_size)
if not self.wants_automatic_object_size.value:
result.append(self.object_size)
if self.smoothing_method.value == SMOOTH_KEEPING_EDGES:
result.append(self.sigma_range)
if self.smoothing_method.value == FIT_POLYNOMIAL:
result.append(self.clip)
return result
def run(self, workspace):
image = workspace.image_set.get_image(self.image_name.value,
must_be_grayscale=True)
pixel_data = image.pixel_data
if self.wants_automatic_object_size.value:
object_size = min(30,max(1,np.mean(pixel_data.shape)/40))
else:
object_size = float(self.object_size.value)
sigma = object_size / 2.35
if self.smoothing_method.value == GAUSSIAN_FILTER:
def fn(image):
return scind.gaussian_filter(image, sigma,
mode='constant', cval=0)
output_pixels = smooth_with_function_and_mask(pixel_data, fn,
image.mask)
elif self.smoothing_method.value == MEDIAN_FILTER:
output_pixels = median_filter(pixel_data, image.mask,
object_size/2+1)
elif self.smoothing_method.value == SMOOTH_KEEPING_EDGES:
sigma_range = float(self.sigma_range.value)
output_pixels = bilateral_filter(pixel_data, image.mask,
sigma, sigma_range)
elif self.smoothing_method.value == FIT_POLYNOMIAL:
output_pixels = fit_polynomial(pixel_data, image.mask,
self.clip.value)
elif self.smoothing_method.value == CIRCULAR_AVERAGE_FILTER:
output_pixels = circular_average_filter(pixel_data, object_size/2+1, image.mask)
elif self.smoothing_method.value == SM_TO_AVERAGE:
if image.has_mask:
mean = np.mean(pixel_data[image.mask])
else:
mean = np.mean(pixel_data)
output_pixels = np.ones(pixel_data.shape, pixel_data.dtype) * mean
else:
raise ValueError("Unsupported smoothing method: %s" %
self.smoothing_method.value)
output_image = cpi.Image(output_pixels, parent_image = image)
workspace.image_set.add(self.filtered_image_name.value,
output_image)
workspace.display_data.pixel_data = pixel_data
workspace.display_data.output_pixels = output_pixels
def display(self, workspace, figure):
figure.set_subplots((2, 1))
figure.subplot_imshow_grayscale(0, 0,
workspace.display_data.pixel_data,
"Original: %s" %
self.image_name.value)
figure.subplot_imshow_grayscale(1, 0,
workspace.display_data.output_pixels,
"Filtered: %s" %
self.filtered_image_name.value,
sharexy = figure.subplot(0,0))
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
if (module_name == 'SmoothKeepingEdges' and from_matlab and
variable_revision_number == 1):
image_name, smoothed_image_name, spatial_radius, \
intensity_radius = setting_values
setting_values = [image_name,
smoothed_image_name,
'Smooth Keeping Edges',
'Automatic',
cps.DO_NOT_USE,
cps.NO,
spatial_radius,
intensity_radius]
module_name = 'SmoothOrEnhance'
variable_revision_number = 5
if (module_name == 'SmoothOrEnhance' and from_matlab and
variable_revision_number == 4):
# Added spatial radius
setting_values = setting_values + ["0.1"]
variable_revision_number = 5
if (module_name == 'SmoothOrEnhance' and from_matlab and
variable_revision_number == 5):
if setting_values[2] in ('Remove BrightRoundSpeckles',
'Enhance BrightRoundSpeckles (Tophat Filter)'):
raise ValueError('The Smooth module does not support speckles operations. Please use EnhanceOrSuppressFeatures with the Speckles feature type instead')
setting_values = [setting_values[0], # image name
setting_values[1], # result name
setting_values[2], # smoothing method
cps.YES if setting_values[3] == 'Automatic'
else cps.NO, # wants smoothing
'16.0' if setting_values[3] == 'Automatic'
else (setting_values[6]
if setting_values[2] == SMOOTH_KEEPING_EDGES
else setting_values[3]),
setting_values[7]]
module_name = 'Smooth'
from_matlab = False
variable_revision_number = 1
if variable_revision_number == 1 and not from_matlab:
setting_values = setting_values + [cps.YES]
variable_revision_number = 2
return setting_values, variable_revision_number, from_matlab
|
LeeKamentsky/CellProfiler
|
cellprofiler/modules/smooth.py
|
Python
|
gpl-2.0
| 13,433
|
[
"Gaussian"
] |
7ecc00c977ff0c426004499023f56eba4c3eddde6c510698d67cd8acdcabf7fd
|
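The Smooth record above converts the user-facing "artifact diameter" into a Gaussian sigma with sigma = object_size / 2.35 before calling scipy's gaussian_filter; the 2.35 is the full-width-at-half-maximum factor, FWHM = 2*sqrt(2*ln 2)*sigma ~= 2.355*sigma. A minimal sketch of that convention, without CellProfiler's mask handling (names illustrative):

import numpy as np
from scipy.ndimage import gaussian_filter

def smooth_like_module(image, artifact_diameter=16.0):
    """Blur `image` so features of roughly `artifact_diameter` pixels are suppressed."""
    sigma = artifact_diameter / 2.35  # FWHM-to-sigma conversion used by the module
    return gaussian_filter(image, sigma, mode='constant', cval=0)

noisy = np.random.rand(64, 64)
print(smooth_like_module(noisy).std() < noisy.std())  # smoothing reduces variance: True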
########################################################################
# $HeadURL $
# File: GraphTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/09/28 09:02:23
########################################################################
""" :mod: GraphTests
=======================
.. module: GraphTests
:synopsis: tests for Graph module classes
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
"""
__RCSID__ = "$Id$"
# #
# @file GraphTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/09/28 09:02:24
# @brief Definition of GraphTests class.
# # imports
import unittest
# # SUT
from DIRAC.Core.Utilities.Graph import Node, Edge, Graph, DynamicProps # , topologicalSort, topoSort
class DynamicPropTests( unittest.TestCase ):
"""
.. class:: DynamicPropTests
"""
def testDynamicProps( self ):
""" test dynamic props """
class TestClass( object ):
"""
.. class:: TestClass
dummy class
"""
__metaclass__ = DynamicProps
# # dummy instance
testObj = TestClass()
# # makeProperty in
self.assertEqual( hasattr( testObj, "makeProperty" ), True )
self.assertEqual( callable( getattr( testObj, "makeProperty" ) ), True )
# # .. and works for rw properties
testObj.makeProperty( "rwTestProp", 10 ) #pylint: disable=no-member
self.assertEqual( hasattr( testObj, "rwTestProp" ), True )
self.assertEqual( getattr( testObj, "rwTestProp" ), 10 )
testObj.rwTestProp += 1 #pylint: disable=no-member
self.assertEqual( getattr( testObj, "rwTestProp" ), 11 )
# # .. and ro as well
testObj.makeProperty( "roTestProp", "I'm read only", True ) #pylint: disable=no-member
self.assertEqual( hasattr( testObj, "roTestProp" ), True )
self.assertEqual( getattr( testObj, "roTestProp" ), "I'm read only" )
# # AttributeError for read only property setattr
try:
testObj.roTestProp = 11
except AttributeError as error:
self.assertEqual( str( error ), "can't set attribute" )
class NodeTests( unittest.TestCase ):
"""
.. class:: NodeTests
"""
def setUp( self ):
""" test setup """
self.roAttrs = { "ro1" : True, "ro2" : "I'm read only" }
self.rwAttrs = { "rw1" : 0, "rw2" : ( 1, 2, 3 ) }
self.name = "BrightStart"
self.node = Node( self.name, self.rwAttrs, self.roAttrs )
def tearDown( self ):
""" clean up """
del self.roAttrs
del self.rwAttrs
del self.name
del self.node
def testNode( self ):
""" node rwAttrs roAttrs connect """
    # # node name - the only prop you can't overwrite
self.assertEqual( self.node.name, self.name )
try:
self.node.name = "can't do this"
except AttributeError as error:
self.assertEqual( str( error ), "can't set attribute" )
try:
self.node.makeProperty( "name", "impossible" )
except AttributeError as error:
self.assertEqual( str( error ), "_name or name is already defined as a member" )
# # visited attr for walking
self.assertEqual( hasattr( self.node, "visited" ), True )
self.assertEqual( self.node.visited, False ) #pylint: disable=no-member
# # ro attrs
for k, v in self.roAttrs.items():
self.assertEqual( hasattr( self.node, k ), True )
self.assertEqual( getattr( self.node, k ), v )
try:
setattr( self.node, k, "new value" )
except AttributeError as error:
self.assertEqual( str( error ), "can't set attribute" )
# # rw attrs
for k, v in self.rwAttrs.items():
self.assertEqual( hasattr( self.node, k ), True )
self.assertEqual( getattr( self.node, k ), v )
setattr( self.node, k, "new value" )
self.assertEqual( getattr( self.node, k ), "new value" )
# # connect
toNode = Node( "DeadEnd" )
edge = self.node.connect( toNode, { "foo" : "boo" }, { "ro3" : True } )
self.assertEqual( isinstance( edge, Edge ), True )
self.assertEqual( edge.name, self.name + "-DeadEnd" )
self.assertEqual( self.node, edge.fromNode ) #pylint: disable=no-member
self.assertEqual( toNode, edge.toNode ) #pylint: disable=no-member
class EdgeTests( unittest.TestCase ):
"""
.. class:: EdgeTests
"""
def setUp( self ):
""" test setup """
self.fromNode = Node( "Start" )
self.toNode = Node( "End" )
self.roAttrs = { "ro1" : True, "ro2" : "I'm read only" }
self.rwAttrs = { "rw1" : 0, "rw2" : ( 1, 2, 3 ) }
def tearDown( self ):
""" clean up """
del self.fromNode
del self.toNode
del self.roAttrs
del self.rwAttrs
def testEdge( self ):
""" c'tor connect attrs """
edge = Edge( self.fromNode, self.toNode, self.rwAttrs, self.roAttrs )
# # name
self.assertEqual( edge.name, "%s-%s" % ( self.fromNode.name, self.toNode.name ) )
try:
edge.name = "can't do this"
except AttributeError as error:
self.assertEqual( str( error ), "can't set attribute" )
try:
edge.makeProperty( "name", "impossible" )
except AttributeError as error:
self.assertEqual( str( error ), "_name or name is already defined as a member" )
# # visited attr
self.assertEqual( hasattr( edge, "visited" ), True )
self.assertEqual( edge.visited, False ) #pylint: disable=no-member
# # ro attrs
for k, v in self.roAttrs.items():
self.assertEqual( hasattr( edge, k ), True )
self.assertEqual( getattr( edge, k ), v )
try:
setattr( edge, k, "new value" )
except AttributeError as error:
self.assertEqual( str( error ), "can't set attribute" )
# # rw attrs
for k, v in self.rwAttrs.items():
self.assertEqual( hasattr( edge, k ), True )
self.assertEqual( getattr( edge, k ), v )
setattr( edge, k, "new value" )
self.assertEqual( getattr( edge, k ), "new value" )
# # start and end
self.assertEqual( edge.fromNode, self.fromNode ) #pylint: disable=no-member
self.assertEqual( edge.toNode, self.toNode ) #pylint: disable=no-member
# # in fromNode, not in toNode
self.assertEqual( edge in self.fromNode, True )
self.assertEqual( edge not in self.toNode, True )
clock = 0
########################################################################
class GraphTests( unittest.TestCase ):
"""
.. class:: GraphTests
"""
def setUp( self ):
""" setup test case """
self.nodes = [ Node( "1" ), Node( "2" ), Node( "3" ) ]
self.edges = [ self.nodes[0].connect( self.nodes[1] ),
self.nodes[0].connect( self.nodes[2] ) ]
self.aloneNode = Node( "4" )
def tearDown( self ):
""" clean up """
del self.nodes
del self.edges
del self.aloneNode
def testGraph( self ):
""" ctor nodes edges connect walk """
# # create graph
gr = Graph( "testGraph", self.nodes, self.edges )
# # nodes and edges
for node in self.nodes:
self.assertEqual( node in gr, True )
for edge in self.edges:
self.assertEqual( edge in gr, True )
self.assertEqual( sorted( self.nodes ), sorted( gr.nodes() ) )
self.assertEqual( sorted( self.edges ), sorted( gr.edges() ) )
# # getNode
for node in self.nodes:
self.assertEqual( gr.getNode( node.name ), node )
# # connect
aloneEdge = gr.connect( self.nodes[0], self.aloneNode )
self.assertEqual( self.aloneNode in gr, True )
self.assertEqual( aloneEdge in gr, True )
# # addNode
anotherNode = Node( "5" )
anotherEdge = anotherNode.connect( self.aloneNode )
gr.addNode( anotherNode )
self.assertEqual( anotherNode in gr, True )
self.assertEqual( anotherEdge in gr, True )
# # walk no nodeFcn
ret = gr.walkAll()
self.assertEqual( ret, {} )
for node in gr.nodes():
self.assertEqual( node.visited, True )
gr.reset()
for node in gr.nodes():
self.assertEqual( node.visited, False )
# # walk with nodeFcn
def nbEdges( node ):
""" dummy node fcn """
return len( node.edges() )
ret = gr.walkAll( nodeFcn = nbEdges )
self.assertEqual( ret, { '1': 3, '2' : 0, '3': 0, '4' : 0, '5': 1 } )
def testDFS( self ):
""" dfs """
global clock
def topoA( graph ):
""" topological sort """
global clock
nodes = graph.nodes()
for node in nodes:
node.makeProperty( "clockA", 0 )
def postVisit( node ):
global clock
node.clockA = clock
clock += 1
graph.dfs( postVisit = postVisit )
nodes = graph.nodes()
nodes.sort( key = lambda node: node.clockA )
return nodes
def topoB( graph ):
""" topological sort """
global clock
nodes = graph.nodes()
for node in nodes:
node.makeProperty( "clockB", 0 )
def postVisit( node ):
global clock
node.clockB = clock
clock += 1
graph.dfsIter( postVisit = postVisit )
nodes = graph.nodes()
nodes.sort( key = lambda node: node.clockB )
return nodes
clock = 0
gr = Graph( "testGraph", self.nodes, self.edges )
gr.addNode( self.aloneNode )
nodesSorted = topoA( gr )
nodes = gr.nodes()
nodes.sort( key = lambda node: node.clockA, reverse = True )
self.assertEqual( nodes, nodesSorted, "topoA sort failed" )
clock = 0
gr = Graph( "testGraph", self.nodes, self.edges )
gr.addNode( self.aloneNode )
gr.reset()
nodesSorted = topoB( gr )
nodes = gr.nodes()
nodes.sort( key = lambda node: node.clockB, reverse = True )
self.assertEqual( nodes, nodesSorted, "topoB sort failed" )
def testBFS( self ):
""" bfs walk """
global clock
def walk( graph ):
""" bfs walk """
global clock
nodes = graph.nodes()
for node in nodes:
node.makeProperty( "clockC", 0 )
def postVisit( node ):
global clock
node.clockC = clock
clock += 1
nodes = graph.bfs( postVisit = postVisit )
nodes.sort( key = lambda node: node.clockC )
return nodes
clock = 0
gr = Graph( "testGraph", self.nodes, self.edges )
gr.addNode( self.aloneNode )
gr.reset()
nodesSorted = walk( gr )
nodes = gr.nodes()
nodes.sort( key = lambda node: node.clockC )
self.assertEqual( nodesSorted, nodes, "bfs failed" )
# # test execution
if __name__ == "__main__":
testLoader = unittest.TestLoader()
tests = ( testLoader.loadTestsFromTestCase( testCase ) for testCase in ( DynamicPropTests,
NodeTests,
EdgeTests,
GraphTests ) )
testSuite = unittest.TestSuite( tests )
unittest.TextTestRunner( verbosity = 3 ).run( testSuite )
|
arrabito/DIRAC
|
Core/Utilities/test/Test_Graph.py
|
Python
|
gpl-3.0
| 10,806
|
[
"DIRAC"
] |
99536279796e66733e5831f3e3f213aa909951c80603de8bf8e2cd53da44e1f7
|
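The topoA/topoB tests in the record above check the classic property that listing a DAG's nodes in decreasing DFS post-visit ("clock") order yields a topological sort. The same idea with plain dicts instead of DIRAC's Graph (illustrative sketch):

def topo_sort(graph):
    """Topological sort via DFS post-order; `graph` maps node -> list of successors."""
    visited, order = set(), []
    def dfs(node):
        visited.add(node)
        for succ in graph.get(node, ()):
            if succ not in visited:
                dfs(succ)
        order.append(node)  # post-visit: recorded only after all successors
    for node in graph:
        if node not in visited:
            dfs(node)
    return order[::-1]  # decreasing post-visit time == topological order

print(topo_sort({'1': ['2', '3'], '2': [], '3': [], '4': []}))  # -> ['4', '1', '3', '2']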
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_HTTPError,
compat_kwargs,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
bool_or_none,
clean_html,
error_to_compat_str,
extract_attributes,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
remove_quotes,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
url_or_none,
urlencode_postdata,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
_CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
login_form = self._hidden_inputs(login_page)
def req(url, f_req, note, errnote):
data = login_form.copy()
data.update({
'pstMsg': 1,
'checkConnection': 'youtube',
'checkedDomains': 'youtube',
'hl': 'en',
'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
'f.req': json.dumps(f_req),
'flowName': 'GlifWebSignIn',
'flowEntry': 'ServiceLogin',
# TODO: reverse actual botguard identifier generation algo
'bgRequest': '["identifier",""]',
})
return self._download_json(
url, None, note=note, errnote=errnote,
transform_source=lambda s: re.sub(r'^[^[]*', '', s),
fatal=False,
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Google-Accounts-XSRF': 1,
})
def warn(message):
self._downloader.report_warning(message)
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
[
None, None,
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
],
username,
]
lookup_results = req(
self._LOOKUP_URL, lookup_req,
'Looking up account info', 'Unable to look up account info')
if lookup_results is False:
return False
user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
if not user_hash:
warn('Unable to extract user hash')
return False
challenge_req = [
user_hash,
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
]]
challenge_results = req(
self._CHALLENGE_URL, challenge_req,
'Logging in', 'Unable to log in')
if challenge_results is False:
return
login_res = try_get(challenge_results, lambda x: x[0][5], list)
if login_res:
login_msg = try_get(login_res, lambda x: x[5], compat_str)
warn(
'Unable to login: %s' % 'Invalid password'
if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
return False
res = try_get(challenge_results, lambda x: x[0][-1], list)
if not res:
warn('Unable to extract result entry')
return False
login_challenge = try_get(res, lambda x: x[0][0], list)
if login_challenge:
challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
if challenge_str == 'TWO_STEP_VERIFICATION':
# SEND_SUCCESS - TFA code has been successfully sent to phone
# QUOTA_EXCEEDED - reached the limit of TFA codes
status = try_get(login_challenge, lambda x: x[5], compat_str)
if status == 'QUOTA_EXCEEDED':
warn('Exceeded the limit of TFA codes, try later')
return False
tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
if not tl:
warn('Unable to extract TL')
return False
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
warn(
                        'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
                        '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_req = [
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
]]
tfa_results = req(
self._TFA_URL.format(tl), tfa_req,
'Submitting TFA code', 'Unable to submit TFA code')
if tfa_results is False:
return False
tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
if tfa_res:
tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
warn(
'Unable to finish TFA: %s' % 'Invalid TFA code'
if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
return False
check_cookie_url = try_get(
tfa_results, lambda x: x[0][-1][2], compat_str)
else:
CHALLENGES = {
'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
}
challenge = CHALLENGES.get(
challenge_str,
'%s returned error %s.' % (self.IE_NAME, challenge_str))
warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
return False
else:
check_cookie_url = try_get(res, lambda x: x[2], compat_str)
if not check_cookie_url:
warn('Unable to extract CheckCookie URL')
return False
check_cookie_results = self._download_webpage(
check_cookie_url, None, 'Checking cookie', fatal=False)
if check_cookie_results is False:
return False
if 'https://myaccount.google.com/' not in check_cookie_results:
warn('Unable to log in')
return False
return True
def _download_webpage_handle(self, *args, **kwargs):
query = kwargs.get('query', {}).copy()
query['disable_polymer'] = 'true'
kwargs['query'] = query
return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
*args, **compat_kwargs(kwargs))
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
# Extract entries from page with "Load more" button
def _entries(self, page, playlist_id):
more_widget_html = content_html = page
for page_num in itertools.count(1):
for entry in self._process_page(content_html):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
count = 0
retries = 3
while count <= retries:
try:
# Downloading page may result in intermittent 5xx HTTP error
# that is usually worked around with a retry
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s%s'
% (page_num, ' (retry #%d)' % count if count else ''),
transform_source=uppercase_escape)
break
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
count += 1
if count <= retries:
continue
raise
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for video_id, video_title in self.extract_videos_from_page(content):
yield self.url_result(video_id, 'Youtube', video_id, video_title)
def extract_videos_from_page_impl(self, video_re, page, ids_in_page, titles_in_page):
for mobj in re.finditer(video_re, page):
            # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
if 'index' in mobj.groupdict() and mobj.group('id') == '0':
continue
video_id = mobj.group('id')
video_title = unescapeHTML(
mobj.group('title')) if 'title' in mobj.groupdict() else None
if video_title:
video_title = video_title.strip()
if video_title == '► Play all':
video_title = None
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
def extract_videos_from_page(self, page):
ids_in_page = []
titles_in_page = []
self.extract_videos_from_page_impl(
self._VIDEO_RE, page, ids_in_page, titles_in_page)
return zip(ids_in_page, titles_in_page)
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for playlist_id in orderedSet(re.findall(
r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
content)):
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._og_search_title(webpage, fatal=False)
return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?hooktube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
# Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
(?:(?:www|dev)\.)?invidio\.us/|
(?:(?:www|no)\.)?invidiou\.sh/|
(?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
(?:www\.)?invidious\.kabi\.tk/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.mastodon\.host/|
(?:www\.)?invidious\.nixnet\.xyz/|
(?:www\.)?invidious\.drycat\.fr/|
(?:www\.)?tube\.poal\.co/|
(?:www\.)?vid\.wxzm\.sx/|
(?:www\.)?yewtu\.be/|
(?:www\.)?yt\.elukerio\.org/|
(?:www\.)?yt\.lelux\.fi/|
(?:www\.)?invidious\.ggc-project\.de/|
(?:www\.)?yt\.maisputain\.ovh/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.toot\.koeln/|
(?:www\.)?invidious\.fdn\.fr/|
(?:www\.)?watch\.nettohikari\.com/|
(?:www\.)?kgg2m7yk5aybusll\.onion/|
(?:www\.)?qklhadlycap4cnod\.onion/|
(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?\blist=
(?:
%(playlist_id)s| # combined list/video URLs are handled by the playlist IE
WL # WL are handled by the watch later IE
)
)
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_PLAYER_INFO_RE = (
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
}
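    # Illustrative note, not part of the original source: each itag entry above
    # supplies fallback metadata that gets merged into the format dicts parsed
    # from the player response, roughly:
    #   dct = {'format_id': format_id, 'url': url}
    #   if format_id in self._formats:
    #       dct.update(self._formats[format_id])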
_SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'alt_title': 'I Love It (feat. Charli XCX)',
'description': 'md5:19a2f98d9032b9311e686ed039564f63',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'duration': 180,
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
'creator': 'Icona Pop',
'track': 'I Love It (feat. Charli XCX)',
'artist': 'Icona Pop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)',
'alt_title': 'Tunnel Vision',
'description': 'md5:07dab3356cde4199048e4c7cd93471e1',
'duration': 419,
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
'creator': 'Justin Timberlake',
'track': 'Tunnel Vision',
'artist': 'Justin Timberlake',
'age_limit': 18,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
}
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'description': 'md5:307195cd21ff7fa352270fe884570ef0',
'duration': 242,
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
# Age-gate video with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'mp4',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'duration': 246,
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
'upload_date': '20110629',
'age_limit': 18,
},
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'Dada Life, deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'This Machine Kills Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7335,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7334,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
},
'params': {
'skip_download': True,
},
},
{
# Youtube Music Auto-generated description
# Retrieve 'artist' field from 'Artist:' in video description
# when it is present on youtube music video
'url': 'https://www.youtube.com/watch?v=k0jLE7tTwjY',
'info_dict': {
'id': 'k0jLE7tTwjY',
'ext': 'mp4',
'title': 'Latch Feat. Sam Smith',
'description': 'md5:3cb1e8101a7c85fcba9b4fb41b951335',
'upload_date': '20150110',
'uploader': 'Various Artists - Topic',
'uploader_id': 'UCNkEcmYdjrH4RqtNgh7BZ9w',
'artist': 'Disclosure',
'track': 'Latch Feat. Sam Smith',
'album': 'Latch Featuring Sam Smith',
'release_date': '20121008',
'release_year': 2012,
},
'params': {
'skip_download': True,
},
},
{
# Youtube Music Auto-generated description
# handle multiple artists on youtube music video
'url': 'https://www.youtube.com/watch?v=74qn0eJSjpA',
'info_dict': {
'id': '74qn0eJSjpA',
'ext': 'mp4',
'title': 'Eastside',
'description': 'md5:290516bb73dcbfab0dcc4efe6c3de5f2',
'upload_date': '20180710',
'uploader': 'Benny Blanco - Topic',
'uploader_id': 'UCzqz_ksRu_WkIzmivMdIS7A',
'artist': 'benny blanco, Halsey, Khalid',
'track': 'Eastside',
'album': 'Eastside',
'release_date': '20180713',
'release_year': 2018,
},
'params': {
'skip_download': True,
},
},
{
# Youtube Music Auto-generated description
# handle youtube music video with release_year and no release_date
'url': 'https://www.youtube.com/watch?v=-hcAI0g-f5M',
'info_dict': {
'id': '-hcAI0g-f5M',
'ext': 'mp4',
'title': 'Put It On Me',
'description': 'md5:f6422397c07c4c907c6638e1fee380a5',
'upload_date': '20180426',
'uploader': 'Matt Maeson - Topic',
'uploader_id': 'UCnEkIGqtGcQMLk73Kp-Q5LQ',
'artist': 'Matt Maeson',
'track': 'Put It On Me',
'album': 'The Hearse',
'release_date': None,
'release_year': 2018,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
}
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('ext'), id_m.group('id')
def _extract_signature_function(self, video_id, player_url, example_sig):
player_type, player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
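            # cache_spec is a list of source indices; indexing the scrambled
            # signature with it reproduces the deciphered signature.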
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
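        # Probe the decipher function with a string of distinct characters
        # (chr(0), chr(1), ...) so that the character codes of the result
        # reveal the index permutation, which can then be cached.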
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
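            # Compress a list of source indices into equivalent Python slice
            # expressions, e.g. [1, 2, 3, 7] becomes "s[1:4] + s[7]".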
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
            # Squelch pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
            # User data may contain arbitrary character sequences that can break
            # regex-based JSON extraction, e.g. when '};' appears inside the data
            # the second regex won't capture the whole JSON. Work around this by
            # trying the more specific regex first; proper quoted string handling,
            # to be implemented in the future, will replace this workaround (see
            # https://github.com/ytdl-org/youtube-dl/issues/7468,
            # https://github.com/ytdl-org/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if not player_config:
self._downloader.report_warning(err_msg)
return {}
try:
args = player_config['args']
caption_url = args.get('ttsurl')
if caption_url:
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse_urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
def make_captions(sub_url, sub_langs):
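                # Rewrite the 'tlang' and 'fmt' query parameters of the base
                # caption URL once per language/format combination.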
parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
caption_qs = compat_parse_qs(parsed_sub_url.query)
captions = {}
for sub_lang in sub_langs:
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
caption_qs.update({
'tlang': [sub_lang],
'fmt': [ext],
})
sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
query=compat_urllib_parse_urlencode(caption_qs, True)))
sub_formats.append({
'url': sub_url,
'ext': ext,
})
captions[sub_lang] = sub_formats
return captions
# New captions format as of 22.06.2017
player_response = args.get('player_response')
if player_response and isinstance(player_response, compat_str):
player_response = self._parse_json(
player_response, video_id, fatal=False)
if player_response:
renderer = player_response['captions']['playerCaptionsTracklistRenderer']
base_url = renderer['captionTracks'][0]['baseUrl']
sub_lang_list = []
for lang in renderer['translationLanguages']:
lang_code = lang.get('languageCode')
if lang_code:
sub_lang_list.append(lang_code)
return make_captions(base_url, sub_lang_list)
# Some videos don't provide ttsurl but rather caption_tracks and
# caption_translation_languages (e.g. 20LmZk1hakA)
            # Not used anymore as of 22.06.2017
caption_tracks = args['caption_tracks']
caption_translation_languages = args['caption_translation_languages']
caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
sub_lang_list = []
for lang in caption_translation_languages.split(','):
lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
sub_lang = lang_qs.get('lc', [None])[0]
if sub_lang:
sub_lang_list.append(sub_lang)
return make_captions(caption_url, sub_lang_list)
        # An extractor error can be raised by the download process if there are
        # no automatic captions but there are subtitles
except (KeyError, IndexError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
def _mark_watched(self, video_id, video_info, player_response):
playback_url = url_or_none(try_get(
player_response,
lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
video_info, lambda x: x['videostats_playback_base_url'][0]))
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
        # The cpn generation algorithm is reverse engineered from base.js;
        # in fact it works even with a dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
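        # A cpn is 16 characters drawn from this 64-character base64url-style alphabet.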
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
@staticmethod
def _extract_chapters(description, duration):
if not description:
return None
chapter_lines = re.findall(
r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
description)
if not chapter_lines:
return None
chapters = []
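        # Each chapter runs from its own timestamp to the next chapter's
        # timestamp; the last chapter ends at the video duration.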
for next_num, (chapter_line, time_point) in enumerate(
chapter_lines, start=1):
start_time = parse_duration(time_point)
if start_time is None:
continue
if start_time > duration:
break
end_time = (duration if next_num == len(chapter_lines)
else parse_duration(chapter_lines[next_num][1]))
if end_time is None:
continue
if end_time > duration:
end_time = duration
if start_time > end_time:
break
chapter_title = re.sub(
r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
chapter_title = re.sub(r'\s+', ' ', chapter_title)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': chapter_title,
})
return chapters
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
        # Extract the original video URL from a redirecting URL (e.g. age verification) using the next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage, urlh = self._download_webpage_handle(url, video_id)
qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
video_id = qs.get('v', [None])[0] or video_id
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
def add_dash_mpd_pr(pl_response):
dash_mpd = url_or_none(try_get(
pl_response, lambda x: x['streamingData']['dashManifestUrl'],
compat_str))
if dash_mpd and dash_mpd not in dash_mpds:
dash_mpds.append(dash_mpd)
is_live = None
view_count = None
def extract_view_count(v_info):
return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
def extract_player_response(player_response, video_id):
pl_response = str_or_none(player_response)
if not pl_response:
return
pl_response = self._parse_json(pl_response, video_id, fatal=False)
if isinstance(pl_response, dict):
add_dash_mpd_pr(pl_response)
return pl_response
player_response = {}
# Get video info
video_info = {}
embed_webpage = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
            # Simulate access to the video via www.youtube.com/v/{video_id};
            # this page can be viewed without logging into Youtube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
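            # The 'sts' (signature timestamp) scraped from the embed page is
            # echoed back so that get_video_info returns stream URLs ciphered
            # for that player version.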
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
try:
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
except ExtractorError:
video_info_webpage = None
if video_info_webpage:
video_info = compat_parse_qs(video_info_webpage)
pl_response = video_info.get('player_response', [None])[0]
player_response = extract_player_response(pl_response, video_id)
add_dash_mpd(video_info)
view_count = extract_view_count(video_info)
else:
age_gate = False
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
# Rental video is not rented but preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg,
# https://github.com/ytdl-org/youtube-dl/issues/10532)
if not video_info and args.get('ypc_vid'):
return self.url_result(
args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
if not player_response:
player_response = extract_player_response(args.get('player_response'), video_id)
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
add_dash_mpd_pr(player_response)
def extract_unavailable_message():
messages = []
for tag, kind in (('h1', 'message'), ('div', 'submessage')):
msg = self._html_search_regex(
r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
video_webpage, 'unavailable %s' % kind, default=None)
if msg:
messages.append(msg)
if messages:
return '\n'.join(messages)
if not video_info and not player_response:
unavailable_message = extract_unavailable_message()
if not unavailable_message:
unavailable_message = 'Unable to extract video data'
raise ExtractorError(
'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
if not isinstance(video_info, dict):
video_info = {}
video_details = try_get(
player_response, lambda x: x['videoDetails'], dict) or {}
video_title = video_info.get('title', [None])[0] or video_details.get('title')
if not video_title:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
description_original = video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
def replace_url(m):
redir_url = compat_urlparse.urljoin(url, m.group(1))
parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
qs = compat_parse_qs(parsed_redir_url.query)
q = qs.get('q')
if q and q[0]:
return q[0]
return redir_url
description_original = video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', replace_url, video_description)
video_description = clean_html(video_description)
else:
video_description = self._html_search_meta('description', video_webpage) or video_details.get('shortDescription')
if not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
compat_str) or try_get(
video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
if multifeed_metadata_list:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
                        # Unquoting must take place before splitting on comma (,)
                        # since textual fields may contain commas as well (see
                        # https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
else:
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if view_count is None:
view_count = extract_view_count(video_info)
if view_count is None and video_details:
view_count = int_or_none(video_details.get('viewCount'))
if is_live is None:
is_live = bool_or_none(video_details.get('isLive'))
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
def _extract_filesize(media_url):
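            # The 'clen' parameter embedded in media URLs carries the content
            # length in bytes.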
return int_or_none(self._search_regex(
r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
formats = []
formats_spec = {}
fmt_list = video_info.get('fmt_list', [''])[0]
if fmt_list:
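                # fmt_list entries typically look like
                # '<itag>/<width>x<height>/...', e.g. '22/1280x720/9/0/115'.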
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
for fmt in streaming_formats:
itag = str_or_none(fmt.get('itag'))
if not itag:
continue
quality = fmt.get('quality')
quality_label = fmt.get('qualityLabel') or quality
formats_spec[itag] = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_note': quality_label,
'fps': int_or_none(fmt.get('fps')),
'height': int_or_none(fmt.get('height')),
# bitrate for itag 43 is always 2147483647
'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
'width': int_or_none(fmt.get('width')),
}
for fmt in streaming_formats:
if fmt.get('drmFamilies') or fmt.get('drm_families'):
continue
url = url_or_none(fmt.get('url'))
if not url:
cipher = fmt.get('cipher') or fmt.get('signatureCipher')
if not cipher:
continue
url_data = compat_parse_qs(cipher)
url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
if not url:
continue
else:
cipher = None
url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
# Unsupported FORMAT_STREAM_TYPE_OTF
if stream_type == 3:
continue
format_id = fmt.get('itag') or url_data['itag'][0]
if not format_id:
continue
format_id = compat_str(format_id)
if cipher:
if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
                            # We need the embed webpage after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
if self._downloader.params.get('verbose'):
if player_url is None:
player_desc = 'unknown'
else:
player_type, player_version = self._extract_player_info(player_url)
player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
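                        # 'sp' names the query parameter that must carry the
                        # decrypted signature; older players used 'signature'.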
sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
url += '&%s=%s' % (sp, signature)
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
if width is None:
width = int_or_none(fmt.get('width'))
if height is None:
height = int_or_none(fmt.get('height'))
filesize = int_or_none(url_data.get(
'clen', [None])[0]) or _extract_filesize(url)
quality = url_data.get('quality', [None])[0] or fmt.get('quality')
quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
more_fields = {
'filesize': filesize,
'tbr': tbr,
'width': width,
'height': height,
'fps': fps,
'format_note': quality_label or quality,
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
dct.update(parse_codecs(codecs))
if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
formats.append(dct)
else:
manifest_url = (
url_or_none(try_get(
player_response,
lambda x: x['streamingData']['hlsManifestUrl'],
compat_str))
or url_or_none(try_get(
video_info, lambda x: x['hlsvp'][0], compat_str)))
if manifest_url:
formats = []
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', fatal=False)
for a_format in m3u8_formats:
itag = self._search_regex(
r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
if itag:
a_format['format_id'] = itag
if itag in self._formats:
dct = self._formats[itag].copy()
dct.update(a_format)
a_format = dct
a_format['player_url'] = player_url
# Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
formats.append(a_format)
else:
error_message = extract_unavailable_message()
if not error_message:
error_message = clean_html(try_get(
player_response, lambda x: x['playabilityStatus']['reason'],
compat_str))
if not error_message:
error_message = clean_html(
try_get(video_info, lambda x: x['reason'][0], compat_str))
if error_message:
raise ExtractorError(error_message, expected=True)
raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
# uploader
video_uploader = try_get(
video_info, lambda x: x['author'][0],
compat_str) or str_or_none(video_details.get('author'))
if video_uploader:
video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
else:
self._downloader.report_warning('unable to extract uploader name')
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
self._downloader.report_warning('unable to extract uploader nickname')
channel_id = (
str_or_none(video_details.get('channelId'))
or self._html_search_meta(
'channelId', video_webpage, 'channel id', default=None)
or self._search_regex(
r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
video_webpage, 'channel id', default=None, group='id'))
channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
# thumbnail image
        # First try to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
video_webpage, 'upload date', default=None)
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'''(?x)
<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
<ul[^>]*>\s*
<li>(?P<title>.+?)
by (?P<creator>.+?)
(?:
\(.+?\)|
<a[^>]*
(?:
\bhref=["\']/red[^>]*>| # drop possible
>\s*Listen ad-free with YouTube Red # YouTube Red ad
)
.*?
)?</li
''',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
def extract_meta(field):
return self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
video_webpage, field, default=None)
track = extract_meta('Song')
artist = extract_meta('Artist')
album = extract_meta('Album')
# Youtube Music Auto-generated description
release_date = release_year = None
if video_description:
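            # Auto-generated music descriptions follow the pattern:
            #   Provided to YouTube by <label>
            #   <track> · <artist>
            #   <album>
            #   ℗ <year> ... Released on: <YYYY-MM-DD> ... Artist: <name>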
mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
if mobj:
if not track:
track = mobj.group('track').strip()
if not artist:
artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
if not album:
                    album = mobj.group('album').strip()
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = int(release_date[:4])
if release_year:
release_year = int(release_year)
m_episode = re.search(
r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
video_webpage)
if m_episode:
series = unescapeHTML(m_episode.group('series'))
season_number = int(m_episode.group('season'))
episode_number = int(m_episode.group('episode'))
else:
series = season_number = episode_number = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
if view_count is None:
view_count = str_to_int(self._search_regex(
r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
'view count', default=None))
average_rating = (
float_or_none(video_details.get('averageRating'))
or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
video_duration = try_get(
video_info, lambda x: int_or_none(x['length_seconds'][0]))
if not video_duration:
video_duration = int_or_none(video_details.get('lengthSeconds'))
if not video_duration:
video_duration = parse_duration(self._html_search_meta(
'duration', video_webpage, 'video duration'))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
xsrf_token = self._search_regex(
r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
invideo_url = try_get(
player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
if xsrf_token and invideo_url:
xsrf_field_name = self._search_regex(
r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
video_webpage, 'xsrf field name',
group='xsrf_field_name', default='session_token')
video_annotations = self._download_webpage(
self._proto_relative_url(invideo_url),
video_id, note='Downloading annotations',
errnote='Unable to download video annotations', fatal=False,
data=urlencode_postdata({xsrf_field_name: xsrf_token}))
chapters = self._extract_chapters(description_original, video_duration)
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
if not df.get('filesize'):
df['filesize'] = _extract_filesize(df['url'])
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
                        # Additional DASH manifests may end up in HTTP Error 403,
                        # therefore allow them to fail without a bug report
                        # message if some DASH manifest already succeeded. This
                        # is a temporary workaround to reduce the burst of bug
                        # reports until we figure out the reason and whether it
                        # can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
                # Remove the formats found through non-DASH means; they contain
                # less info and can be wrong because we use fixed values (for
                # example the resolution). See
                # https://github.com/ytdl-org/youtube-dl/issues/5774 for an
                # example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
if not formats:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta(
'regionsAllowed', video_webpage, default=None)
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
msg=video_info['reason'][0], countries=countries)
reason = video_info['reason'][0]
if 'Invalid parameters' in reason:
unavailable_message = extract_unavailable_message()
if unavailable_message:
reason = unavailable_message
raise ExtractorError(
'YouTube said: %s' % reason,
expected=True, video_id=video_id)
if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']):
raise ExtractorError('This video is DRM protected.', expected=True)
self._sort_formats(formats)
self.mark_watched(video_id, video_info, player_response)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'channel_id': channel_id,
'channel_url': channel_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator or artist,
'title': video_title,
'alt_title': video_alt_title or track,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'chapters': chapters,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': average_rating,
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'track': track,
'artist': artist,
'album': album,
'release_date': release_date,
'release_year': release_year,
}
class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
invidio\.us
)
/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
\? (?:.*?[&;])*? (?:p|a|list)=
| p/
)|
youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
)
(
(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,}
                                # Top tracks; they can also include dots
|(?:MC)[\w\.]*
)
.*
|
(%(playlist_id)s)
)""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE_TPL = r'href="\s*/watch\?v=%s(?:&(?:[^"]*?index=(?P<index>\d+))?(?:[^>]+>(?P<title>[^<]+))?)?'
_VIDEO_RE = _VIDEO_RE_TPL % r'(?P<id>[0-9A-Za-z_-]{11})'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
},
'playlist_count': 1,
}, {
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
},
'playlist_count': 0,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'ChRiStIaAn008',
},
'playlist_count': 96,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
'uploader': 'Wickydoo',
'uploader_id': 'Wickydoo',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'Cauchemar89',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'uploader': 'milan',
'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
}
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 485,
'info_dict': {
'title': '2018 Chinese New Singles (11/6 updated)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'uploader': 'LBK',
'uploader_id': 'sdragonfang',
}
}, {
'note': 'Embedded SWF player',
'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
},
'skip': 'This playlist does not exist',
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'InterstellarMovie1',
},
'playlist_mincount': 21,
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
# https://github.com/ytdl-org/youtube-dl/issues/21844
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'Computerphile',
'uploader': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}, {
'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}]
def _real_initialize(self):
self._login()
def extract_videos_from_page(self, page):
ids_in_page = []
titles_in_page = []
for item in re.findall(
r'(<[^>]*\bdata-video-id\s*=\s*["\'][0-9A-Za-z_-]{11}[^>]+>)', page):
attrs = extract_attributes(item)
video_id = attrs['data-video-id']
video_title = unescapeHTML(attrs.get('data-title'))
if video_title:
video_title = video_title.strip()
ids_in_page.append(video_id)
titles_in_page.append(video_title)
# Fallback with old _VIDEO_RE
self.extract_videos_from_page_impl(
self._VIDEO_RE, page, ids_in_page, titles_in_page)
# Relaxed fallbacks
self.extract_videos_from_page_impl(
r'href="\s*/watch\?v\s*=\s*(?P<id>[0-9A-Za-z_-]{11})', page,
ids_in_page, titles_in_page)
self.extract_videos_from_page_impl(
r'data-video-ids\s*=\s*["\'](?P<id>[0-9A-Za-z_-]{11})', page,
ids_in_page, titles_in_page)
return zip(ids_in_page, titles_in_page)
def _extract_mix(self, playlist_id):
        # Mixes are generated from a single video;
        # the playlist id is just 'RD' + the video id
ids = []
last_id = playlist_id[-11:]
for n in itertools.count(1):
url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
new_ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
            # Fetch new pages until all the videos repeat; there seem to
            # always be 51 unique videos.
new_ids = [_id for _id in new_ids if _id not in ids]
if not new_ids:
break
ids.extend(new_ids)
last_id = ids[-1]
url_results = self._ids_to_results(ids)
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title')
or search_title('title long-title')
or search_title('title'))
title = clean_html(title_span)
return self.playlist_result(url_results, playlist_id, title)
def _extract_playlist(self, playlist_id):
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
# the yt-alert-message now has tabindex attribute (see https://github.com/ytdl-org/youtube-dl/issues/11604)
for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
match = match.strip()
# Check if the playlist exists or is private
mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
if mobj:
reason = mobj.group('reason')
message = 'This playlist %s' % reason
if 'private' in reason:
message += ', use --username or --netrc to access it'
message += '.'
raise ExtractorError(message, expected=True)
elif re.match(r'[^<]*Invalid parameters[^<]*', match):
raise ExtractorError(
'Invalid parameters. Maybe URL is incorrect.',
expected=True)
elif re.match(r'[^<]*Choose your language[^<]*', match):
continue
else:
self.report_warning('Youtube gives an alert message: ' + match)
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
page, 'title', default=None)
_UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref='
uploader = self._html_search_regex(
r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE,
page, 'uploader', default=None)
mobj = re.search(
r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE,
page)
if mobj:
uploader_id = mobj.group('uploader_id')
uploader_url = compat_urlparse.urljoin(url, mobj.group('path'))
else:
uploader_id = uploader_url = None
has_videos = True
if not playlist_title:
try:
# Some playlist URLs don't actually serve a playlist (e.g.
# https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
next(self._entries(page, playlist_id))
except StopIteration:
has_videos = False
playlist = self.playlist_result(
self._entries(page, playlist_id), playlist_id, playlist_title)
playlist.update({
'uploader': uploader,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
})
return has_videos, playlist
def _check_download_just_video(self, url, playlist_id):
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
video_id = query_dict.get('v', [None])[0] or self._search_regex(
r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
'video id', default=None)
if video_id:
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
return video_id, None
return None, None
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
video_id, video = self._check_download_just_video(url, playlist_id)
if video:
return video
if playlist_id.startswith(('RD', 'UL', 'PU')):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
has_videos, playlist = self._extract_playlist(playlist_id)
if has_videos or not video_id:
return playlist
# Some playlist URLs don't actually serve a playlist (see
# https://github.com/ytdl-org/youtube-dl/issues/10537).
# Fallback to plain video extraction if there is a video id
# along with playlist id.
return self.url_result(video_id, 'Youtube', video_id=video_id)
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie|kids)?\.com|(?:www\.)?invidio\.us)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
_VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
'title': 'Uploads from lex will',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
}
}, {
'note': 'Age restricted channel',
# from https://www.youtube.com/user/DeusExOfficial
'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
'playlist_mincount': 64,
'info_dict': {
'id': 'UUs0ifCMCm1icqRbqhUINa0w',
'title': 'Uploads from Deus Ex',
'uploader': 'Deus Ex',
'uploader_id': 'DeusExOfficial',
},
}, {
'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
else super(YoutubeChannelIE, cls).suitable(url))
def _build_template_url(self, url, channel_id):
return self._TEMPLATE_URL % channel_id
def _real_extract(self, url):
channel_id = self._match_id(url)
url = self._build_template_url(url, channel_id)
# Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
# Work around this by extracting as a playlist if we managed to obtain the channel playlist URL;
# otherwise fall back on channel-by-page extraction
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
if channel_page is False:
channel_playlist_id = False
else:
channel_playlist_id = self._html_search_meta(
'channelId', channel_page, 'channel id', default=None)
if not channel_playlist_id:
channel_url = self._html_search_meta(
('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
channel_page, 'channel url', default=None)
if channel_url:
channel_playlist_id = self._search_regex(
r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
channel_url, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
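# e.g. channel 'UCKfVa3S1e4PHvxWcwyMMg8w' maps to its uploads playlist
# 'UUKfVa3S1e4PHvxWcwyMMg8w' (ids taken from the tests above)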
return self.url_result(
compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
# The videos are contained in a single page;
# the ajax pages can't be used, as they are empty
entries = [
self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
try:
next(self._entries(channel_page, channel_id))
except StopIteration:
alert_message = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
channel_page, 'alert', default=None, group='alert')
if alert_message:
raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
'title': 'Uploads from The Linux Foundation',
'uploader': 'The Linux Foundation',
'uploader_id': 'TheLinuxFoundation',
}
}, {
# Only available via https://www.youtube.com/c/12minuteathlete/videos
# but not https://www.youtube.com/user/12minuteathlete/videos
'url': 'https://www.youtube.com/c/12minuteathlete/videos',
'playlist_mincount': 249,
'info_dict': {
'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
'title': 'Uploads from 12 Minute Athlete',
'uploader': '12 Minute Athlete',
'uploader_id': 'the12minuteathlete',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/gametrailers',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/gametrailers',
'only_matching': True,
}, {
# This channel is not available, geo restricted to JP
'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
# Don't return True if the url can be extracted with another youtube
# extractor: the regex is too permissive and it would match.
other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_yt_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
def _build_template_url(self, url, channel_id):
mobj = re.match(self._VALID_URL, url)
return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com live streams'
_VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
IE_NAME = 'youtube:live'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id')
base_url = mobj.group('base_url')
webpage = self._download_webpage(url, channel_id, fatal=False)
if webpage:
page_type = self._og_search_property(
'type', webpage, 'page type', default='')
video_id = self._html_search_meta(
'videoId', webpage, 'video id', default=None)
if page_type.startswith('video') and video_id and re.match(
r'^[0-9A-Za-z_-]{11}$', video_id):
return self.url_result(video_id, YoutubeIE.ie_key())
return self.url_result(base_url)
class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com user/channel playlists'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
IE_NAME = 'youtube:playlists'
_TESTS = [{
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'ThirstForScience',
'title': 'ThirstForScience',
},
}, {
# with "Load more" button
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 70,
'info_dict': {
'id': 'igorkle1',
'title': 'Игорь Клейнер',
},
}, {
'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
'playlist_mincount': 17,
'info_dict': {
'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
'title': 'Chem Player',
},
'skip': 'Blocked',
}]
class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor):
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit; for example, if you search for
# 'python' you get more than 8,000,000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
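# Usage note (illustrative, not part of the original file): the search key
# is combined with an optional result count and a query,
# e.g. 'ytsearch5:cute cats' fetches the first five results for 'cute cats'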
_EXTRA_QUERY_ARGS = {}
_TESTS = []
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
videos = []
limit = n
url_query = {
'search_query': query.encode('utf-8'),
}
url_query.update(self._EXTRA_QUERY_ARGS)
result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
for pagenum in itertools.count(1):
data = self._download_json(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % pagenum,
errnote='Unable to download API page',
query={'spf': 'navigate'})
html_content = data[1]['body']['content']
if 'class="search-message' in html_content:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_videos = list(self._process_page(html_content))
videos += new_videos
if not new_videos or len(videos) > limit:
break
next_link = self._html_search_regex(
r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
html_content, 'next link', default=None)
if next_link is None:
break
result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
if len(videos) > n:
videos = videos[:n]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'https://www.youtube.com/show/airdisasters',
'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
return super(YoutubeShowIE, self)._real_extract(
'https://www.youtube.com/show/%s/playlists' % playlist_id)
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _entries(self, page):
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
# The 'recommended' feed has an infinite 'load more' and each new portion
# serves the same videos in a (sometimes) slightly different order, so we
# check for uniqueness and break when a portion has no new videos
new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
if not new_ids:
break
ids.extend(new_ids)
for entry in self._ids_to_results(new_ids):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
self._PLAYLIST_TITLE)
return self.playlist_result(
self._entries(page), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=WL',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
'only_matching': True,
}]
def _real_extract(self, url):
_, video = self._check_download_just_video(url, 'WL')
if video:
return video
_, playlist = self._extract_playlist('WL')
return playlist
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
|
vinegret/youtube-dl
|
youtube_dl/extractor/youtube.py
|
Python
|
unlicense
| 151,125
|
[
"ADF"
] |
f4926c7b2adebe6e277c09b937a3deb54e39278bee1d95e3deec72a980308224
|
import numpy as np
# ----------
# Class for a univariate Gaussian
# p(x) = 1/sqrt(2*pi*sigma^2) * e^(-(x-mu)^2 / (2*sigma^2))
# where mu is the Gaussian mean and sigma^2 is the Gaussian variance
# ----------
class Gaussian:
def __init__(self, mean, variance):
self.mean = mean
self.variance = variance
def sample(self, points):
# np.random.normal expects the standard deviation, not the variance
return np.random.normal(self.mean, np.sqrt(self.variance), points)
# Returns a Gaussian with the sample mean and variance of the data set X,
# assuming the points are drawn from a Gaussian distribution
def estimate_gaussian(X):
mean = np.mean(X, 0)
variance = np.var(X, 0)
return Gaussian(mean, variance)
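# Minimal usage sketch (not part of the original module; values are
# illustrative): draw samples from a known Gaussian and recover its
# parameters with estimate_gaussian.
if __name__ == '__main__':
    g = Gaussian(mean=2.0, variance=9.0)
    X = g.sample(100000)
    fitted = estimate_gaussian(X)
    # With this many samples both estimates should be close to 2.0 and 9.0
    print(fitted.mean, fitted.variance)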
|
ramon-astudillo/lxmls-toolkit
|
lxmls/distributions/gaussian.py
|
Python
|
mit
| 664
|
[
"Gaussian"
] |
1ffc8284600a1eb43ca496c2bea0138f7666b1bcd1eab074a8c38c7bd22eb37b
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979193.604698
__CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:13 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/ajax/bouquets.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class bouquets(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(bouquets, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<script>
\t
\t $("#accordion").accordion({
active:false,
change:function(event, ui) {
ui.oldContent.empty();
ui.newContent.load(ui.newHeader.find(\'a\').attr(\'id\'));
},
autoHeight: false,
\t\t collapsible: true
});
</script>
<div id="accordion">
''')
for bouquet in VFFSL(SL,"bouquets",True): # generated from line 20, col 1
write(u'''\t<h1><a href="#" id="ajax/channels?id=''')
_v = VFFSL(SL,"quote",False)(VFFSL(SL,"bouquet",True)[0]) # u'$quote($bouquet[0])' on line 21, col 39
if _v is not None: write(_filter(_v, rawExpr=u'$quote($bouquet[0])')) # from line 21, col 39.
write(u'''&stype=''')
_v = VFFSL(SL,"stype",True) # u'$stype' on line 21, col 65
if _v is not None: write(_filter(_v, rawExpr=u'$stype')) # from line 21, col 65.
write(u'''">''')
_v = VFFSL(SL,"bouquet",True)[1] # u'$bouquet[1]' on line 21, col 73
if _v is not None: write(_filter(_v, rawExpr=u'$bouquet[1]')) # from line 21, col 73.
write(u'''</a></h1>
\t<div>
loading...
\t</div>
''')
write(u'''
</div>''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_bouquets= 'respond'
## END CLASS DEFINITION
if not hasattr(bouquets, '_initCheetahAttributes'):
templateAPIClass = getattr(bouquets, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(bouquets)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=bouquets()).run()
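# Illustrative usage (not part of the generated file): the template expects
# 'bouquets' (a list of (bouquet_ref, bouquet_name) pairs) and 'stype' in
# its searchList, e.g.
# print(bouquets(searchList=[{'bouquets': [('ref', 'Favourites')], 'stype': 'tv'}]))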
|
pli3/Openwebif
|
plugin/controllers/views/ajax/bouquets.py
|
Python
|
gpl-2.0
| 5,534
|
[
"VisIt"
] |
06f22f89f705327e34fca045dd9179dcb675aabcd716231cae9006a3e9f64c12
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
TABLES = {
'HL70001': ('Administrative sex', ('A', 'F', 'M', 'N', 'O', 'U')),
'HL70002': (
'Marital status', ('A', 'B', 'C', 'D', 'E', 'G', 'I', 'M', 'N', 'O', 'P', 'R', 'S', 'T', 'U', 'W')),
'HL70003': ('Event type',
('A01', 'A02', 'A03', 'A04', 'A05', 'A06', 'A07', 'A08', 'A09', 'A10', 'A11', 'A12', 'A13', 'A14',
'A15',
'A16', 'A17', 'A18', 'A19', 'A20', 'A21', 'A22', 'A23', 'A24', 'A25', 'A26', 'A27', 'A28', 'A29',
'A30',
'A31', 'A32', 'A33', 'A34', 'A35', 'A36', 'A37', 'A38', 'A39', 'A40', 'A41', 'A42', 'A43', 'A44',
'A45',
'A46', 'A47', 'A48', 'A49', 'A50', 'A51', 'A52', 'A53', 'A54', 'A55', 'A60', 'A61', 'A62', 'B01',
'B02',
'B03', 'B04', 'B05', 'B06', 'C01', 'C02', 'C03', 'C04', 'C05', 'C06', 'C07', 'C08', 'C09', 'C10',
'C11',
'C12', 'I01', 'I02', 'I03', 'I04', 'I05', 'I06', 'I07', 'I08', 'I09', 'I10', 'I11', 'I12', 'I13',
'I14',
'I15', 'J01', 'J02', 'K21', 'K22', 'K23', 'K24', 'K25', 'M01', 'M02', 'M03', 'M04', 'M05', 'M06',
'M07',
'M08', 'M09', 'M10', 'M11', 'M12', 'N01', 'N02', 'O01', 'O02', 'O03', 'O04', 'O05', 'O06', 'O07',
'O08',
'O09', 'O10', 'O11', 'O12', 'O13', 'O14', 'O15', 'O16', 'O17', 'O18', 'O19', 'O20', 'O21', 'P01',
'P02',
'P03', 'P04', 'P05', 'P06', 'P07', 'P08', 'P09', 'P10', 'PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6',
'PC7',
'PC8', 'PC9', 'PCA', 'PCB', 'PCC', 'PCD', 'PCE', 'PCF', 'PCG', 'PCH', 'PCJ', 'PCK', 'PCL', 'Q01',
'Q02',
'Q03', 'Q04', 'Q05', 'Q06', 'Q07', 'Q08', 'Q09', 'Q16', 'Q17', 'Q21', 'Q22', 'Q23', 'Q24', 'Q25',
'R01',
'R02', 'R03', 'R04', 'R05', 'R06', 'R07', 'R08', 'R09', 'R21', 'RAR', 'RDR', 'RER', 'RGR', 'ROR',
'S01',
'S02', 'S03', 'S04', 'S05', 'S06', 'S07', 'S08', 'S09', 'S10', 'S11', 'S12', 'S13', 'S14', 'S15',
'S16',
'S17', 'S18', 'S19', 'S20', 'S21', 'S22', 'S23', 'S24', 'S25', 'S26', 'T01', 'T02', 'T03', 'T04',
'T05',
'T06', 'T07', 'T08', 'T09', 'T10', 'T11', 'T12', 'U01', 'U02', 'U03', 'U04', 'U05', 'U06', 'U07',
'U08',
'U09', 'U10', 'U11', 'U12', 'U13', 'V01', 'V02', 'V03', 'V04', 'Varies', 'W01', 'W02')),
'HL70004': ('Patient class', ('B', 'C', 'E', 'I', 'N', 'O', 'P', 'R', 'U')),
'HL70005': ('Race', ('1002-5', '2028-9', '2054-5', '2076-8', '2106-3', '2131-1')),
'HL70006': ('Religion',
('ABC', 'AGN', 'AME',
'AMT', 'ANG', 'AOG',
'ATH', 'BAH', 'BAP',
'BMA', 'BOT', 'BTA',
'BTH', 'BUD', 'CAT',
'CFR', 'CHR', 'CHS',
'CMA', 'CNF', 'COC',
'COG', 'COI', 'COL',
'COM', 'COP', 'COT',
'CRR', 'EOT', 'EPI',
'ERL', 'EVC', 'FRQ',
'FWB', 'GRE', 'HIN',
'HOT', 'HSH', 'HVA',
'JAI', 'JCO', 'JEW',
'JOR', 'JOT', 'JRC',
'JRF', 'JRN', 'JWN',
'LMS', 'LUT', 'MEN',
'MET', 'MOM', 'MOS',
'MOT', 'MSH', 'MSU',
'NAM', 'NAZ', 'NOE',
'NRL', 'ORT', 'OTH',
'PEN', 'PRC', 'PRE',
'PRO', 'QUA', 'REC',
'REO', 'SAA', 'SEV',
'SHN', 'SIK', 'SOU',
'SPI', 'UCC', 'UMD',
'UNI', 'UNU', 'VAR',
'WES', 'WMC')),
'HL70007': ('Admission type', ('A', 'C', 'E', 'L', 'N', 'R', 'U')),
'HL70008': ('Acknowledgment code', ('AA', 'AE', 'AR', 'CA', 'CE', 'CR')),
'HL70009': ('Ambulatory status',
('A0', 'A1', 'A2', 'A3', 'A4',
'A5', 'A6', 'A7', 'A8', 'A9',
'B1', 'B2', 'B3', 'B4', 'B5',
'B6')),
'HL70017': ('Transaction type', ('AJ', 'CD', 'CG', 'CO', 'PY')),
'HL70023': ('Admit source', ('1', '2', '3', '4', '5', '6', '7', '8', '9')),
'HL70027': ('Priority', ('A', 'P', 'R', 'S', 'T')),
'HL70038': ('Order status', ('A', 'CA', 'CM', 'DC', 'ER', 'HD', 'IP', 'RP', 'SC')),
'HL70043': ('Condition code',
('01', '02', '03',
'04', '05', '06',
'07', '08', '09',
'10', '11',
'12 ... 16', '18',
'19', '20', '21',
'26', '27', '28',
'29', '31', '32',
'33', '34', '36',
'37', '38', '39',
'40', '41', '46',
'48', '55', '56',
'57', '60', '61',
'62', '66', '67',
'68', '70', '71',
'72', '73', '74',
'75', '76', '77',
'78', '79', '80')),
'HL70048': ('What subject filter',
('ADV', 'ANU', 'APA', 'APM', 'APN', 'APP', 'ARN', 'CAN', 'DEM', 'FIN', 'GID', 'GOL', 'MRI', 'MRO',
'NCK',
'NSC', 'NST', 'ORD', 'OTH', 'PRB', 'PRO', 'RAR', 'RDR', 'RER', 'RES', 'RGR', 'ROR', 'SAL', 'SBK',
'SBL',
'SOF', 'SOP', 'SSA', 'SSR', 'STA', 'VXI', 'XID')),
'HL70052': ('Diagnosis type', ('A', 'F', 'W')),
'HL70061': ('Check digit scheme', ('ISO', 'M10', 'M11', 'NPI')),
'HL70062': ('Event reason', ('01', '02', '03')),
'HL70063': ('Relationship',
('ASC', 'BRO', 'CGV', 'CHD', 'DEP', 'DOM', 'EMC', 'EME', 'EMR', 'EXF', 'FCH', 'FND', 'FTH', 'GCH',
'GRD',
'GRP', 'MGR', 'MTH', 'NCH', 'NON', 'OAD', 'OTH', 'OWN', 'PAR', 'SCH', 'SEL', 'SIB', 'SIS', 'SPO',
'TRA',
'UNK', 'WRD')),
'HL70065': ('Specimen action code', ('A', 'G', 'L', 'O', 'P', 'R', 'S')),
'HL70066': ('Employment status', ('1', '2', '3', '4', '5', '6', '9', 'C', 'D', 'F', 'L', 'O', 'P', 'T')),
'HL70069': ('Hospital service', ('CAR', 'MED', 'PUL', 'SUR', 'URO')),
'HL70070': ('Specimen source codes',
('ABS', 'AMN', 'ASP', 'BBL', 'BDY',
'BIFL', 'BLD', 'BLDA', 'BLDC',
'BLDCO', 'BLDV', 'BON', 'BPH',
'BPU', 'BRN', 'BRO', 'BRTH', 'CALC',
'CBLD', 'CDM', 'CNJT', 'CNL', 'COL',
'CSF', 'CTP', 'CUR', 'CVM', 'CVX',
'CYST', 'DIAF', 'DOSE', 'DRN',
'DUFL', 'EAR', 'EARW', 'ELT',
'ENDC', 'ENDM', 'EOS', 'EXG',
'EXHLD', 'EYE', 'FIB', 'FIST',
'FLT', 'FLU', 'GAS', 'GAST', 'GEN',
'GENC', 'GENL', 'GENV', 'HAR',
'IHG', 'ISLT', 'IT', 'LAM', 'LIQ',
'LN', 'LNA', 'LNV', 'LYM', 'MAC',
'MAR', 'MBLD', 'MEC', 'MILK', 'MLK',
'NAIL', 'NOS', 'ORH', 'PAFL', 'PAT',
'PLAS', 'PLB', 'PLC', 'PLR', 'PMN',
'PPP', 'PRP', 'PRT', 'PUS', 'RBC',
'RT', 'SAL', 'SEM', 'SER', 'SKM',
'SKN', 'SMN', 'SNV', 'SPRM', 'SPT',
'SPTC', 'SPTT', 'STL', 'STON',
'SWT', 'TEAR', 'THRB', 'THRT',
'TISG', 'TISPL', 'TISS', 'TISU',
'TLGI', 'TLNG', 'TSMI', 'TUB',
'ULC', 'UMB', 'UMED', 'UR', 'URC',
'URNS', 'URT', 'URTH', 'USUB',
'VITF', 'VOM', 'WAT', 'WBC', 'WICK',
'WND', 'WNDA', 'WNDD', 'WNDE',
'XXX')),
'HL70074': ('Diagnostic service section ID',
('AU', 'BG', 'BLB', 'CH', 'CP', 'CT', 'CTH', 'CUS', 'EC', 'EN', 'HM', 'ICU', 'IMG', 'IMM', 'LAB',
'MB',
'MCB', 'MYC', 'NMR', 'NMS', 'NRS', 'OSL', 'OT', 'OTH', 'OUS', 'PAR', 'PAT', 'PF', 'PHR', 'PHY',
'PT',
'RAD', 'RC', 'RT', 'RUS', 'RX', 'SP', 'SR', 'TX', 'URN', 'VR', 'VUS', 'XRC')),
'HL70076': ('Message type',
('ACK', 'ADR',
'ADT', 'BAR',
'CRM', 'CSU',
'DFT', 'DOC',
'DSR', 'EAC',
'EAN', 'EAR',
'EDR', 'EQQ',
'ERP', 'ESR',
'ESU', 'INR',
'INU', 'LSR',
'LSU', 'MCF',
'MDM', 'MFD',
'MFK', 'MFN',
'MFQ', 'MFR',
'NMD', 'NMQ',
'NMR', 'OMD',
'OMG', 'OML',
'OMN', 'OMP',
'OMS', 'ORD',
'ORF', 'ORG',
'ORL', 'ORM',
'ORN', 'ORP',
'ORR', 'ORS',
'ORU', 'OSQ',
'OSR', 'OUL',
'PEX', 'PGL',
'PIN', 'PMU',
'PPG', 'PPP',
'PPR', 'PPT',
'PPV', 'PRR',
'PTR', 'QBP',
'QCK', 'QCN',
'QRY', 'QSB',
'QSX', 'QVR',
'RAR', 'RAS',
'RCI', 'RCL',
'RDE', 'RDR',
'RDS', 'RDY',
'REF', 'RER',
'RGR', 'RGV',
'ROR', 'RPA',
'RPI', 'RPL',
'RPR', 'RQA',
'RQC', 'RQI',
'RQP', 'RQQ',
'RRA', 'RRD',
'RRE', 'RRG',
'RRI', 'RSP',
'RTB', 'SIU',
'SPQ', 'SQM',
'SQR', 'SRM',
'SRR', 'SSR',
'SSU', 'SUR',
'TBR', 'TCR',
'TCU', 'UDM',
'VQQ', 'VXQ',
'VXR', 'VXU',
'VXX')),
'HL70078': ('Abnormal flags',
('<', '>', 'A', 'AA', 'B', 'D', 'H', 'HH', 'I', 'L', 'LL', 'MS', 'N', 'null', 'R', 'S', 'U', 'VS',
'W')),
'HL70080': ('Nature of abnormal testing', ('A', 'N', 'R', 'S')),
'HL70083': ('Outlier type', ('C', 'D')),
'HL70085': ('Observation result status codes interpretation',
('C', 'D', 'F', 'I', 'N', 'O', 'P', 'R', 'S', 'U', 'W', 'X')),
'HL70091': ('Query priority', ('D', 'I')),
'HL70092': ('Re-admission indicator', ('R',)),
'HL70093': ('Release information', ('...', 'N', 'Y')),
'HL70098': ('Type of agreement', ('M', 'S', 'U')),
'HL70100': ('When to charge', ('D', 'O', 'R', 'S', 'T')),
'HL70102': ('Delayed acknowledgment type', ('D', 'F')),
'HL70103': ('Processing ID', ('D', 'P', 'T')),
'HL70104': ('Version ID', ('2.0', '2.0D', '2.1', '2.2', '2.3', '2.3.1', '2.4')),
'HL70105': ('Source of comment', ('L', 'O', 'P')),
'HL70106': ('Query/response format code', ('D', 'R', 'T')),
'HL70107': ('Deferred response type', ('B', 'L')),
'HL70108': ('Query results level', ('O', 'R', 'S', 'T')),
'HL70109': ('Report priority', ('R', 'S')),
'HL70112': ('Discharge disposition',
('01', '02', '03', '04', '05', '06', '07', '08', '09',
'10 ...19', '20', '21 ... 29', '30', '31 ... 39', '40', '41',
'42')),
'HL70116': ('Bed status', ('C', 'H', 'I', 'K', 'O', 'U')),
'HL70121': ('Response flag', ('D', 'E', 'F', 'N', 'R')),
'HL70122': ('Charge type', ('CH', 'CO', 'CR', 'DP', 'GR', 'NC', 'PC', 'RS')),
'HL70123': ('Result status', ('A', 'C', 'F', 'I', 'O', 'P', 'R', 'S', 'X', 'Y', 'Z')),
'HL70124': ('Transportation mode', ('CART', 'PORT', 'WALK', 'WHLC')),
'HL70125': ('Value type',
('AD', 'CE', 'CF', 'CK', 'CN', 'CP',
'CX', 'DT', 'ED', 'FT', 'MO', 'NM',
'PN', 'RP', 'SN', 'ST', 'TM', 'TN',
'TS', 'TX', 'XAD', 'XCN', 'XON',
'XPN', 'XTN')),
'HL70126': ('Quantity limited request', ('CH', 'LI', 'PG', 'RD', 'ZO')),
'HL70127': ('Allergen type', ('AA', 'DA', 'EA', 'FA', 'LA', 'MA', 'MC', 'PA')),
'HL70128': ('Allergy severity', ('MI', 'MO', 'SV', 'U')),
'HL70130': ('Visit user code', ('HO', 'MO', 'PH', 'TE')),
'HL70133': (
'Procedure practitioner identifier code type', ('AN', 'AS', 'CM', 'NP', 'PR', 'PS', 'RD', 'RS', 'SN')),
'HL70135': ('Assignment of benefits', ('M', 'N', 'Y')),
'HL70136': ('Yes/no indicator', ('N', 'Y')),
'HL70137': ('Mail claim party', ('E', 'G', 'I', 'O', 'P')),
'HL70140': ('Military service',
('AUSA', 'AUSAF', 'AUSN', 'NATO', 'NOAA',
'USA', 'USAF', 'USCG', 'USMC', 'USN',
'USPHS')),
'HL70141': ('Military rank/grade', ('E1 ... E9', 'O1 ... O10', 'W1 ... W4')),
'HL70142': ('Military status', ('ACT', 'DEC', 'RET')),
'HL70144': ('Eligibility source', ('1', '2', '3', '4', '5', '6', '7')),
'HL70145': ('Room type', ('2ICU', '2PRI', '2SPR', 'ICU', 'PRI', 'SPR')),
'HL70146': ('Amount type', ('DF', 'LM', 'PC', 'RT', 'UL')),
'HL70147': ('Policy type', ('2ANC', '2MMD', '3MMD', 'ANC', 'MMD')),
'HL70148': ('Penalty type', ('AT', 'PC')),
'HL70149': ('Day type', ('AP', 'DE', 'PE')),
'HL70150': ('Pre-certification patient type', ('ER', 'IPE', 'OPE', 'UR')),
'HL70153': ('Value code',
('01', '02', '04', '05', '06', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '21',
'22',
'23', '24', '30', '31', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48',
'49',
'50', '51', '52', '53', '56', '57', '58', '59', '60', '67', '68', '70 ... 72', '75 ... 79', '80',
'81',
'A1', 'A2', 'A3', 'X0', 'X4')),
'HL70155': ('Accept/application acknowledgment conditions', ('AL', 'ER', 'NE', 'SU')),
'HL70156': ('Which date/time qualifier', ('ANY', 'COL', 'ORD', 'RCT', 'REP', 'SCHED')),
'HL70157': ('Which date/time status qualifier', ('ANY', 'CFN', 'COR', 'FIN', 'PRE', 'REP')),
'HL70158': ('Date/time selection qualifier', ('1ST', 'ALL', 'LST', 'REV')),
'HL70159': ('Diet code specification type', ('D', 'P', 'S')),
'HL70160': ('Tray type', ('EARLY', 'GUEST', 'LATE', 'MSG', 'NO')),
'HL70161': ('Allow substitution', ('G', 'N', 'T')),
'HL70162': ('Route of administration',
('AP', 'B', 'DT', 'EP', 'ET', 'GTT', 'GU', 'IA', 'IB', 'IC', 'ICV', 'ID', 'IH', 'IHA', 'IM',
'IMR', 'IN', 'IO', 'IP', 'IS', 'IT', 'IU', 'IV', 'MM', 'MTH', 'NG', 'NP', 'NS', 'NT', 'OP',
'OT', 'OTH', 'PF', 'PO', 'PR', 'RM', 'SC', 'SD', 'SL', 'TD', 'TL', 'TP', 'TRA', 'UR', 'VG',
'VM', 'WND')),
'HL70163': ('Body site',
('BE', 'BN', 'BU', 'CT', 'LA', 'LAC', 'LACF', 'LD', 'LE', 'LEJ', 'LF', 'LG', 'LH', 'LIJ', 'LLAQ',
'LLFA', 'LMFA', 'LN', 'LPC', 'LSC', 'LT', 'LUA', 'LUAQ', 'LUFA', 'LVG', 'LVL', 'NB', 'OD', 'OS',
'OU', 'PA', 'PERIN', 'RA', 'RAC', 'RACF', 'RD', 'RE', 'REJ', 'RF', 'RG', 'RH', 'RIJ', 'RLAQ',
'RLFA', 'RMFA', 'RN', 'RPC', 'RSC', 'RT', 'RUA', 'RUAQ', 'RUFA', 'RVG', 'RVL')),
'HL70164': ('Administration device', ('AP', 'BT', 'HL', 'IPPB', 'IVP', 'IVS', 'MI', 'NEB', 'PCA')),
'HL70165': ('Administration method',
('CH', 'DI', 'DU', 'IF', 'IR', 'IS', 'IVP', 'IVPB', 'NB', 'PF', 'PT', 'SH', 'SO', 'WA', 'WI')),
'HL70166': ('RX component type', ('A', 'B')),
'HL70167': ('Substitution status', ('0', '1', '2', '3', '4', '5', '7', '8', 'G', 'N', 'T')),
'HL70168': ('Processing priority', ('A', 'B', 'C', 'P', 'R', 'S', 'T')),
'HL70169': ('Reporting priority', ('C', 'R')),
'HL70170': ('Derived specimen', ('C', 'N', 'P')),
'HL70173': ('Coordination of benefits', ('CO', 'IN')),
'HL70174': ('Nature of service/test/observation', ('A', 'C', 'F', 'P', 'S')),
'HL70175': ('Master file identifier code',
('CDM', 'CLN', 'CMA', 'CMB', 'LOC', 'OMA', 'OMB', 'OMC', 'OMD', 'OME', 'PRA', 'STF')),
'HL70177': ('Confidentiality code', ('AID', 'EMP', 'ETH', 'HIV', 'PSY', 'R', 'U', 'UWM', 'V', 'VIP')),
'HL70178': ('File level event code', ('REP', 'UPD')),
'HL70179': ('Response level', ('AL', 'ER', 'NE', 'SU')),
'HL70180': ('Record-level event code', ('MAC', 'MAD', 'MDC', 'MDL', 'MUP')),
'HL70181': ('MFN record-level error return', ('S', 'U')),
'HL70182': ('Staff type', ()),
'HL70183': ('Active/inactive', ('A', 'I')),
'HL70184': ('Department', ()),
'HL70185': ('Preferred method of contact', ('B', 'C', 'E', 'F', 'H', 'O')),
'HL70187': ('Provider billing', ('I', 'P')),
'HL70189': ('Ethnic group', ('...', 'H', 'N', 'U')),
'HL70190': ('Address type', ('B', 'BA', 'BDL', 'BR', 'C', 'F', 'H', 'L', 'M', 'N', 'O', 'P', 'RH')),
'HL70191': ('Type of referenced data', ('AP', 'AU', 'FT', 'IM', 'multipart', 'NS', 'SD', 'SI', 'TEXT', 'TX')),
'HL70193': ('Amount class', ('AT', 'LM', 'PC', 'UL')),
'HL70200': ('Name type', ('A', 'B', 'C', 'D', 'I', 'L', 'M', 'N', 'P', 'R', 'S', 'T', 'U')),
'HL70201': ('Telecommunication use code', ('ASN', 'BPN', 'EMR', 'NET', 'ORN', 'PRN', 'VHN', 'WPN')),
'HL70202': ('Telecommunication equipment type', ('BP', 'CP', 'FX', 'Internet', 'MD', 'PH', 'X.400')),
'HL70203': ('Identifier type',
('AM', 'AN', 'BA', 'BR', 'BRN', 'DI', 'DL', 'DN', 'DR', 'DS', 'EI', 'EN', 'FI', 'GI', 'GN', 'HC',
'JHN',
'LN', 'LR', 'MA', 'MC', 'MCN', 'MR', 'MS', 'NE', 'NH', 'NI', 'NNxxx', 'NPI', 'PEN', 'PI', 'PN',
'PRN',
'PT', 'RR', 'RRI', 'SL', 'SR', 'SS', 'U', 'UPIN', 'VN', 'VS', 'WC', 'WCN', 'XX')),
'HL70204': ('Organizational name type', ('A', 'D', 'L', 'SL')),
'HL70205': ('Price type', ('AP', 'DC', 'IC', 'PF', 'TF', 'TP', 'UP')),
'HL70206': ('Segment action code', ('A', 'D', 'U')),
'HL70207': ('Processing mode', ('A', 'I', 'Not present', 'R', 'T')),
'HL70208': ('Query response status', ('AE', 'AR', 'NF', 'OK')),
'HL70209': ('Relational operator', ('CT', 'EQ', 'GE', 'GN', 'GT', 'LE', 'LT', 'NE')),
'HL70210': ('Relational conjunction', ('AND', 'OR')),
'HL70211': ('Alternate character sets',
('8859/1', '8859/2', '8859/3', '8859/4', '8859/5',
'8859/6', '8859/7', '8859/8', '8859/9', 'ASCII',
'ISO IR14', 'ISO IR159', 'ISO IR87', 'UNICODE')),
'HL70213': ('Purge status code', ('D', 'I', 'P')),
'HL70217': ('Visit priority code', ('1', '2', '3')),
'HL70220': ('Living arrangement', ('A', 'F', 'I', 'R', 'S', 'U')),
'HL70223': ('Living dependency', ('C', 'CB', 'D', 'M', 'O', 'S', 'U', 'WU')),
'HL70224': ('Transport arranged', ('A', 'N', 'U')),
'HL70225': ('Escort required', ('N', 'R', 'U')),
'HL70227': ('Manufacturers of vaccines (code=MVX)',
('AB', 'AD', 'ALP', 'AR', 'AVI', 'BA', 'BAY', 'BP', 'BPC', 'CEN', 'CHI', 'CON', 'EVN', 'GRE',
'IAG', 'IM',
'IUS', 'JPN', 'KGC', 'LED', 'MA', 'MED', 'MIL', 'MIP', 'MSD', 'NAB', 'NAV', 'NOV', 'NYB', 'ORT',
'OTC',
'OTH', 'PD', 'PMC', 'PRX', 'SCL', 'SI', 'SKB', 'UNK', 'USA', 'WA', 'WAL')),
'HL70228': ('Diagnosis classification', ('C', 'D', 'I', 'M', 'O', 'R', 'S', 'T')),
'HL70229': ('DRG payor', ('C', 'G', 'M')),
'HL70230': ('Procedure functional type', ('A', 'D', 'I', 'P')),
'HL70231': ('Student status', ('F', 'N', 'P')),
'HL70232': ('Insurance company contact reason', ('01', '02', '03')),
'HL70234': ('Report timing', ('10D', '15D', '30D', '3D', '7D', 'AD', 'CO', 'DE', 'PD', 'RQ')),
'HL70235': ('Report source', ('C', 'D', 'E', 'H', 'L', 'M', 'N', 'O', 'P', 'R')),
'HL70236': ('Event reported to', ('D', 'L', 'M', 'R')),
'HL70237': ('Event qualification', ('A', 'B', 'D', 'I', 'L', 'M', 'O', 'W')),
'HL70238': ('Event seriousness', ('N', 'S', 'Y')),
'HL70239': ('Event expected', ('N', 'U', 'Y')),
'HL70240': ('Event consequence', ('C', 'D', 'H', 'I', 'J', 'L', 'O', 'P', 'R')),
'HL70241': ('Patient outcome', ('D', 'F', 'N', 'R', 'S', 'U', 'W')),
'HL70242': ('Primary observer\'s qualification', ('C', 'H', 'L', 'M', 'O', 'P', 'R')),
'HL70243': ('Identity may be divulged', ('N', 'NA', 'Y')),
'HL70247': ('Status of evaluation', ('A', 'C', 'D', 'I', 'K', 'O', 'P', 'Q', 'R', 'U', 'X', 'Y')),
'HL70248': ('Product source', ('A', 'L', 'N', 'R')),
'HL70250': ('Relatedness assessment', ('H', 'I', 'M', 'N', 'S')),
'HL70251': ('Action taken in response to the event', ('DI', 'DR', 'N', 'OT', 'WP', 'WT')),
'HL70252': ('Causality observations', ('AW', 'BE', 'DR', 'EX', 'IN', 'LI', 'OE', 'OT', 'PL', 'SE', 'TC')),
'HL70253': ('Indirect exposure mechanism', ('B', 'F', 'O', 'P', 'X')),
'HL70254': ('Kind of quantity',
('ABS', 'ACNC', 'ACT', 'APER',
'ARB', 'AREA', 'ASPECT', 'CACT',
'CCNT', 'CCRTO', 'CFR', 'CLAS',
'CNC', 'CNST', 'COEF', 'COLOR',
'CONS', 'CRAT', 'CRTO', 'DEN',
'DEV', 'DIFF', 'ELAS', 'ELPOT',
'ELRAT', 'ELRES', 'ENGR', 'ENT',
'ENTCAT', 'ENTNUM', 'ENTSUB',
'ENTVOL', 'EQL', 'FORCE', 'FREQ',
'IMP', 'KINV', 'LEN', 'LINC',
'LIQ', 'MASS', 'MCNC', 'MCNT',
'MCRTO', 'MFR', 'MGFLUX', 'MINC',
'MORPH', 'MOTIL', 'MRAT', 'MRTO',
'NCNC', 'NCNT', 'NFR', 'NRTO',
'NUM', 'OD', 'OSMOL', 'PRES',
'PRID', 'PWR', 'RANGE', 'RATIO',
'RCRLTM', 'RDEN', 'REL', 'RLMCNC',
'RLSCNC', 'RLTM', 'SATFR', 'SCNC',
'SCNCIN', 'SCNT', 'SCNTR', 'SCRTO',
'SFR', 'SHAPE', 'SMELL', 'SRAT',
'SRTO', 'SUB', 'SUSC', 'TASTE',
'TEMP', 'TEMPDF', 'TEMPIN',
'THRMCNC', 'THRSCNC', 'TIME',
'TITR', 'TMDF', 'TMSTP', 'TRTO',
'TYPE', 'VCNT', 'VEL', 'VELRT',
'VFR', 'VISC', 'VOL', 'VRAT',
'VRTO')),
'HL70255': ('Duration categories',
('*', '12H', '1H', '1L', '1W', '24H', '2.5H', '2D', '2H', '2L', '2W', '30M', '3D', '3H', '3L',
'3W', '4D',
'4H', '4W', '5D', '5H', '6D', '6H', '7H', '8H', 'PT')),
'HL70256': ('Time delay post challenge',
('10D', '10M', '12H', '15M', '1H',
'1L', '1M', '1W', '20M', '24H',
'2.5H', '25M', '2D', '2H', '2L',
'2M', '2W', '30M', '3D', '3H', '3L',
'3M', '3W', '4D', '4H', '4M', '4W',
'5D', '5H', '5M', '6D', '6H', '6M',
'7D', '7H', '7M', '8H', '8H SHIFT',
'8M', '9M', 'BS', 'PEAK', 'RANDOM',
'TROUGH')),
'HL70257': ('Nature of challenge', ('CFST', 'EXCZ', 'FFST')),
'HL70258': ('Relationship modifier', ('BPU', 'CONTROL', 'DONOR', 'PATIENT')),
'HL70259': ('Modality',
('AS', 'BS', 'CD', 'CP',
'CR', 'CS', 'CT', 'DD',
'DG', 'DM', 'EC', 'ES',
'FA', 'FS', 'LP', 'LS',
'MA', 'MS', 'NM', 'OT',
'PT', 'RF', 'ST', 'TG',
'US', 'XA')),
'HL70260': ('Patient location type', ('B', 'C', 'D', 'E', 'L', 'N', 'O', 'R')),
'HL70261': ('Location equipment', ('EEG', 'EKG', 'INF', 'IVP', 'OXY', 'SUC', 'VEN', 'VIT')),
'HL70262': ('Privacy level', ('F', 'J', 'P', 'Q', 'S', 'W')),
'HL70263': ('Level of care', ('A', 'C', 'E', 'F', 'N', 'R', 'S')),
'HL70265': ('Specialty type',
('ALC', 'AMB', 'CAN', 'CAR', 'CCR',
'CHI', 'EDI', 'EMR', 'FPC', 'INT',
'ISO', 'NAT', 'NBI', 'OBG', 'OBS',
'OTH', 'PED', 'PHY', 'PIN', 'PPS',
'PRE', 'PSI', 'PSY', 'REH', 'SUR',
'WIC')),
'HL70267': ('Days of the week', ('FRI', 'MON', 'SAT', 'SUN', 'THU', 'TUE', 'WED')),
'HL70268': ('Override', ('A', 'R', 'X')),
'HL70269': ('Charge on indicator', ('O', 'R')),
'HL70270': (
'Document type', ('AR', 'CD', 'CN', 'DI', 'DS', 'ED', 'HP', 'OP', 'PC', 'PH', 'PN', 'PR', 'SP', 'TS')),
'HL70271': ('Document completion status', ('AU', 'DI', 'DO', 'IN', 'IP', 'LA', 'PA')),
'HL70272': ('Document confidentiality status', ('R', 'U', 'V')),
'HL70273': ('Document availability status', ('AV', 'CA', 'OB', 'UN')),
'HL70275': ('Document storage status', ('AA', 'AC', 'AR', 'PU')),
'HL70276': ('Appointment reason codes', ('CHECKUP', 'EMERGENCY', 'FOLLOWUP', 'ROUTINE', 'WALKIN')),
'HL70277': ('Appointment type codes', ('Complete', 'Normal', 'Tentative')),
'HL70278': ('Filler status codes',
('Blocked', 'Booked',
'Cancelled', 'Complete', 'Dc',
'Deleted', 'Overbook',
'Pending', 'Started',
'Waitlist')),
'HL70279': ('Allow substitution codes', ('Confirm', 'No', 'Notify', 'Yes')),
'HL70280': ('Referral priority', ('A', 'R', 'S')),
'HL70281': ('Referral type', ('Hom', 'Lab', 'Med', 'Psy', 'Rad', 'Skn')),
'HL70282': ('Referral disposition', ('AM', 'RP', 'SO', 'WR')),
'HL70283': ('Referral status', ('A', 'E', 'P', 'R')),
'HL70284': ('Referral category', ('A', 'E', 'I', 'O')),
'HL70286': ('Provider role', ('CP', 'PP', 'RP', 'RT')),
'HL70287': ('Problem/goal action code', ('AD', 'CO', 'DE', 'LI', 'UC', 'UN', 'UP')),
'HL70290': ('MIME base64 encoding characters',
('0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22',
'23', '24', '25', '26', '27', '28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37',
'38',
'39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52',
'53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '7', '8', '9', '(pad)')),
'HL70291': ('Subtype of referenced data',
('BASIC', 'DICOM', 'FAX', 'GIF', 'HTML', 'JOT', 'JPEG', 'Octet-stream', 'PICT', 'PostScript',
'RTF',
'SGML', 'TIFF', 'x-hl7-cda-level-one', 'XML')),
'HL70292': ('Vaccines administered (code = CVX)(parenteral, unless oral is noted)',
('01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '100', '101', '11', '12', '13', '14',
'15',
'16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31',
'32',
'33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48',
'49',
'50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65',
'66',
'67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82',
'83',
'84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99',
'999')),
'HL70294': ('Time selection criteria parameter class codes',
('Fri', 'Mon', 'Prefend', 'Prefstart', 'Sat', 'Sun', 'Thu', 'Tue', 'Wed')),
'HL70298': ('CP range type', ('F', 'P')),
'HL70299': ('Encoding', ('A', 'Base64', 'Hex')),
'HL70301': (
'Universal ID type', ('DNS', 'GUID', 'HCD', 'HL7', 'ISO', 'L,M,N', 'Random', 'UUID', 'x400', 'x500')),
'HL70305': ('Person location type', ('C', 'D', 'H', 'N', 'O', 'P', 'S')),
'HL70309': ('Coverage type', ('B', 'H', 'P')),
'HL70311': ('Job status', ('O', 'P', 'T', 'U')),
'HL70315': ('Living will code', ('F', 'I', 'N', 'U', 'Y')),
'HL70316': ('Organ donor code', ('F', 'I', 'N', 'P', 'R', 'U', 'Y')),
'HL70317': ('Annotations', ('9900', '9901', '9902', '9903', '9904')),
'HL70321': ('Dispense method', ('AD', 'F', 'TR', 'UD')),
'HL70322': ('Completion status', ('CP', 'NA', 'PA', 'RE')),
'HL70323': ('Action code', ('A', 'D', 'U')),
'HL70324': ('Location characteristic ID',
('GEN', 'IMP', 'INF', 'LCR', 'LIC', 'OVR', 'PRL', 'SET',
'SHA', 'SMK', 'STF', 'TEA')),
'HL70325': ('Location relationship ID', ('ALI', 'DTY', 'LAB', 'LB2', 'PAR', 'RX', 'RX2')),
'HL70326': ('Visit indicator', ('A', 'V')),
'HL70329': ('Quantity method', ('A', 'E')),
'HL70330': ('Marketing basis', ('510E', '510K', '522S', 'PMA', 'PRE', 'TXN')),
'HL70331': ('Facility type', ('A', 'D', 'M', 'U')),
'HL70332': ('Source type', ('A', 'I')),
'HL70334': ('Disabled person', ('AP', 'GT', 'IN', 'PT')),
'HL70335': ('Repeat pattern',
('A', 'BID', 'C', 'D', 'I', 'M',
'Meal Related Timings', 'Once', 'P', 'PRN',
'PRNxxx', 'QAM', 'QHS', 'QID', 'Q<integer>D',
'Q<integer>H', 'Q<integer>J<day#>',
'Q<integer>L', 'Q<integer>M', 'Q<integer>S',
'Q<integer>W', 'QOD', 'QPM', 'QSHIFT', 'TID',
'U <spec>', 'V', 'xID')),
'HL70336': ('Referral reason', ('O', 'P', 'S', 'W')),
'HL70337': ('Certification status', ('C', 'E')),
'HL70338': (
'Practitioner ID number type',
('CY', 'DEA', 'GL', 'L&I', 'MCD', 'MCR', 'QA', 'SL', 'TAX', 'TRL', 'UPIN')),
'HL70339': ('Advanced beneficiary notice code', ('1', '2', '3', '4')),
'HL70344': ('Patient\'s relationship to insured',
('01', '02', '03', '04', '05', '06',
'07', '08', '09', '10', '11', '12',
'13', '14', '15', '16', '17', '18',
'19')),
'HL70348': ('Special program indicator', ('01', '02', '03', '04', '05', '06', '07', '08')),
'HL70349': ('PSRO/UR approval indicator', ('1', '2', '3', '4', '5')),
'HL70350': ('Occurrence code',
('01', '02', '03', '04', '05', '06',
'09', '10', '11', '12', '17', '18',
'19', '20', '21', '22', '24', '25',
'26', '27', '28', '29', '30', '31',
'32', '33', '34', '35', '36', '37',
'40', '41', '42', '43', '44', '45',
'46', '47 ... 49', '50', '51',
'70 ... 99', 'A1', 'A2', 'A3')),
'HL70351': ('Occurrence span', ('70', '71', '72', '73', '74', '75', '76', '77', '78', '79', 'M0')),
'HL70353': ('CWE statuses', ('NA', 'NASK', 'NAV', 'U', 'UASK')),
'HL70354': ('Message structure',
('ACK', 'ADR_A19', 'ADT_A01', 'ADT_A02',
'ADT_A03', 'ADT_A05', 'ADT_A06',
'ADT_A09', 'ADT_A15', 'ADT_A16',
'ADT_A17', 'ADT_A18', 'ADT_A20',
'ADT_A21', 'ADT_A24', 'ADT_A30',
'ADT_A37', 'ADT_A38', 'ADT_A39',
'ADT_A43', 'ADT_A45', 'ADT_A50',
'ADT_A52', 'ADT_A54', 'ADT_A60',
'ADT_A61', 'BAR_P01', 'BAR_P02',
'BAR_P05', 'BAR_P06', 'BAR_P10',
'CRM_C01', 'CSU_C09', 'DFT_P03',
'DOC_T12', 'DSR_P04', 'DSR_Q01',
'DSR_Q03', 'EAC_U07', 'EAN_U09',
'EAR_U08', 'EDR_R07', 'EQQ_Q04',
'ERP_R09', 'ESR_U02', 'ESU_U01',
'INR_U06', 'INU_U05', 'LSU_U12',
'MDM_T01', 'MDM_T02', 'MFD_MFA',
'MFK_M01', 'MFN_M01', 'MFN_M02',
'MFN_M03', 'MFN_M04', 'MFN_M05',
'MFN_M06', 'MFN_M07', 'MFN_M08',
'MFN_M09', 'MFN_M10', 'MFN_M11',
'MFN_M12', 'MFQ_M01', 'MFR_M01',
'NMD_N02', 'NMQ_N01', 'NMR_N01',
'OMD_O03', 'OMG_O19', 'OML_O21',
'OMN_O07', 'OMP_O09', 'OMS_O05',
'ORD_O04', 'ORF_R04', 'ORG_O20',
'ORL_O22', 'ORM_O01', 'ORN_O08',
'ORP_O10', 'ORR_O02', 'ORS_O06',
'ORU_R01', 'ORU_W01', 'OSQ_Q06',
'OSR_Q06', 'OUL_R21', 'PEX_P07',
'PGL_PC6', 'PMU_B01', 'PMU_B03',
'PMU_B04', 'PPG_PCG', 'PPP_PCB',
'PPR_PC1', 'PPT_PCL', 'PPV_PCA',
'PRR_PC5', 'PTR_PCF', 'QBP_Q11',
'QBP_Q13', 'QBP_Q15', 'QBP_Q21',
'QCK_Q02', 'QCN_J01', 'QRY_A19',
'QRY_PC4', 'QRY_Q01', 'QRY_Q02',
'QRY_R02', 'QRY_T12', 'QSB_Q16',
'QVR_Q17', 'RAR_RAR', 'RAS_O17',
'RCI_I05', 'RCL_I06', 'RDE_O11',
'RDR_RDR', 'RDS_O13', 'RDY_K11',
'RDY_K15', 'REF_I12', 'RER_RER',
'RGR_RGR', 'RGV_O15', 'ROR_ROR',
'RPA_I08', 'RPI_I01', 'RPI_I04',
'RPL_I02', 'RPR_I03', 'RQA_I08',
'RQC_I05', 'RQI_I01', 'RQP_I04',
'RQQ_Q09', 'RRA_O18', 'RRD_O14',
'RRE_O12', 'RRG_O16', 'RRI_I12',
'RSP_K21', 'RSP_K22', 'RSP_K23',
'RSP_K24', 'RSP_K25', 'RTB_K13',
'SIU_S12', 'SPQ_Q08', 'SQM_S25',
'SQR_S25', 'SRM_S01', 'SRR_S01',
'SSR_U04', 'SSU_U03', 'SUR_P09',
'TBR_R08', 'TCU_U10', 'UDM_Q05',
'VQQ_Q07', 'VXQ_V01', 'VXR_V03',
'VXU_V04', 'VXX_V02')),
'HL70355': ('Primary key value type', ('CE', 'PL')),
'HL70356': ('Alternate character set handling scheme', ('2.3', 'ISO 2022-1994', '<null>')),
'HL70357': ('Message error condition codes',
(
'0', '100', '101', '102', '103', '200', '201', '202', '203', '204', '205', '206', '207',
'Errors',
'Rejection', 'Success')),
'HL70359': ('Diagnosis priority', ('0', '1', '2 ...')),
'HL70360': ('Degree',
('AA',
'AAS',
'ABA',
'AE', 'AS',
'BA',
'BBA',
'BE',
'BFA',
'BN', 'BS',
'BSL',
'BT',
'CER',
'DBA',
'DED',
'DIP',
'DO', 'HS',
'JD', 'MA',
'MBA',
'MCE',
'MD',
'MDI',
'ME',
'MED',
'MEE',
'MFA',
'MME',
'MS',
'MSL',
'MT', 'NG',
'PharmD',
'PHD',
'PHE',
'PHS',
'SEC',
'TS')),
'HL70363': ('Assigning authority',
('AUSDVA', 'AUSHIC', 'CANAB', 'CANBC', 'CANMB', 'CANNB', 'CANNF', 'CANNS', 'CANNT', 'CANNU',
'CANON',
'CANPE', 'CANQC', 'CANSK', 'CANYT', 'NLVWS', 'USCDC', 'USHCFA', 'USSSA')),
'HL70364': ('Comment type', ('1R', '2R', 'AI', 'DR', 'GI', 'GR', 'PI', 'RE')),
'HL70365': ('Equipment state', ('CL', 'CO', 'ES', 'ID', 'IN', 'OP', 'PA', 'PD', 'PU')),
'HL70366': ('Local/remote control state', ('L', 'R')),
'HL70367': ('Alert level', ('C', 'N', 'S', 'W')),
'HL70368': ('Remote control command',
('AB', 'CL', 'CN', 'DI', 'EN', 'ES', 'EX', 'IN', 'LC', 'LK', 'LO', 'PA', 'RC', 'RE', 'SA', 'SU',
'TT', 'UC', 'UN')),
'HL70369': ('Specimen role', ('B', 'C', 'P', 'Q', 'R')),
'HL70370': ('Container status', ('I', 'L', 'M', 'O', 'P', 'R', 'U', 'X')),
'HL70371': ('Additive', ('BOR', 'C32', 'C38', 'EDTK', 'EDTN', 'HCL6', 'HEPL', 'HEPN')),
'HL70372': ('Specimen component', ('BLD', 'BSEP', 'PLAS', 'PPP', 'PRP', 'SED', 'SER', 'SUP')),
'HL70373': ('Treatment', ('ACID', 'ALK', 'DEFB', 'FILT', 'LDLP', 'NEUT', 'RECA', 'UFIL')),
'HL70374': ('System induced contaminants', ('CNTM',)),
'HL70375': ('Artificial blood', ('FLUR', 'SFHB')),
'HL70376': ('Special handling considerations', ('C37', 'CAMB', 'CATM', 'CFRZ', 'CREF', 'PRTL')),
'HL70377': ('Other environmental factors', ('A60', 'ATM')),
'HL70383': ('Substance status', ('CE', 'CW', 'EE', 'EW', 'NE', 'NW', 'OE', 'OK', 'OW', 'QE', 'QW')),
'HL70384': ('Substance type', ('CO', 'DI', 'LI', 'LW', 'MR', 'OT', 'PT', 'PW', 'RC', 'SC', 'SR', 'SW')),
'HL70387': ('Command response', ('ER', 'OK', 'ST', 'TI', 'UN')),
'HL70388': ('Processing type', ('E', 'P')),
'HL70389': ('Analyte repeat status', ('D', 'F', 'O', 'R')),
'HL70391': ('Segment group', ('etc', 'OBRG', 'ORCG', 'PIDG', 'RXAG', 'RXDG', 'RXEG', 'RXOG')),
'HL70392': ('Match reason', ('DB', 'NA', 'NP', 'SS')),
'HL70393': ('Match algorithms', ('LINKSOFT_2.01', 'MATCHWARE_1.2')),
'HL70394': ('Response modality', ('B', 'R', 'T')),
'HL70395': ('Modify indicator', ('M', 'N')),
'HL70396': ('Coding System',
('L', 'ACR', 'ART', 'AS4', 'AS4E', 'ATC', 'C4', 'C5', 'CAS',
'CD2', 'CDCA', 'CDCM', 'CDS', 'CE', 'CLP', 'CPTM', 'CST',
'CVX', 'DCL', 'DCM', 'DQL', 'E', 'E5', 'E6', 'E7', 'ENZC',
'FDDC', 'FDDX', 'FDK', 'HB', 'HCPCS', 'HHC', 'HI', 'HL7nnnn',
'HPC', 'I10', 'I10P', 'I9', 'I9C', 'IBT', 'IC2', 'ICDO',
'ICS', 'ICSD', 'ISOnnnn', 'IUPC', 'IUPP', 'JC8', 'LB', 'LN',
'MCD', 'MCR', 'MDDX', 'MEDC', 'MEDR', 'MEDX', 'MGPI', 'MVX',
'NDA', 'NDC', 'NIC', 'NPI', 'OHA', 'POS', 'RC', 'SDM', 'SNM',
'SNM3', 'SNT', 'UC', 'UMD', 'UML', 'UPC', 'UPIN', 'W1', 'W2',
'W4', 'WC', '99IHE')),
'HL70397': ('Sequencing', ('A', 'AN', 'D', 'DN', 'N')),
'HL70398': ('Continuation style code', ('F', 'I')),
'HL70399': ('Country code',
('ABW', 'AFG', 'AFT', 'AGO', 'AIA', 'ALB', 'AND', 'ANT', 'ARE', 'ARG', 'ARM', 'ASM', 'ATA', 'ATG',
'AUS',
'AUT', 'AZE', 'BDI', 'BEL', 'BEN', 'BFA', 'BGD', 'BGR', 'BHR', 'BHS', 'BIH', 'BLR', 'BLZ', 'BMU',
'BOL',
'BRA', 'BRB', 'BRN', 'BTN', 'BVT', 'BWA', 'CAF', 'CAN', 'CCK', 'CHE', 'CHL', 'CHN', 'CIV', 'CMR',
'COD',
'COG', 'COK', 'COL', 'COM', 'CPV', 'CRI', 'CUB', 'CXR', 'CYM', 'CYP', 'CZE', 'DEU', 'DJI', 'DMA',
'DNK',
'DOM', 'DZA', 'ECU', 'EGY', 'ERI', 'ESH', 'ESP', 'EST', 'ETH', 'FIN', 'FJI', 'FLK', 'FRA', 'FRO',
'FSM',
'GAB', 'GBR', 'GEO', 'GHA', 'GIB', 'GIN', 'GLP', 'GMB', 'GNB', 'GNQ', 'GRC', 'GRD', 'GRL', 'GTM',
'GUF',
'GUM', 'GUY', 'HKG', 'HMD', 'HND', 'HRV', 'HTI', 'HUN', 'IDN', 'IND', 'IOT', 'IRL', 'IRN', 'IRQ',
'ISL',
'ISR', 'ITA', 'JAM', 'JOR', 'JPN', 'KAZ', 'KEN', 'KGZ', 'KHM', 'KIR', 'KNA', 'KOR', 'KWT', 'LAO',
'LBN',
'LBR', 'LBY', 'LCA', 'LIE', 'LKA', 'LSO', 'LTU', 'LUX', 'LVA', 'MAC', 'MAR', 'MCO', 'MDA', 'MDG',
'MDV',
'MEX', 'MHL', 'MKD', 'MLI', 'MLT', 'MMR', 'MNG', 'MNP', 'MOZ', 'MRT', 'MSR', 'MTQ', 'MUS', 'MWI',
'MYS',
'MYT', 'NAM', 'NCL', 'NER', 'NFK', 'NGA', 'NIC', 'NIU', 'NLD', 'NOR', 'NPL', 'NRU', 'NZL', 'OMN',
'PAK',
'PAN', 'PCN', 'PER', 'PHL', 'PLW', 'PNG', 'POL', 'PRI', 'PRK', 'PRT', 'PRY', 'PYF', 'QAT', 'REU',
'ROM',
'RUS', 'RWA', 'SAU', 'SDN', 'SEN', 'SGP', 'SGS', 'SHN', 'SJM', 'SLB', 'SLE', 'SLV', 'SMR', 'SOM',
'SPM',
'STP', 'SUR', 'SVK', 'SVN', 'SWE', 'SWZ', 'SYC', 'SYR', 'TCA', 'TCD', 'TGO', 'THA', 'TJK', 'TKL',
'TKM',
'TMP', 'TON', 'TTO', 'TUN', 'TUR', 'TUV', 'TWN', 'TZA', 'UGA', 'UKR', 'UMI', 'URY', 'USA', 'UZB',
'VAT',
'VCT', 'VEN', 'VGB', 'VIR', 'VNM', 'VUT', 'WLF', 'WSM', 'YEM', 'YUG', 'ZAF', 'ZMB', 'ZWE')),
'HL70401': ('Government reimbursement program', ('C', 'MM')),
'HL70402': ('School type', ('D', 'G', 'M', 'U')),
'HL70403': ('Language ability', ('1', '2', '3', '4', '5')),
'HL70404': ('Language proficiency', ('1', '2', '3', '4', '5', '6')),
'HL70406': ('Organization unit type', ('1', '2', '3', '4', '5', 'H', 'O')),
'HL70409': ('Application change type', ('M', 'SD', 'SU')),
'HL70411': ('Supplemental service information values',
('1ST', '2ND', '3RD', '4TH', '5TH', 'ANT',
'A/P', 'BLT', 'DEC', 'DST', 'LAT', 'LFT',
'LLQ', 'LOW', 'LUQ', 'MED', 'OR', 'PED', 'POS',
'PRT', 'PRX', 'REC', 'RGH', 'RLQ', 'RUQ',
'UPP', 'UPR', 'WCT', 'WOC', 'WSD')),
'HL70415': ('DRG transfer type', ('E', 'N')),
'HL70416': ('Procedure DRG type', ('1', '2', '3', '4', '5')),
'HL70417': ('Tissue type code', ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'B', 'C', 'G')),
'HL70418': ('Procedure priority', ('0', '1', '2 ...')),
'HL70421': ('Severity of illness code', ('MI', 'MO', 'SE')),
'HL70422': ('Triage code', ('1', '2', '3', '4', '5', '99')),
'HL70423': ('Case category code', ('D',)),
'HL70424': ('Gestation category code', ('1', '2', '3')),
'HL70425': ('Newborn code', ('1', '2', '3', '4', '5')),
'HL70426': ('Blood product code',
('CRYO', 'CRYOP', 'FFP', 'FFPTH', 'PC', 'PCA', 'PCNEO', 'PCW', 'PLT', 'PLTNEO', 'PLTP', 'PLTPH',
'PLTPHLR', 'RWB', 'WBA')),
'HL70427': (
'Risk management incident code', ('B', 'C', 'D', 'E', 'F', 'H', 'I', 'J', 'K', 'O', 'P', 'R', 'S', 'T')),
'HL70428': ('Incident type code', ('O', 'P', 'U')),
'HL70429': ('Production class Code', ('BR', 'DA', 'DR', 'DU', 'LY', 'MT', 'NA', 'OT', 'PL', 'RA', 'SH', 'U')),
'HL70430': ('Mode of arrival code', ('A', 'C', 'F', 'H', 'O', 'P', 'U')),
'HL70431': ('Recreational drug use code', ('A', 'C', 'K', 'M', 'O', 'T', 'U')),
'HL70432': ('Admission level of care code', ('AC', 'CH', 'CO', 'CR', 'IM', 'MO')),
'HL70433': ('Precaution code', ('A', 'B', 'C', 'D', 'I', 'N', 'O', 'P', 'U')),
'HL70434': ('Patient condition code', ('A', 'C', 'O', 'P', 'S', 'U')),
'HL70435': ('Advance directive code', ('DNR',)),
'HL70436': ('Sensitivity to Causative Agent code', ('AD', 'AL', 'CT', 'IN')),
'HL70437': ('Alert device code', ('B', 'N', 'W')),
'HL70438': ('Allergy clinical status', ('C', 'D', 'E', 'I', 'P', 'S', 'U')),
'HL70440': ('Data types',
('AD', 'CD', 'CE', 'CF', 'CK',
'CM', 'CN', 'CNE', 'CP',
'CQ', 'CWE', 'CX', 'DLN',
'DR', 'DT', 'ED', 'EI', 'FC',
'FN', 'FT', 'HD', 'ID', 'IS',
'JCC', 'MA', 'MO', 'NA',
'NM', 'PL', 'PN', 'PPN',
'PT', 'QIP', 'QSC', 'RCD',
'RI', 'RP', 'SAD', 'SCV',
'SI', 'SN', 'SRT', 'ST',
'TM', 'TN', 'TQ', 'TS', 'TX',
'VH', 'VID', 'XAD', 'XCN',
'XON', 'XPN', 'XTN')),
'HL70441': ('Immunization registry status', ('A', 'I', 'L', 'M', 'O', 'P', 'U')),
'HL70442': ('Location service code', ('D', 'E', 'P', 'T')),
'HL70443': ('Provider role', ('AD', 'AT', 'CP', 'FHCP', 'PP', 'RP', 'RT')),
'HL70444': ('Name assembly order', ('F', 'G')),
'HL70445': ('Identity Reliability Code', ('AL', 'UA', 'UD', 'US')),
'HL70450': ('Event type', ('LOG', 'SER')),
'HL70451': ('Substance identifier', ('ALL',)),
'HL70452': ('Health care provider type code', ('SUGGESTION',)),
'HL70453': ('Health care provider classification', ('SUGGESTION',)),
'HL70454': ('Health care provider area of specialization', ('SUGGESTION',)),
'HL70455': ('Type of bill code', ('...', '131', '141')),
'HL70456': ('Revenue code', ('...', '260', '280', '301', '991', '993', '994')),
'HL70457': ('Overall claim disposition code', ('0', '1', '2', '3', '4')),
'HL70458': ('OCE edit code',
('...', '1', '10', '11', '12',
'13', '14', '15', '16', '17',
'18', '19', '2', '20', '21',
'22', '23', '24', '25', '26',
'27', '28', '29', '3', '30',
'31', '32', '33', '34', '35.',
'36.', '37', '38.', '39.', '4',
'40.', '41.', '42.', '5', '6',
'7', '8', '9')),
'HL70459': ('Reimbursement Action Code', ('0', '1', '2', '3')),
'HL70460': ('Denial or rejection code', ('0', '1', '2')),
'HL70465': ('Name/address representation', ('A', 'I', 'P')),
'HL70466': ('Ambulatory payment classification code', ('...', '031', '163', '181')),
'HL70467': ('Modifier edit code', ('0', '1', '2', '3', '4', 'U')),
'HL70468': ('Payment adjustment code', ('1', '2', '3', '4', '5')),
'HL70469': ('Packaging status code', ('0', '1', '2')),
'HL70470': ('Reimbursement type code',
('Crnl', 'DME', 'EPO', 'Lab', 'Mamm', 'NoPay',
'OPPS', 'PartH', 'Pckg', 'Thrpy')),
'HL70472': ('TQ Conjunction ID', ('A', 'C', 'S')),
'HL70473': ('Formulary status', ('G', 'N', 'R', 'Y')),
'HL70474': ('Organization unit type - ORG', ('D', 'F', 'L', 'M', 'S', 'U', 'V'))}
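# A minimal lookup sketch (illustrative only; assumes the dict above is
# bound to a module-level name such as TABLES -- the actual name is
# defined earlier in this file). Each entry maps a table id to a
# (description, tuple_of_allowed_codes) pair:
#
# >>> description, codes = TABLES['HL70396']
# >>> description
# 'Coding System'
# >>> 'LN' in codes
# True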
|crs4/hl7apy
|hl7apy/v2_4/tables.py
|Python
|mit
| 46,671
|[
"BWA",
"VisIt"
] |
62554885479ada4e2fb502f3f9c8954de5c4847cf28fe604274a8b3fb5f79123
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy.lib.six import string_types, exec_
import sys
import keyword
import re
import inspect
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy.special import (comb, chndtr, gammaln, hyp0f1,
entr, kl_div)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, sum, shape,
product, reshape, zeros, floor, logical_and, log, sqrt, exp,
ndarray)
from numpy import (place, any, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
import numpy.random as mtrand
from ._constants import _EPS, _XMAX
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'parameters': """\nParameters\n----------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(x, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative distribution function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
Log of the cumulative distribution function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of sf).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n.
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution.
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = """
Parameters
----------
x : array_like
quantiles
q : array_like
lower or upper tail probability
%(shapes)s : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments)
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis.
Default is 'mv'.
"""
_doc_default_longsummary = """\
Continuous random variables are defined from a standard form and may
require some shape parameters to complete their specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, freeze the distribution and display the frozen pdf:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'callparams': _doc_default_callparams,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'Continuous', 'Discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
docdict_discrete['example'] = _doc_default_discrete_example
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['callparams'],
docdict_discrete['frozennote']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, list comprehension variables do not leak into the enclosing scope
pass
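# Substitution sketch (illustrative): doccer.docformat performs the
# %-style substitution used throughout this module, e.g.
#
# >>> from scipy.misc import doccer
# >>> doccer.docformat("``pdf(x, %(shapes)s, loc=0, scale=1)``",
# ...                  {'shapes': 'a, b'})
# '``pdf(x, a, b, loc=0, scale=1)``'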
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
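# Worked check of the identities above (illustrative): for a standard
# normal distribution mu = 0, mu2 = 1, g1 = 0, g2 = 0, so for n = 4
# mu4 = (g2+3)*mu2**2 = 3 and the returned non-central moment is
# mu4 + 4*mu*mu3 + 6*mu*mu*mu2 + mu**4 = 3, the familiar E[Z**4] = 3.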
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
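# Quick numerical sanity check for the two helpers above (illustrative):
#
# >>> data = np.array([0., 1., 2., 3., 4.])
# >>> _skew(data)                    # symmetric sample -> zero third moment
# 0.0
# >>> round(_kurtosis(data), 10)     # m4/m2**2 - 3 = 6.8/4 - 3
# -1.3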
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._ctor_param)
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None):
kwds = self.kwds.copy()
kwds.update({'size': size})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
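# Freezing sketch (illustrative): calling any distribution instance built
# on this infrastructure returns an rv_frozen that re-dispatches to the
# parent with the stored args/kwds, e.g. (with a hypothetical `dist`
# taking one shape parameter):
#
# >>> frozen = dist(2.0, loc=1, scale=3)
# >>> frozen.mean() == dist.mean(2.0, loc=1, scale=3)
# True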
def valarray(shape, value=nan, typecode=None):
"""Return an array of all value.
"""
out = ones(shape, dtype=bool) * value
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = asarray(out)
return out
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
For example,
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
...     return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice that it assumes all `arrays` are of the same shape, or can be
broadcast together.
"""
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
arrays = np.broadcast_arrays(*arrays)
temp = tuple(np.extract(cond, arr) for arr in arrays)
out = valarray(shape(arrays[0]), value=fillvalue)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
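# Sketch of the f2 branch above (illustrative): a second function can
# supply the values where cond is False, instead of a constant fill:
#
# >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
# >>> _lazywhere(a > 2, (a, b), lambda a, b: a*b, f2=lambda a, b: a+b)
# array([  6.,   8.,  21.,  32.])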
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return (%(shape_arg_str)s), %(locscale_out)s, size
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
a = asarray(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*log(x) - a*log(2) - gammaln(a)
return fac + np.nan_to_num(log(hyp0f1(a, nc * x/4.0)))
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = inspect.getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
It is supposed to be called in the __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments
shapes_list = []
for meth in meths_to_inspect:
shapes_args = inspect.getargspec(meth)
shapes_list.append(shapes_args.args)
# *args or **kwargs are not allowed w/automatic shapes
# (generic methods have 'self, x' only)
if len(shapes_args.args) > 2:
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
shapes = max(shapes_list, key=lambda x: len(x))
shapes = shapes[2:] # remove self, x,
# make sure the signatures are consistent
# (generic methods have 'self, x' only)
for item in shapes_list:
if len(item) > 2 and item[2:] != shapes:
raise TypeError('Shape arguments are inconsistent.')
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
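# Shape-deduction sketch (illustrative; `my_gen` is a hypothetical name):
# a subclass defining only
#
#     class my_gen(rv_continuous):
#         def _pdf(self, x, a, b):
#             ...
#
# and constructed without an explicit `shapes` string ends up with
# self.shapes == 'a, b' and self.numargs == 2, deduced from the _pdf
# signature by the introspection branch above.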
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join(str(_) for _ in shapes_vals)
tempdict['vals'] = vals
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because %(shapes)s is used in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Noncentral moments (moments about zero)
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete, hence are defined there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
## (_rvs returns a 1-d array; self._size gives the number of variates)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = mtrand.sample(self._size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default=1).
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = np.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# Cast to int if discrete
if discrete:
if np.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""
Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (discrete RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
# if the mean is inf then the variance is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
mu3 = mu3p - 3 * mu * mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
mu3 = mu3p - 3 * mu * mu2 - mu**3
mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
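# Raw-to-central conversions used above, for reference (with mu_k' the
# non-central moments E[X**k]):
#     mu2 = mu2' - mu**2
#     mu3 = mu3' - 3*mu*mu2 - mu**3
#     mu4 = mu4' - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
# Fisher's skew and excess kurtosis then follow as g1 = mu3/mu2**1.5 and
# g2 = mu4/mu2**2 - 3.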
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
# I don't know when or why vecentropy got broken when numargs == 0
# 09.08.2013: is this still relevant? cf check_vecentropy test
# in tests/test_continuous_basic.py
if self.numargs == 0:
place(output, cond0, self._entropy() + log(scale))
else:
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of the distribution.
Parameters
----------
n : int, n>=1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
kwds : keyword arguments, optional
These can include "loc" and "scale", as well as other keyword
arguments relevant for a given distribution.
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
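# Worked example of the expansion above (illustrative): for n = 2,
#     E[X**2] = loc**2 * (1 + 2*(scale/loc)*E[Y] + (scale/loc)**2 * E[Y**2])
#             = loc**2 + 2*loc*scale*E[Y] + scale**2 * E[Y**2],
# i.e. the binomial expansion of E[(loc + scale*Y)**2].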
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of the range that contains ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
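# Interval sketch (illustrative, for a hypothetical shaped/frozen `dist`):
# interval(alpha) evaluates the ppf at the two equal-tail quantiles, so
#
# >>> a, b = dist.interval(0.95)
# >>> np.allclose([dist.cdf(a), dist.cdf(b)], [0.025, 0.975])
# True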
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
The value in result arrays that indicates a value for which some
argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
``rvs(<shape(s)>, loc=0, scale=1, size=1)``
random variates
``pdf(x, <shape(s)>, loc=0, scale=1)``
probability density function
``logpdf(x, <shape(s)>, loc=0, scale=1)``
log of the probability density function
``cdf(x, <shape(s)>, loc=0, scale=1)``
cumulative distribution function
``logcdf(x, <shape(s)>, loc=0, scale=1)``
log of the cumulative distribution function
``sf(x, <shape(s)>, loc=0, scale=1)``
survival function (1-cdf --- sometimes more accurate)
``logsf(x, <shape(s)>, loc=0, scale=1)``
log of the survival function
``ppf(q, <shape(s)>, loc=0, scale=1)``
percent point function (inverse of cdf --- quantiles)
``isf(q, <shape(s)>, loc=0, scale=1)``
inverse survival function (inverse of sf)
``moment(n, <shape(s)>, loc=0, scale=1)``
non-central n-th moment of the distribution. May not work for array
arguments.
``stats(<shape(s)>, loc=0, scale=1, moments='mv')``
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
``entropy(<shape(s)>, loc=0, scale=1)``
(differential) entropy of the RV.
``fit(data, <shape(s)>, loc=0, scale=1)``
Parameter estimates for generic data
``expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
``median(<shape(s)>, loc=0, scale=1)``
Median of the distribution.
``mean(<shape(s)>, loc=0, scale=1)``
Mean of the distribution.
``std(<shape(s)>, loc=0, scale=1)``
Standard deviation of the distribution.
``var(<shape(s)>, loc=0, scale=1)``
Variance of the distribution.
``interval(alpha, <shape(s)>, loc=0, scale=1)``
Interval that with `alpha` percent probability contains a random
realization of this distribution.
``__call__(<shape(s)>, loc=0, scale=1)``
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array_like
quantiles
q : array_like
lower or upper tail probability
<shape(s)> : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments)
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
Notes
-----
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in
all cases when directly called.
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing the rv_continuous class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1). These methods will be given clean arguments
(between a and b) that pass the argument check method.
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods, but for speed and/or accuracy you can override::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments=<str>``,
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None.
Alternatively, you can override ``_munp``, which takes n and shape
parameters and returns the nth non-central moment of the distribution.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, the `shapes` will be automatically deduced from the signatures of the
overridden methods.
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
def _pdf(self, x):
...
...
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None):
super(rv_continuous, self).__init__()
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self._size = 1
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
# backwards compat. these were removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vecfunc = np.deprecate(self._ppfvec, "vecfunc")
self.veccdf = np.deprecate(self._cdfvec, "veccdf")
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc)
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.' % longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if left is None:  # i.e. self.a == -inf ('not left' would also match a == 0)
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if right is None:  # i.e. self.b == inf ('not right' would also match b == 0)
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
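# Bracketing sketch (illustrative): with an unbounded support, the loops
# above expand the endpoints geometrically (-10, -100, ... on the left;
# 10, 100, ... on the right) until cdf(left) < q < cdf(right), and
# brentq then locates the root of cdf(x) - q inside [left, right].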
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1-cdf) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of cdf) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args), axis=0)
def nnlf(self, theta, x):
'''Return the negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where theta are the
parameters (including loc and scale).
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
cond0 = (x <= self.a) | (self.b <= x)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N * log(scale)
def _penalized_nnlf(self, theta, x):
''' Return the penalized negative loglikelihood function,
i.e., -sum(log pdf(x, theta), axis=0) plus a penalty for samples
outside the support, where theta are the parameters (including loc and scale)
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
loginf = log(_XMAX)
if np.isneginf(self.a).all() and np.isinf(self.b).all():
Nbad = 0
else:
cond0 = (x <= self.a) | (self.b <= x)
Nbad = sum(cond0)
if Nbad > 0:
x = argsreduce(~cond0, x)[0]
N = len(x)
return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
args = list(args)
Nargs = len(args)
fixedn = []
index = list(range(Nargs))
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in zip(index, names):
if key in kwds:
fixedn.append(n)
args[n] = kwds[key]
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == len(index):
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
f0...fn : hold respective shape parameters fixed.
floc : hold location parameter fixed to specified value.
fscale : hold scale parameter fixed to specified value.
optimizer : The optimizer to use. The optimizer must take func,
and starting position as the first two arguments,
plus args (for extra arguments to pass to the
function to be optimized) and disp=0 to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside of range of the distribution. The
returned answer is not guaranteed to be the globally optimal MLE, it
may only be locally optimal, or the optimization may fail altogether.
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.get('loc', start[-2])
scale = kwds.get('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.get('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
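# Illustrative usage sketch (not part of the original source; assumes
# scipy.stats.gamma, an rv_continuous instance exposing this fit method):
#
# >>> from scipy import stats
# >>> data = stats.gamma.rvs(2.0, loc=0.0, scale=3.0, size=1000)
# >>> # hold the location fixed at 0 so only shape and scale are estimated
# >>> a_hat, loc_hat, scale_hat = stats.gamma.fit(data, floc=0)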
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
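# Illustrative usage sketch (not part of the original source; assumes
# scipy.stats.norm, an rv_continuous instance exposing fit_loc_scale):
#
# >>> from scipy import stats
# >>> data = stats.norm.rvs(loc=5.0, scale=2.0, size=1000)
# >>> stats.norm.fit_loc_scale(data)  # approximately (5.0, 2.0)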
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead.
"""
return self.fit_loc_scale(data, *args)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, self.a, self.b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return integrate.quad(integ, lower, upper)[0]
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
"""
args, loc, scale = self._parse_args(*args, **kwds)
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
# np.vectorize doesn't work when numargs == 0 in numpy 1.5.1
if self.numargs == 0:
place(output, cond0, self._entropy() + log(scale))
else:
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
E[x] = Integral(f(x) * dist.pdf(x), lbound, ubound)
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Argument (parameters) of the distribution.
lb, ub : scalar, optional
Lower and upper bound for integration. default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`integrate.quad`.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
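# Illustrative usage sketch (assumes scipy.stats.norm, an rv_continuous
# instance exposing this expect method):
#
# >>> from scipy import stats
# >>> import numpy as np
# >>> # mean of a standard normal conditional on x >= 0;
# >>> # analytically sqrt(2/pi) ~= 0.7979
# >>> stats.norm.expect(lambda x: x, lb=0, ub=np.inf, conditional=True)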
## Handlers for generic case where xk and pk are given
## The _drv prefix probably means discrete random variable.
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk > xk), axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals >= q), axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
# many changes, originally not even a return
tot = 0.0
diff = 1e100
# pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
# handle cases with infinite support
ulimit = max(1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
llimit = min(-1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
while (pos <= self.b) and ((pos <= ulimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos, *args)
# use pmf because _pmf does not check support in randint and there
# might be problems ? with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos, *args)
# using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.b
a = self.a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= self.b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= self.a:
qa = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
# testcase: return wrong number at lower index
# python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
# python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
# python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def entropy(pk, qk=None, base=None):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
"""
pk = asarray(pk)
pk = 1.0*pk / sum(pk, axis=0)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk, axis=0)
vec = kl_div(pk, qk)
S = sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
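# Illustrative usage sketch (plain calls to the function above):
#
# >>> entropy([0.5, 0.5], base=2)  # fair coin: exactly 1 bit
# 1.0
# >>> entropy([0.5, 0.5], qk=[0.9, 0.1])  # KL divergence, ~0.5108 nats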
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. rv_discrete can be used
to construct an arbitrary distribution defined by a list of support
points and the corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments
values : tuple of two array_like
(xk, pk) where xk are points (integers) with positive probability pk
with sum(pk) = 1
inc : integer
increment for the support of the distribution, default: 1
other values have not been tested
badvalue : object, optional
The value in (masked) arrays that indicates a value that should be
ignored.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the first two arguments for all
its methods.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
``generic.rvs(<shape(s)>, loc=0, size=1)``
random variates
``generic.pmf(x, <shape(s)>, loc=0)``
probability mass function
``generic.logpmf(x, <shape(s)>, loc=0)``
log of the probability mass function
``generic.cdf(x, <shape(s)>, loc=0)``
cumulative distribution function
``generic.logcdf(x, <shape(s)>, loc=0)``
log of the cumulative distribution function
``generic.sf(x, <shape(s)>, loc=0)``
survival function (1-cdf --- sometimes more accurate)
``generic.logsf(x, <shape(s)>, loc=0, scale=1)``
log of the survival function
``generic.ppf(q, <shape(s)>, loc=0)``
percent point function (inverse of cdf --- percentiles)
``generic.isf(q, <shape(s)>, loc=0)``
inverse survival function (inverse of sf)
``generic.moment(n, <shape(s)>, loc=0)``
non-central n-th moment of the distribution. May not work for array
arguments.
``generic.stats(<shape(s)>, loc=0, moments='mv')``
mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
``generic.entropy(<shape(s)>, loc=0)``
entropy of the RV
``generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
``generic.median(<shape(s)>, loc=0)``
Median of the distribution.
``generic.mean(<shape(s)>, loc=0)``
Mean of the distribution.
``generic.std(<shape(s)>, loc=0)``
Standard deviation of the distribution.
``generic.var(<shape(s)>, loc=0)``
Variance of the distribution.
``generic.interval(alpha, <shape(s)>, loc=0)``
Interval that with `alpha` percent probability contains a random
realization of this distribution.
``generic(<shape(s)>, loc=0)``
calling a distribution instance returns a frozen distribution
Notes
-----
You can construct an arbitrary discrete rv where ``P{X=xk} = pk``
by passing to the rv_discrete initialization method (through the
values=keyword) a tuple of sequences (xk, pk) which describes only those
values of X (xk) that occur with nonzero probability (pk).
To create a new discrete distribution, we would do the following::
class poisson_gen(rv_discrete):
# "Poisson distribution"
def _pmf(self, k, mu):
...
and create an instance::
poisson = poisson_gen(name="poisson",
longname='A Poisson')
The docstring can be created from a template.
Alternatively, the object may be called (as a function) to fix the shape
and location parameters returning a "frozen" discrete RV object::
myrv = generic(<shape(s)>, loc=0)
- frozen RV object with the same methods but holding the given
shape and location fixed.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, the `shapes` will be automatically deduced from the signatures of the
overridden methods.
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None):
super(rv_discrete, self).__init__()
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.a = a
self.b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk), indx, 0)
self.pk = take(ravel(self.pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = dict(zip(self.xk, self.pk))
self.qvals = np.cumsum(self.pk, axis=0)
self.F = dict(zip(self.xk, self.qvals))
decreasing_keys = sorted(self.F.keys(), reverse=True)
self.Finv = dict((self.F[k], k) for k in decreasing_keys)
self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self._construct_argparser(meths_to_inspect=[_drv_pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
else:
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# backwards compat. was removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vec_generic_moment = np.deprecate(_vec_generic_moment,
"vec_generic_moment",
"generic_moment")
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc)
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RVs do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the rv_discrete template."""
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s discrete random variable.' % longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict_discrete)
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (default=1). Note that `size`
has to be given as keyword, not as positional argument.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1-cdf) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as ``1 - cdf``,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of cdf) at q of the given RV
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
# output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.a-1)
place(output, cond2, self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
# output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.b)
place(output, cond2, self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments': 'm'}))
val = self.pmf(mu, *args)
ent = entr(val)
k = 1
term = 1.0
while (abs(term) > _EPS):
val = self.pmf(mu+k, *args)
term = entr(val)
val = self.pmf(mu-k, *args)
term += entr(val)
k += 1
ent += term
return ent
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False):
"""
Calculate the expected value of a function with respect to the
distribution, for a discrete distribution.
Parameters
----------
func : callable, optional (default: the identity mapping)
Function for which the sum is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers, optional
lower and upper bound for the summation; default is set to the
support of the distribution. lb and ub are inclusive (lb <= k <= ub)
conditional : bool, optional
Default is False.
If True, the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, conditional on being in the given
interval (k such that lb <= k <= ub).
Returns
-------
expect : float
Expected value.
Notes
-----
* function is not vectorized
* accuracy: uses self.moment_tol as stopping criterion;
for a heavy-tailed distribution such as zipf(4), the accuracy of
the mean and variance in the example is only 1e-5, and
increasing the precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to
evaluate could be added as keyword parameter, to evaluate functions
with non-monotonic shapes, points include integers in (-suppnmin,
suppnmin)
* maxcount=1000 limits the number of points that are evaluated,
to break the loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative
integers are evaluated)
"""
# moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
# avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 # minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
# work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1, *args)
else:
invfac = 1 - self.cdf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) # check limits
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
# handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while ((pos >= lb) and (diff > self.moment_tol) and
count <= maxcount):
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot/invfac
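# Illustrative usage sketch (assumes scipy.stats.poisson, an rv_discrete
# instance exposing this expect method):
#
# >>> from scipy import stats
# >>> stats.poisson.expect(lambda k: k, args=(2,))     # E[k] = mu, ~2.0
# >>> stats.poisson.expect(lambda k: k**2, args=(2,))  # var + mu**2, ~6.0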
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py
|
Python
|
apache-2.0
| 109,496
|
[
"Gaussian"
] |
67e77676c32e643dd761e0c2ce1b76006325a0f211d3a62c9ac856668130a5ef
|
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""Exposes a class for matching fragments of molecules.
The class exposes a simple API:
If you want a matcher that hits C=O, you'd do:
>>> p = FragmentMatcher()
>>> p.Init('C=O')
you can then match with:
>>> mol = Chem.MolFromSmiles('CC(=O)O')
>>> p.HasMatch(mol)
1
>>> p.HasMatch(Chem.MolFromSmiles('CC(C)C'))
0
information about the matches:
>>> len(p.GetMatches(Chem.MolFromSmiles('CC=O')))
1
>>> len(p.GetMatches(Chem.MolFromSmiles('O=CC=O')))
2
or, you can add exclusion fragments (defined as smarts) with:
>>> p.AddExclusion('c1ccccc1')
now the matcher will not hit anything that has a benzene ring.
>>> p.HasMatch(Chem.MolFromSmiles('CC=O'))
1
>>> p.HasMatch(Chem.MolFromSmiles('c1ccccc1CC=O'))
0
"""
from rdkit import Chem
class FragmentMatcher(object):
def __init__(self):
self._onPatt = None
self._offPatts = []
def AddExclusion(self, sma):
self._offPatts.append(Chem.MolFromSmarts(sma))
def Init(self, sma):
self._onPatt = Chem.MolFromSmarts(sma)
def GetSMARTS(self):
# minimal implementation for these placeholders (assumption: returning
# the stored patterns via Chem.MolToSmarts is the intended behaviour)
if self._onPatt is None:
return None
return Chem.MolToSmarts(self._onPatt)
def GetExclusionSMARTS(self):
return [Chem.MolToSmarts(patt) for patt in self._offPatts]
def HasMatch(self, mol):
if self._onPatt is None:
return 0
t = mol.HasSubstructMatch(self._onPatt)
if not t:
return 0
else:
for patt in self._offPatts:
if mol.HasSubstructMatch(patt):
return 0
return 1
def GetMatch(self, mol):
if self._onPatt is None:
return None
return mol.GetSubstructMatch(self._onPatt)
def GetMatches(self, mol, uniquify=1):
if self._onPatt is None:
return None
return mol.GetSubstructMatches(self._onPatt, uniquify=uniquify)
def GetBond(self, idx):
if self._onPatt is None:
return None
return self._onPatt.GetBondWithIdx(idx)
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed, tried = _test()
sys.exit(failed)
|
ptosco/rdkit
|
rdkit/Chem/FragmentMatcher.py
|
Python
|
bsd-3-clause
| 2,268
|
[
"RDKit"
] |
78de740eb1a7e0a7546cf8b9c8f7c534d641d22e0b4656e0cff2024c8a49bed7
|
# Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import espressopp
def getAllParticles(system, *properties):
"""
returns a list of all particle properties of all particles of the system (currently no atomistic AdResS particles are included)
"""
allParticles = []
maxParticleID = int(espressopp.analysis.MaxPID(system).compute())
pid = 0
while pid <= maxParticleID:
particle = system.storage.getParticle(pid)
part = []
if particle.pos:
for val in properties:
if val.lower() == "id" : part.append(particle.id)
elif val.lower() == "pos" : part.append(particle.pos)
elif val.lower() == "type" : part.append(particle.type)
elif val.lower() == "mass" : part.append(particle.mass)
elif val.lower() == "v" : part.append(particle.v)
elif val.lower() == "f" : part.append(particle.f)
elif val.lower() == "q" : part.append(particle.q)
elif val.lower() == "adrat" : part.append(particle.adrat)
else: raise ValueError("unknown particle property: %s" % val)
allParticles.append(part)
pid += 1
else:
pid += 1
return allParticles
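# Illustrative usage sketch (assumes an espressopp `system` already set up
# with particles, as in the ESPResSo++ examples; Python 2 print to match
# this module):
#
# parts = getAllParticles(system, "id", "pos", "v")
# for pid, pos, vel in parts:
#     print pid, pos, vel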
def getAllBonds(system):
"""
return all bonds of the system (currently only FixedPairLists are supported)
"""
bonds = []
nInteractions = system.getNumberOfInteractions()
for i in xrange(nInteractions):
if system.getInteraction(i).isBonded():
try:
FixedPairList = system.getInteraction(i).getFixedPairList().getBonds()
j = 0
while j < len(FixedPairList):
fplb = FixedPairList[j]
k = 0
while k < len(fplb):
bonds.append(fplb[k])
k += 1
j += 1
except:
pass
return bonds
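# Illustrative usage sketch (same assumed `system` as above):
#
# for pid1, pid2 in getAllBonds(system):
#     print pid1, pid2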
|
kkreis/espressopp
|
src/tools/info.py
|
Python
|
gpl-3.0
| 2,683
|
[
"ESPResSo"
] |
4244ccd870f2baca12ffd9d5849f8be7c9857b75bb6018a1d7ca8b871a1c43f7
|
# (c) 2017, Brian Coca
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: yaml
short_description: YAML formatted files.
description:
- This cache uses YAML formatted, per host, files saved to the filesystem.
version_added: "2.3"
author: Brian Coca (@bcoca)
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the files
type: list
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
import codecs
import yaml
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
"""
A caching module backed by yaml files.
"""
def _load(self, filepath):
with codecs.open(filepath, 'r', encoding='utf-8') as f:
return AnsibleLoader(f).get_single_data()
def _dump(self, value, filepath):
with codecs.open(filepath, 'w', encoding='utf-8') as f:
yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
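# Illustrative ansible.cfg sketch enabling this plugin (the connection path
# and prefix values below are placeholders, not defaults):
#
# [defaults]
# fact_caching = yaml
# fact_caching_connection = /tmp/ansible_fact_cache
# fact_caching_prefix = facts_
# fact_caching_timeout = 86400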
|
hryamzik/ansible
|
lib/ansible/plugins/cache/yaml.py
|
Python
|
gpl-3.0
| 1,924
|
[
"Brian"
] |
4866089e0a6cb1c1d751a1d4cd5458f581edea96cf9918f03d5dc77fb5bb35b9
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage. If not, see <http://www.gnu.org/licenses/>.
try:
# Python 3
import http.client as httplib
except ImportError:
# Python 2
import httplib
try:
# Python 3
import xmlrpc.client as xmlrpclib
except ImportError:
# Python 2
import xmlrpclib
import pytest
from django.test import TestCase
from karaage.machines.models import Account
from karaage.people.models import Group, Person
class DjangoTestClientTransport(object):
client = None
def __init__(self, client):
self.client = client
def getparser(self):
return xmlrpclib.getparser()
def request(self, host, handler, request_body, verbose=False):
parser, unmarshaller = self.getparser()
response = self.client.post(handler, request_body, 'text/xml')
if response.status_code != 200:
raise xmlrpclib.ProtocolError(
'%s%s' % (host, handler),
response.status_code,
httplib.responses.get(response.status_code, ''),
dict(response.items()),
)
parser.feed(response.content)
return unmarshaller.close()
@pytest.mark.django_db
class XmlrpcTestCase(TestCase):
fixtures = [
'test_karaage.json',
]
def get_server_proxy(self):
return xmlrpclib.ServerProxy(
'http://testserver/xmlrpc/',
transport=DjangoTestClientTransport(self.client),
)
def setUp(self):
super(XmlrpcTestCase, self).setUp()
self.server = self.get_server_proxy()
def test_get_disk_quota(self):
server = self.server
result = server.get_disk_quota("kgtestuser1")
self.assertEqual(result, "Account not found")
result = server.get_disk_quota("kgtestuser3")
self.assertEqual(result, False)
result = server.get_disk_quota("kgtestuser3", "tango")
self.assertEqual(result, False)
account = Account.objects.get(username="kgtestuser3")
account.disk_quota = 1
account.save()
result = server.get_disk_quota("kgtestuser3")
self.assertEqual(result, 1048576)
result = server.get_disk_quota("kgtestuser3", "tango")
self.assertEqual(result, 1048576)
def test_get_projects(self):
server = self.server
with self.assertRaises(xmlrpclib.Fault) as cm:
server.get_projects("tango", "aqws12")
self.assertEqual(cm.exception.faultCode, 81)
self.assertEqual(
cm.exception.faultString, 'Username and/or password is incorrect')
result = server.get_projects("tango", "aq12ws")
self.assertEqual(result, ['TestProject1'])
result = server.get_projects("wexstan", "aq12ws")
self.assertEqual(result, ['TestProject1'])
result = server.get_projects("edda", "aq12ws")
self.assertEqual(result, ['TestProject1'])
def test_get_project(self):
server = self.server
# account does not exist
result = server.get_project("kgtestuser1", "TestProject1")
self.assertEqual(result, "Account 'kgtestuser1' not found")
# project does exist, and person belongs to it
result = server.get_project("kgtestuser3", "TestProject1")
self.assertEqual(result, "TestProject1")
result = server.get_project("kgtestuser3", "TestProject1", "tango")
self.assertEqual(result, "TestProject1")
result = server.get_project("kgtestuser3", "TestProject1", "wexstan")
self.assertEqual(result, "TestProject1")
result = server.get_project("kgtestuser3", "TestProject1", "edda")
self.assertEqual(result, "TestProject1")
# project does not exist - should fall back to default
result = server.get_project("kgtestuser3", "TestProjectx", "tango")
self.assertEqual(result, "TestProject1")
result = server.get_project("kgtestuser3", "TestProjectx", "wexstan")
self.assertEqual(result, "TestProject1")
result = server.get_project("kgtestuser3", "TestProjectx", "edda")
self.assertEqual(result, "TestProject1")
# project does exist, and person doesn't belong to it
# in this case default fall back fails too
person = Person.objects.get(username="kgtestuser3")
group = Group.objects.get(name="testproject1")
group.members.remove(person)
result = server.get_project("kgtestuser3", "TestProject1")
self.assertEqual(result, "None")
result = server.get_project("kgtestuser3", "TestProject1", "tango")
self.assertEqual(result, "None")
result = server.get_project("kgtestuser3", "TestProject1", "wexstan")
self.assertEqual(result, "None")
result = server.get_project("kgtestuser3", "TestProject1", "edda")
self.assertEqual(result, "None")
def test_get_project_members(self):
server = self.server
with self.assertRaises(xmlrpclib.Fault) as cm:
server.get_project_members("tango", "aqws12", "TestProject2")
self.assertEqual(cm.exception.faultCode, 81)
self.assertEqual(
cm.exception.faultString, 'Username and/or password is incorrect')
# Project has no ProjectQuota
result = server.get_project_members("tango", "aq12ws", "TestProject2")
self.assertEqual(result, "Project not found")
result = server.get_project_members(
"wexstan", "aq12ws", "TestProject2")
self.assertEqual(result, "Project not found")
result = server.get_project_members("edda", "aq12ws", "TestProject2")
self.assertEqual(result, "Project not found")
# Project has ProjectQuota for default machine category
result = server.get_project_members("tango", "aq12ws", "TestProject1")
self.assertEqual(result, ['kgtestuser3'])
result = server.get_project_members(
"wexstan", "aq12ws", "TestProject1")
self.assertEqual(result, ['kgtestuser3'])
result = server.get_project_members("edda", "aq12ws", "TestProject1")
self.assertEqual(result, ['kgtestuser3'])
def test_get_users_project(self):
server = self.server
with self.assertRaises(xmlrpclib.Fault) as cm:
server.get_users_projects("tango", "aq12ws")
self.assertEqual(cm.exception.faultCode, 81)
self.assertEqual(
cm.exception.faultString, 'Username and/or password is incorrect')
result = server.get_users_projects("kgtestuser1", "aq12ws")
self.assertEqual(result, [0, []])
result = server.get_users_projects("kgtestuser2", "aq12ws")
self.assertEqual(result, [0, []])
result = server.get_users_projects("kgtestuser3", "aq12ws")
self.assertEqual(result, [0, ['TestProject1']])
|
brianmay/karaage
|
karaage/tests/test_xmlrpc.py
|
Python
|
gpl-3.0
| 7,501
|
[
"Brian"
] |
8f79c26c10b6fa909cff2682ace2b9c14c9a8889d3c70771a9d0c3ee64f902d6
|
"""Integration with Galaxy nglims.
"""
import collections
import copy
import glob
import gzip
import operator
import os
import subprocess
import joblib
import yaml
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.galaxy.api import GalaxyApiAccess
from bcbio.illumina import flowcell
from bcbio.pipeline.run_info import clean_name
from bcbio.workflow import template
def prep_samples_and_config(run_folder, ldetails, fastq_dir, config):
"""Prepare sample fastq files and provide global sample configuration for the flowcell.
Handles merging of fastq files split by lane and also by the bcl2fastq
preparation process.
"""
fastq_final_dir = utils.safe_makedir(os.path.join(fastq_dir, "merged"))
cores = utils.get_in(config, ("algorithm", "num_cores"), 1)
ldetails = joblib.Parallel(cores)(joblib.delayed(_prep_sample_and_config)(x, fastq_dir, fastq_final_dir)
for x in _group_same_samples(ldetails))
config_file = _write_sample_config(run_folder, [x for x in ldetails if x])
return config_file, fastq_final_dir
def _prep_sample_and_config(ldetail_group, fastq_dir, fastq_final_dir):
"""Prepare output fastq file and configuration for a single sample.
Only passes non-empty files through for processing.
"""
files = []
print "->", ldetail_group[0]["name"], len(ldetail_group)
for read in ["R1", "R2"]:
fastq_inputs = sorted(list(set(reduce(operator.add,
(_get_fastq_files(x, read, fastq_dir) for x in ldetail_group)))))
if len(fastq_inputs) > 0:
files.append(_concat_bgzip_fastq(fastq_inputs, fastq_final_dir, read, ldetail_group[0]))
if len(files) > 0:
if _non_empty(files[0]):
out = ldetail_group[0]
out["files"] = files
return out
def _non_empty(f):
with gzip.open(f) as in_handle:
for line in in_handle:
return True
return False
def _write_sample_config(run_folder, ldetails):
"""Generate a bcbio-nextgen YAML configuration file for processing a sample.
"""
out_file = os.path.join(run_folder, "%s.yaml" % os.path.basename(run_folder))
with open(out_file, "w") as out_handle:
fc_name, fc_date = flowcell.parse_dirname(run_folder)
out = {"details": sorted([_prepare_sample(x, run_folder) for x in ldetails],
key=operator.itemgetter("name", "description")),
"fc_name": fc_name,
"fc_date": fc_date}
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return out_file
def _prepare_sample(data, run_folder):
"""Extract passed keywords from input LIMS information.
"""
want = set(["description", "files", "genome_build", "name", "analysis", "upload", "algorithm"])
out = {}
for k, v in data.items():
if k in want:
out[k] = _relative_paths(v, run_folder)
if "algorithm" not in out:
analysis, algorithm = _select_default_algorithm(out.get("analysis"))
out["algorithm"] = algorithm
out["analysis"] = analysis
description = "%s-%s" % (out["name"], clean_name(out["description"]))
out["name"] = [out["name"], description]
out["description"] = description
return out
def _select_default_algorithm(analysis):
"""Provide default algorithm sections from templates or standard
"""
if not analysis or analysis == "Standard":
return "Standard", {"aligner": "bwa", "platform": "illumina", "quality_format": "Standard",
"recalibrate": False, "realign": False, "mark_duplicates": True,
"variantcaller": False}
elif "variant" in analysis:
try:
config, _ = template.name_to_config(analysis)
except ValueError:
config, _ = template.name_to_config("freebayes-variant")
return "variant", config["details"][0]["algorithm"]
else:
return analysis, {}
def _relative_paths(xs, base_path):
"""Adjust paths to be relative to the provided base path.
"""
if isinstance(xs, basestring):
if xs.startswith(base_path):
return xs.replace(base_path + "/", "", 1)
else:
return xs
elif isinstance(xs, (list, tuple)):
return [_relative_paths(x, base_path) for x in xs]
elif isinstance(xs, dict):
out = {}
for k, v in xs.items():
out[k] = _relative_paths(v, base_path)
return out
else:
return xs
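# Illustrative behaviour sketch (hypothetical paths):
# _relative_paths({"files": ["/runs/X/a.fastq.gz"]}, "/runs/X")
# returns {"files": ["a.fastq.gz"]}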
def _get_fastq_files(ldetail, read, fastq_dir):
"""Retrieve fastq files corresponding to the sample and read number.
"""
return glob.glob(os.path.join(fastq_dir, "Project_%s" % ldetail["project_name"],
"Sample_%s" % ldetail["name"],
"%s_*_%s_*.fastq.gz" % (ldetail["name"], read)))
def _concat_bgzip_fastq(finputs, out_dir, read, ldetail):
"""Concatenate multiple input fastq files, preparing a bgzipped output file.
"""
out_file = os.path.join(out_dir, "%s_%s.fastq.gz" % (ldetail["name"], read))
if not utils.file_exists(out_file):
with file_transaction(out_file) as tx_out_file:
subprocess.check_call("zcat %s | bgzip -c > %s" % (" ".join(finputs), tx_out_file), shell=True)
return out_file
def _group_same_samples(ldetails):
"""Move samples into groups -- same groups have identical names.
"""
sample_groups = collections.defaultdict(list)
for ldetail in ldetails:
sample_groups[ldetail["name"]].append(ldetail)
return sorted(sample_groups.values(), key=lambda xs: xs[0]["name"])
def get_runinfo(galaxy_url, galaxy_apikey, run_folder, storedir):
"""Retrieve flattened run information for a processed directory from Galaxy nglims API.
"""
galaxy_api = GalaxyApiAccess(galaxy_url, galaxy_apikey)
fc_name, fc_date = flowcell.parse_dirname(run_folder)
galaxy_info = galaxy_api.run_details(fc_name, fc_date)
if "error" in galaxy_info:
return galaxy_info
if not galaxy_info["run_name"].startswith(fc_date) and not galaxy_info["run_name"].endswith(fc_name):
raise ValueError("Galaxy NGLIMS information %s does not match flowcell %s %s" %
(galaxy_info["run_name"], fc_date, fc_name))
ldetails = _flatten_lane_details(galaxy_info)
out = []
for item in ldetails:
# Do uploads for all non-controls
if item["description"] != "control" or item["project_name"] != "control":
item["upload"] = {"method": "galaxy", "run_id": galaxy_info["run_id"],
"fc_name": fc_name, "fc_date": fc_date,
"dir": storedir,
"galaxy_url": galaxy_url, "galaxy_api_key": galaxy_apikey}
for k in ["lab_association", "private_libs", "researcher", "researcher_id", "sample_id",
"galaxy_library", "galaxy_role"]:
item["upload"][k] = item.pop(k, "")
out.append(item)
return out
def _flatten_lane_details(runinfo):
"""Provide flattened lane information with multiplexed barcodes separated.
"""
out = []
for ldetail in runinfo["details"]:
# handle controls
if "project_name" not in ldetail and ldetail["description"] == "control":
ldetail["project_name"] = "control"
for i, barcode in enumerate(ldetail.get("multiplex", [{}])):
cur = copy.deepcopy(ldetail)
cur["name"] = "%s-%s" % (ldetail["name"], i + 1)
cur["description"] = barcode.get("name", ldetail["description"])
cur["bc_index"] = barcode.get("sequence", "")
cur["project_name"] = clean_name(ldetail["project_name"])
out.append(cur)
return out
|
Cyberbio-Lab/bcbio-nextgen
|
bcbio/galaxy/nglims.py
|
Python
|
mit
| 7,932
|
[
"BWA",
"Galaxy"
] |
3405687dd80d20a96e73a341433e871d9f623e8162aa06c7b70e5eecdc4e9bf1
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .solver import Solver
class SimpleFill(Solver):
def __init__(self, fill_method="mean", min_value=None, max_value=None):
"""
Possible values for fill_method:
"zero": fill missing entries with zeros
"mean": fill with column means
"median" : fill with column medians
"min": fill with min value per column
"random": fill with gaussian noise according to mean/std of column
"""
Solver.__init__(
self,
fill_method=fill_method,
min_value=min_value,
max_value=max_value)
def solve(self, X, missing_mask):
"""
Since X is given to us already filled, just return it.
"""
return X
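# Illustrative usage sketch (assumption: the fancyimpute base Solver class
# provides fit_transform(), which applies fill_method to the missing cells):
#
# import numpy as np
# X = np.array([[1.0, np.nan], [2.0, 4.0]])
# X_filled = SimpleFill(fill_method="mean").fit_transform(X)
# # the nan in column 1 becomes the column mean, 4.0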
|
iskandr/fancyimpute
|
fancyimpute/simple_fill.py
|
Python
|
apache-2.0
| 1,292
|
[
"Gaussian"
] |
5abc125902dd6e7a3bf122500ec60119c3ff04cb3d525c6b5358646b1973f62f
|
import os
from ase import *
from gpaw import GPAW
from gpaw.mpi import world
O = Atoms([Atom('O')])
O.center(vacuum=2.)
calc = GPAW(nbands=6,
h=.25,
convergence={'eigenstates':1.e-2, 'energy':.1, 'density':.1},
hund=True,
parallel={'domain': world.size})
O.set_calculator(calc)
O.get_potential_energy()
print "calc.wfs.gd.comm.size, world.size=", calc.wfs.gd.comm.size, world.size
assert(calc.wfs.gd.comm.size == world.size)
|
qsnake/gpaw
|
oldtest/parallel/domain_only.py
|
Python
|
gpl-3.0
| 477
|
[
"ASE",
"GPAW"
] |
95f70a61bfaa3fc57da173c13b9ccf8ac322afc271c686b82f6343213e44b946
|
# coding: utf-8
from __future__ import unicode_literals
from __future__ import division
"""
Evaluate the defect concentration based on composition, temperature,
and defect energies using "Dilute Solution Model"
Reference: Phys Rev B, 63, 094103, 2001,
"Density of constitutional and thermal point defects in L12 Al3Sc",
C. Woodward, M. Asta, G. Kresse and J. Hafner.
"""
__author__ = 'Bharat Medasani'
__version__ = "0.2"
__maintainer__ = "Bharat Medasani"
__email__ = "mbkumar@gmail.com"
__status__ = "Alpha"
__date__ = "6/4/14"
import math
import copy
import numpy as np
from six.moves import zip
from monty.dev import requires
from monty.fractions import gcd
try:
from sympy import Symbol, nsolve, Integer, Float, Matrix, exp, solve, Eq
sympy_found = True
except ImportError:
sympy_found = False
# physical consts
k_B=8.6173324e-5 # eV/K
# Check the inputs
def check_input(def_list):
flag = True
for defect in def_list:
if not defect:
flag = False
break
return flag
@requires(sympy_found,
"dilute_solution_model requires Sympy module. Please install it.")
def dilute_solution_model(structure, e0, vac_defs, antisite_defs, T,
trial_chem_pot = None, generate='plot'):
"""
Compute the defect densities using dilute solution model.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite defect
are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
T: Temperature in Kelvin
trial_chem_pot (optional): Trial chemical potentials to speedup
the plot generation. Format is {el1:mu1,...}
generate (string): Options are plot or energy
Chemical potentials are also returned with energy option.
If energy option is not chosen, plot is generated.
Returns:
If generate=plot, the plot data is generated and returned in
HighCharts format.
If generate=energy, defect formation enthalpies and chemical
potentials are returned.
"""
if not check_input(vac_defs):
raise ValueError('Vacancy energy is not defined')
if not check_input(antisite_defs):
raise ValueError('Antisite energy is not defined')
formation_energies = {}
formation_energies['vacancies'] = copy.deepcopy(vac_defs)
formation_energies['antisites'] = copy.deepcopy(antisite_defs)
for vac in formation_energies['vacancies']:
del vac['energy']
for asite in formation_energies['antisites']:
del asite['energy']
# Setup the system
site_species = [vac_def['site_specie'] for vac_def in vac_defs]
multiplicity = [vac_def['site_multiplicity'] for vac_def in vac_defs]
m = len(set(site_species)) # distinct species
n = len(vac_defs) # inequivalent sites
# Reduce the system and associated parameters such that only distinctive
# atoms are retained
comm_div = gcd(*tuple(multiplicity))
multiplicity = [val/comm_div for val in multiplicity]
e0 = e0/comm_div
T = Float(T)
#c0 = np.diag(multiplicity)
c0 = np.diag(np.ones(n))
mu = [Symbol('mu'+i.__str__()) for i in range(m)]
# Generate maps for hashing
# Generate specie->mu map and use it for site->mu map
specie_order = [] # Contains hash for site->mu map Eg: [Al, Ni]
site_specie_set = set() # Eg: {Ni, Al}
for i in range(n):
site_specie = site_species[i]
if site_specie not in site_specie_set:
site_specie_set.add(site_specie)
specie_order.append(site_specie)
site_mu_map = [] # Eg: [mu0,mu0,mu0,mu1] where mu0->Al, and mu1->Ni
for i in range(n):
site_specie = site_species[i]
j = specie_order.index(site_specie)
site_mu_map.append(j)
specie_site_index_map = [] # Eg: [(0,3),(3,4)] for Al & Ni
for i in range(m):
low_ind = site_species.index(specie_order[i])
if i < m-1:
hgh_ind = site_species.index(specie_order[i+1])
else:
hgh_ind = n
specie_site_index_map.append((low_ind,hgh_ind))
"""
dC: delta concentration matrix:
dC[i,j,k]: Concentration change of atom i, due to presence of atom
j on lattice site k
Special case is [i,i,i] which is considered as vacancy
Few cases: dC[i,i,i] = -1 due to being vacancy special case
dC[k,k,i] = +1 due to increment in k at i lattice if i
lattice type is of different element
dC[i,k,i] = -1 due to decrement of ith type atom due to
presence of kth type atom on ith sublattice and kth type
atom specie is different from ith sublattice atom specie
dC[i,k,k] = 0 due to no effect on ith type atom
dC[i,j,k] = 0 if i!=j!=k
"""
dC = np.zeros((n,n,n), dtype=np.int)
for i in range(n):
for j in range(n):
for k in range(n):
if i == j and site_species[j] != site_species[k] and \
site_species[i] != site_species[k]:
dC[i,j,k] = 1
for j in range(n):
for k in range(n):
if i == k:
dC[i,j,k] = -1
for k in range(n):
for j in range(n):
for i in range(n):
if i != j:
if site_species[j] == site_species[k]:
dC[i,j,k] = 0
for ind_map in specie_site_index_map:
if ind_map[1]-ind_map[0] > 1:
for index1 in range(ind_map[0]+1,ind_map[1]):
for index2 in range(ind_map[0]):
for i in range(n):
dC[i,index1,index2] = 0
for index2 in range(ind_map[1],n):
for i in range(n):
dC[i,index1,index2] = 0
# dE matrix: Flip energies (or raw defect energies)
els = [vac_def['site_specie'] for vac_def in vac_defs]
dE = []
for i in range(n):
dE.append([])
for i in range(n):
for j in range(n):
dE[i].append(0)
for j in range(n):
for i in range(n):
if i == j:
dE[i][j] = vac_defs[i]['energy']
else:
sub_specie = vac_defs[i]['site_specie']
site_specie = vac_defs[j]['site_specie']
if site_specie == sub_specie:
dE[i][j] = 0
else:
for as_def in antisite_defs:
if int(as_def['site_index']) == j+1 and \
sub_specie == as_def['substitution_specie']:
dE[i][j] = as_def['energy']
break
dE = np.array(dE)
# Initialization for concentrations
# c(i,p) == presence of ith type atom on pth type site
c = Matrix(n,n,[0]*n**2)
for i in range(n):
for p in range(n):
c[i,p] = Integer(c0[i,p])
site_flip_contribs = []
for epi in range(n):
sum_mu = sum([mu[site_mu_map[j]]*Integer(dC[j,epi,p]) \
for j in range(n)])
flip = Integer(dC[i,epi,p]) * \
exp(-(dE[epi,p]-sum_mu)/(k_B*T))
if flip not in site_flip_contribs:
site_flip_contribs.append(flip)
c[i,p] += flip
total_c = []
for ind in specie_site_index_map:
val = 0
for i in range(*ind):
sum_i = sum([c[i,j]*multiplicity[j] for j in range(n)])
val += sum_i
total_c.append(val)
c_ratio = [total_c[-1]/total_c[i] for i in range(m)]
# Expression for Omega, the Grand Potential
omega1 = e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:])*multiplicity[i] \
for i in range(n)])
omega2 = []
fm_en_eff = []
used_dEs = []
for p_r in range(n):
for epi in range(n):
sum_mu = sum([mu[site_mu_map[j]]*Float(
dC[j,epi,p_r]) for j in range(n)])
if p_r != epi and site_mu_map[p_r] == site_mu_map[epi]:
continue
if dE[epi,p_r] not in used_dEs:
omega2.append(k_B*T*multiplicity[p_r] * \
exp(-(dE[epi,p_r]-sum_mu)/(k_B*T)))
fm_en_eff.append(dE[epi,p_r]-sum_mu)
used_dEs.append(dE[epi, p_r])
omega = omega1-sum(omega2)
# Compute composition range
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
comp1_min = sum(multiplicity[li:hi])/sum(multiplicity)*100-1
comp1_max = sum(multiplicity[li:hi])/sum(multiplicity)*100+1
delta = float(comp1_max-comp1_min)/120.0
yvals = []
for comp1 in np.arange(comp1_min,comp1_max+delta,delta):
comp2 = 100-comp1
y = comp2/comp1
yvals.append(y)
def reduce_mu():
omega = [e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:]) for i in range(n)])]
x = solve(omega)
return x
def compute_mus_by_search():
# Compute trial mu
mu_red = reduce_mu()
mult = multiplicity
specie_concen = [sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
y_vect = [specie_concen[-1]/specie_concen[i] for i in range(m)]
vector_func = [y_vect[i]-c_ratio[i] for i in range(m-1)]
vector_func.append(omega)
min_diff = 1e10
mu_vals = None
c_val = None
m1_min = -20.0
if e0 > 0:
m1_max = 10 # Search space needs to be modified
else:
m1_max = 0
for m1 in np.arange(m1_min,m1_max,0.01):
m0 = mu_red[mu[0]].subs(mu[-1],m1)
try:
x = nsolve(vector_func,mu,[m0,m1],module="numpy")
except:
continue
c_val = c.subs(dict(zip(mu,x)))
#if all(x >= 0 for x in c_val):
specie_concen = []
for ind in specie_site_index_map:
specie_concen.append(sum([sum(c_val[i,:]) for i in range(*ind)]))
y_comp = [specie_concen[-1]/specie_concen[i] for i in range(m)]
diff = math.sqrt(sum([pow(abs(y_comp[i]-y_vect[i]),2) for i in range(m)]))
if diff < min_diff:
min_diff = diff
mu_vals = x
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
else:
raise ValueError()
return mu_vals
def compute_def_formation_energies():
i = 0
for vac_def in vac_defs:
site_specie = vac_def['site_specie']
ind = specie_order.index(site_specie)
uncor_energy = vac_def['energy']
formation_energy = uncor_energy + mu_vals[ind]
formation_energies['vacancies'][i]['formation_energy'] = formation_energy
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
if not specie_ind_del-1:
label = '$V_{'+site_specie+'}$'
else:
label = '$V_{'+site_specie+'_'+str(cur_ind)+'}$'
formation_energies['vacancies'][i]['label'] = label
i += 1
i = 0
for as_def in antisite_defs:
site_specie = as_def['site_specie']
sub_specie = as_def['substitution_specie']
ind1 = specie_order.index(site_specie)
ind2 = specie_order.index(sub_specie)
uncor_energy = as_def['energy']
formation_energy = uncor_energy + mu_vals[ind1] - mu_vals[ind2]
formation_energies['antisites'][i]['formation_energy'] = formation_energy
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
formation_energies['antisites'][i]['label'] = label
i += 1
return formation_energies
# If generate option is energy compute effective formation energies
# at ideal stoichiometry and return the formation energies and chem pot.
if generate == 'energy':
if not trial_chem_pot:
mu_vals = compute_mus_by_search()
else:
try:
mu_vals = [trial_chem_pot[element] for element in specie_order]
except:
                mu_vals = compute_mus_by_search()
formation_energies = compute_def_formation_energies()
mu_dict = dict(zip(specie_order,mu_vals))
return formation_energies, mu_dict
if not trial_chem_pot:
# Try computing mus by assuming one of the defects is dominant at 0.01
# concen. First vacancy is tried and then antisite
# Generate trial mus assuming vacancy as dominant defect
#for specie-0 at lower yval
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
li1 = specie_site_index_map[1][0]
hi1 = specie_site_index_map[1][1]
spec_mult = [sum(multiplicity[li:hi]), sum(multiplicity[li1:hi1])]
        # 4.60517 = ln(100), i.e. -ln(0.01) for the assumed 1% trial defect
        # concentration used to seed the chemical-potential search
        ln_def_conc = 4.60517
for i in range(li,hi):
vac_flip_en = vac_defs[i]['energy']
mu_vals = [ln_def_conc*k_B*T -vac_flip_en]
mu_vals.append((e0 - spec_mult[0]*mu_vals[0]) / spec_mult[1])
comp_ratio = yvals[0]
# Test if the trial mus are good
vector_func = [comp_ratio - c_ratio[0]]
vector_func.append(omega)
try:
mu_vals = nsolve(vector_func,mu,mu_vals)
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
break
except: # Go for antisite as dominant defect
mu_gs = [Symbol('mu_gs'+j.__str__()) for j in range(m)]
eqs = [mu_gs[0]-mu_gs[1] - (ln_def_conc*k_B*T-antisite_defs[i][
'energy'])]
eqs.append(spec_mult[0]*mu_gs[0] + spec_mult[1]*mu_gs[1] - e0)
x = solve(eqs, mu_gs)
#mu_names = sorted([key.name for key in x.keys()])
mu_vals = []
for key in sorted(x.keys(),key=lambda inp: inp.name):
mu_vals.append(x[key])
vector_func = [comp_ratio - c_ratio[0]]
vector_func.append(omega)
try:
mu_vals = nsolve(vector_func,mu,mu_vals)
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
break
except: # Go to the default option (search the space)
pass
else:
mu_vals = compute_mus_by_search()
else:
try:
mu_vals = [trial_chem_pot[element] for element in specie_order]
except:
mu_vals = compute_mus_by_search()
# Compile mu's for all composition ratios in the range
#+/- 1% from the stoichiometry
result = {}
    failed_y, failed_i = [], []
    # enumerate keeps i in step with y's position in yvals even when a
    # failed solve skips the rest of the loop body
    for i, y in enumerate(yvals):
        vector_func = [y-c_ratio[0]]
        vector_func.append(omega)
        try:
            x = nsolve(vector_func,mu,mu_vals,module="numpy")
            if x:
                mu_vals = [float(mu_val) for mu_val in x]
        except:
            failed_y.append(y)
            failed_i.append(i)
            continue
        result[y] = list(mu_vals)
        x = None
def get_next_mu_val(i):
        if i + 1 >= len(yvals):  # guard the yvals[i+1] lookup below
return None
y = yvals[i+1]
x = result.get(y,None)
if x:
mu_vals = [float(mu_val) for mu_val in x]
return mu_vals
else:
return get_next_mu_val(i+1)
def get_prev_mu_val(i):
if i <= 0:
return None
y = yvals[i-1]
x = result.get(y,None)
if x:
mu_vals = [float(mu_val) for mu_val in x]
return mu_vals
else:
            return get_prev_mu_val(i-1)
# Try to get better trial mus for failed cases
for j in range(len(failed_y)):
i = failed_i[j]
prev_mu_val = get_prev_mu_val(i)
if not prev_mu_val:
continue
next_mu_val = get_next_mu_val(i)
if not next_mu_val:
continue
y = failed_y[j]
vector_func = [y-c_ratio[0]]
vector_func.append(omega)
trial_mu = list(map(lambda x: float(sum(x))/len(x), \
zip(prev_mu_val,next_mu_val)))
try:
x = nsolve(vector_func,mu,trial_mu,module="numpy")
if x:
mu_vals = [float(mu_val) for mu_val in x]
except:
continue
result[y] = mu_vals
x = None
# Alternate way of calculating trial mus for failed cases
# by taking average of trial mus at extremes.
#for j in range(len(failed_y)):
# y = yvals[0]
# prev_mu_val = result[y]
# y = yvals[-1]
# next_mu_val = result[y]
#
# trial_mu = list(map(lambda x: float(sum(x))/len(x), \
# zip(prev_mu_val,next_mu_val)))
# y = failed_y[j]
# vector_func = [y-c_ratio[0]]
# vector_func.append(omega)
# try:
# x = nsolve(vector_func,mu,trial_mu,module="numpy")
# if x:
# mu_vals = [float(mu_val) for mu_val in x]
# except:
# continue
# result[y] = list(mu_vals)
if len(result.keys()) < len(yvals)/2:
raise ValueError('Not sufficient data')
res = []
new_mu_dict = {}
# Compute the concentrations for all the compositions
for key in sorted(result.keys()):
mu_val = result[key]
total_c_val = [total_c[i].subs(dict(zip(mu,mu_val))) \
for i in range(len(total_c))]
c_val = c.subs(dict(zip(mu,mu_val)))
res1 = []
# Concentration of first element/over total concen
res1.append(float(total_c_val[0]/sum(total_c_val)))
new_mu_dict[res1[0]] = mu_val
sum_c0 = sum([c0[i,i] for i in range(n)])
for i in range(n):
for j in range(n):
if i == j: # Vacancy
vac_conc = float(exp(-(mu_val[site_mu_map[i]]+dE[i,i])/(k_B*T)))
res1.append(vac_conc)
else: # Antisite
res1.append(float(c_val[i,j]/c0[j,j]))
res.append(res1)
res = np.array(res)
dtype = [(str('x'),np.float64)]+[(str('y%d%d' % (i, j)), np.float64) \
for i in range(n) for j in range(n)]
res1 = np.sort(res.view(dtype), order=[str('x')],axis=0)
conc_data = {}
"""Because all the plots have identical x-points storing it in a
single array"""
conc_data['x'] = [dat[0][0] for dat in res1] # x-axis data
# Element whose composition is varied. For x-label
conc_data['x_label'] = els[0]+ " mole fraction"
conc_data['y_label'] = "Point defect concentration"
conc = []
for i in range(n):
conc.append([])
for j in range(n):
conc[i].append([])
for i in range(n):
for j in range(n):
y1 = [dat[0][i*n+j+1] for dat in res1]
conc[i][j] = y1
y_data = []
for i in range(n):
data = conc[i][i]
specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
vac_string = "$Vac_{"
if not specie_ind_del-1:
label = vac_string+specie+'}$'
else:
label = vac_string+specie+'_'+str(cur_ind)+'}$'
# Plot data and legend info
y_data.append({'data':data,'name':label})
for i in range(n):
site_specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
for j in range(m): # Antisite plot dat
sub_specie = specie_order[j]
if sub_specie == site_specie:
continue
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
inds = specie_site_index_map[j]
# TODO: Investigate the value below
data = np.sum([conc[ind][i] for ind in range(*inds)],axis=0)
data = data.tolist()
y_data.append({'data':data,'name':label})
conc_data['y'] = y_data
# Compute the formation energies
def compute_vac_formation_energies(mu_vals):
en = []
for vac_def in vac_defs:
site_specie = vac_def['site_specie']
ind = specie_order.index(site_specie)
uncor_energy = vac_def['energy']
formation_energy = uncor_energy + mu_vals[ind]
en.append(float(formation_energy))
return en
en_res = []
for key in sorted(new_mu_dict.keys()):
mu_val = new_mu_dict[key]
en_res.append(compute_vac_formation_energies(mu_val))
en_data = {'x_label':els[0]+' mole fraction', 'x':[]}
en_data['x'] = [dat[0][0] for dat in res1] # x-axis data
i = 0
y_data = []
for vac_def in vac_defs:
data = [data[i] for data in en_res]
site_specie = vac_def['site_specie']
ind = specie_order.index(site_specie)
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
vac_string = "$Vac_{"
if not specie_ind_del-1:
label = vac_string+site_specie+'}$'
else:
label = vac_string+site_specie+'_'+str(cur_ind)+'}$'
y_data.append({'data':data,'name':label})
i += 1
def compute_as_formation_energies(mu_vals):
en = []
for as_def in antisite_defs:
site_specie = as_def['site_specie']
sub_specie = as_def['substitution_specie']
ind1 = specie_order.index(site_specie)
ind2 = specie_order.index(sub_specie)
uncor_energy = as_def['energy']
form_en = uncor_energy + mu_vals[ind1] - mu_vals[ind2]
en.append(form_en)
return en
en_res = []
for key in sorted(new_mu_dict.keys()):
mu_val = new_mu_dict[key]
en_res.append(compute_as_formation_energies(mu_val))
i = 0
for as_def in antisite_defs:
data = [data[i] for data in en_res]
site_specie = as_def['site_specie']
sub_specie = as_def['substitution_specie']
ind1 = specie_order.index(site_specie)
ind2 = specie_order.index(sub_specie)
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
y_data.append({'data':data,'name':label})
i += 1
en_data['y'] = y_data
# Return chem potential as well
mu_data = {'x_label':els[0]+' mole fraction', 'x':[]}
mu_data['x'] = [dat[0][0] for dat in res1] # x-axis data
y_data = []
for j in range(m):
specie = specie_order[j]
mus = [new_mu_dict[key][j] for key in sorted(new_mu_dict.keys())]
y_data.append({'data':mus, 'name':specie})
mu_data['y'] = y_data
return conc_data, en_data, mu_data
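# Illustrative call sketch (hypothetical values; energies in eV, T in K):
#   vac_defs = [{'site_index': 1, 'site_specie': 'Al',
#                'site_multiplicity': 3, 'energy': 1.2},
#               {'site_index': 2, 'site_specie': 'Ni',
#                'site_multiplicity': 1, 'energy': 1.5}]
#   antisite_defs uses the same keys plus 'substitution_specie'.
#   form_en, mus = dilute_solution_model(struct, e0, vac_defs,
#                                        antisite_defs, 1000,
#                                        generate='energy')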
@requires(sympy_found,
"comute_defect_density requires Sympy module. Please install it.")
def compute_defect_density(structure, e0, vac_defs, antisite_defs, T=800,
trial_chem_pot=None, plot_style="highcharts"):
"""
Wrapper for the dilute_solution_model.
The computed plot data is prepared based on plot_style.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite defect
are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
T: Temperature in Kelvin
trial_chem_pot (optional): Trial chemical potentials to speedup
the plot generation. Format is {el1:mu1,...}
plot_style (string): Allowed options are
1) highcharts (default)
2) gnuplot
Returns:
The plot data is generated and returned in asked format.
"""
conc_data, en_data, mu_data = dilute_solution_model(
structure,e0,vac_defs,antisite_defs,T,
trial_chem_pot=trial_chem_pot)
if plot_style == 'highcharts':
"Energy data is ignored in this mode"
hgh_chrt_data = {}
hgh_chrt_data['xAxis'] = conc_data['x_label']
hgh_chrt_data['yAxis'] = conc_data['y_label']
series = []
x = conc_data['x']
for y_data in conc_data['y']:
y = y_data['data']
xy = zip(x,y)
xy = [list(el) for el in xy]
name = y_data['name'].strip('$')
flds= name.split('_')
def_string = flds[0]
site_string = flds[1].strip('{}')
name = def_string+"<sub>"+site_string+"</sub>"
#series.append({'data':xy, 'name':y_data['name']})
series.append({'data':xy, 'name':name})
hgh_chrt_data['series'] = series
return hgh_chrt_data
elif plot_style == 'gnuplot':
def data_to_rows(inp_data):
rows = []
labels = []
labels.append(inp_data['x_label'])
labels += [y['name'] for y in inp_data['y']]
#labels.sort()
rows.append('#'+'\t'.join(labels))
m = len(inp_data['x'])
for i in range(m):
data = []
data.append(inp_data['x'][i])
data += [y['data'][i] for y in inp_data['y']]
data = [float(x) for x in data]
rows.append('\t'.join(list(map(str,data))))
return rows
conc_rows = data_to_rows(conc_data)
en_rows = data_to_rows(en_data)
mu_rows = data_to_rows(mu_data)
return conc_rows, en_rows, mu_rows
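# Illustrative gnuplot-mode usage (hypothetical inputs): each returned list
# holds tab-separated rows, with a '#'-prefixed label row first, ready to
# be written out, e.g.:
#   conc_rows, en_rows, mu_rows = compute_defect_density(
#       struct, e0, vac_defs, antisite_defs, T=1000, plot_style='gnuplot')
#   with open('conc.dat', 'w') as f:
#       f.write('\n'.join(conc_rows))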
#solute_site_preference_finder is based on dilute_solution_model and so most
#of the code is same. However differences exist in setting up and processing
#hence new function
@requires(sympy_found, "solute_site_preference_finder requires Sympy module. "\
"Please install it.")
def solute_site_preference_finder(
structure, e0, T, vac_defs, antisite_defs, solute_defs,
solute_concen=0.01, trial_chem_pot = None):
"""
Compute the solute defect densities using dilute solution model.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
T: Temperature in Kelvin
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite
defect are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
solute_defs: List of solute defect parameters in the dictionary
            format. Similar to that of antisite_defs, with the solute specie
            specified in substitution_specie.
solute_concen: Solute concentration (in fractional value)
trial_chem_pot: Trial chemical potentials to speedup the plot
generation. Format is {el1:mu1,...}
Returns:
plot_data: The data for plotting the solute defect concentration.
"""
if not check_input(vac_defs):
raise ValueError('Vacancy energy is not defined')
if not check_input(antisite_defs):
raise ValueError('Antisite energy is not defined')
formation_energies = {}
formation_energies['vacancies'] = copy.deepcopy(vac_defs)
formation_energies['antisites'] = copy.deepcopy(antisite_defs)
formation_energies['solute'] = copy.deepcopy(solute_defs)
for vac in formation_energies['vacancies']:
del vac['energy']
for asite in formation_energies['antisites']:
del asite['energy']
for solute in formation_energies['solute']:
del solute['energy']
# Setup the system
site_species = [vac_def['site_specie'] for vac_def in vac_defs]
solute_specie = solute_defs[0]['substitution_specie']
site_species.append(solute_specie)
multiplicity = [vac_def['site_multiplicity'] for vac_def in vac_defs]
m = len(set(site_species)) # distinct species
n = len(vac_defs) # inequivalent sites
# Reduce the system and associated parameters such that only distinctive
# atoms are retained
comm_div = gcd(*tuple(multiplicity))
multiplicity = [val/comm_div for val in multiplicity]
multiplicity.append(0)
e0 = e0/comm_div
T = Float(T)
#c0 = np.diag(multiplicity)
c0 = np.diag(np.ones(n+1))
c0[n,n] = 0
mu = [Symbol('mu'+str(i)) for i in range(m)]
# Generate maps for hashing
# Generate specie->mu map and use it for site->mu map
specie_order = [] # Contains hash for site->mu map Eg: [Al, Ni]
site_specie_set = set() # Eg: {Ni, Al}
for i in range(len(site_species)):
site_specie = site_species[i]
if site_specie not in site_specie_set:
site_specie_set.add(site_specie)
specie_order.append(site_specie)
site_mu_map = [] # Eg: [mu0,mu0,mu0,mu1] where mu0->Al, and mu1->Ni
for i in range(len(site_species)):
site_specie = site_species[i]
j = specie_order.index(site_specie)
site_mu_map.append(j)
specie_site_index_map = [] # Eg: [(0,3),(3,4)] for Al & Ni
for i in range(m):
low_ind = site_species.index(specie_order[i])
if i < m-1:
hgh_ind = site_species.index(specie_order[i+1])
else:
hgh_ind = len(site_species)
specie_site_index_map.append((low_ind,hgh_ind))
"""
dC: delta concentration matrix:
dC[i,j,k]: Concentration change of atom i, due to presence of atom
j on lattice site k
Special case is [i,i,i] which is considered as vacancy
Few cases: dC[i,i,i] = -1 due to being vacancy special case
dC[k,k,i] = +1 due to increment in k at i lattice if i
lattice type is of different element
dC[i,k,i] = -1 due to decrement of ith type atom due to
presence of kth type atom on ith sublattice and kth type
atom specie is different from ith sublattice atom specie
dC[i,k,k] = 0 due to no effect on ith type atom
dC[i,j,k] = 0 if i!=j!=k
"""
dC = np.zeros((n+1,n+1,n), dtype=np.int)
for i in range(n):
for j in range(n):
for k in range(n):
if i == j and site_species[j] != site_species[k] and \
                        site_species[i] != site_species[k]:
dC[i,j,k] = 1
for j in range(n+1):
for k in range(n):
if i == k:
dC[i,j,k] = -1
for k in range(n):
dC[n,n,k] = 1
for k in range(n):
for j in range(n):
if i != j:
if site_species[i] == site_species[k]:
dC[i,j,k] = 0
for ind_map in specie_site_index_map:
if ind_map[1]-ind_map[0] > 1:
for index1 in range(ind_map[0]+1,ind_map[1]):
for index2 in range(ind_map[0]):
for i in range(n):
dC[i,index1,index2] = 0
for index2 in range(ind_map[1],n):
for i in range(n):
dC[i,index1,index2] = 0
# dE matrix: Flip energies (or raw defect energies)
els = [vac_def['site_specie'] for vac_def in vac_defs]
dE = []
for i in range(n+1):
dE.append([])
for i in range(n+1):
for j in range(n):
dE[i].append(0)
for j in range(n):
for i in range(n):
if i == j:
dE[i][j] = vac_defs[i]['energy']
else:
sub_specie = vac_defs[i]['site_specie']
site_specie = vac_defs[j]['site_specie']
if site_specie == sub_specie:
dE[i][j] = 0
else:
for as_def in antisite_defs:
if int(as_def['site_index']) == j+1 and \
sub_specie == as_def['substitution_specie']:
dE[i][j] = as_def['energy']
break
# Solute
site_specie = vac_defs[j]['site_specie']
for solute_def in solute_defs:
def_site_ind = int(solute_def['site_index'])
def_site_specie = solute_def['site_specie']
if def_site_specie == site_specie and def_site_ind == j+1:
dE[n][j] = solute_def['energy']
break
dE = np.array(dE)
#np.where(dE == np.array(None), 0, dE)
# Initialization for concentrations
# c(i,p) == presence of ith type atom on pth type site
c = Matrix(n+1,n,[0]*n*(n+1))
for i in range(n+1):
for p in range(n):
c[i,p] = Integer(c0[i,p])
site_flip_contribs = []
for epi in range(n+1):
sum_mu = sum([mu[site_mu_map[j]]*Integer(
dC[j,epi,p]) for j in range(n+1)])
flip = dC[i,epi,p] * exp(-(dE[epi,p]-sum_mu)/(k_B*T))
if flip not in site_flip_contribs:
site_flip_contribs.append(flip)
c[i,p] += flip
host_c = Matrix(n,n,[0]*n*n)
for i in range(n):
for p in range(n):
host_c[i,p] = Integer(c0[i,p])
site_flip_contribs = []
for epi in range(n):
sum_mu = sum([mu[site_mu_map[j]]*Integer(
dC[j,epi,p]) for j in range(n)])
flip = dC[i,epi,p] * exp(-(dE[epi,p]-sum_mu)/(k_B*T))
if flip not in site_flip_contribs:
site_flip_contribs.append(flip)
host_c[i,p] += flip
#specie_concen = [sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
#total_c = [sum(c[ind[0]:ind[1]]) for ind in specie_site_index_map]
total_c = []
for ind in specie_site_index_map:
val = 0
for i in range(*ind):
sum_i = sum([c[i,j]*multiplicity[j] for j in range(n)])
val += sum_i
total_c.append(val)
c_ratio = [total_c[i]/sum(total_c) for i in range(m)]
host_total_c = []
for ind in specie_site_index_map[:-1]:
val = 0
for i in range(*ind):
sum_i = sum([host_c[i,j]*multiplicity[j] for j in range(n)])
val += sum_i
host_total_c.append(val)
host_c_ratio = [host_total_c[i]/sum(host_total_c) for i in range(m-1)]
# Expression for Omega, the Grand Potential
omega1 = e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:])*multiplicity[i] \
for i in range(n)])
omega = omega1
used_dEs = []
for p_r in range(n):
for epi in range(n):
sum_mu1 = sum([mu[site_mu_map[j]]*Integer(
dC[j,epi,p_r]) for j in range(n)])
sum_mu = sum_mu1 - mu[site_mu_map[n]]* dC[n,epi,p_r]
if p_r != epi and site_mu_map[p_r] == site_mu_map[epi]:
continue
if dE[epi,p_r] not in used_dEs:
omega1 -= k_B*T*multiplicity[p_r] * \
exp(-(dE[epi,p_r]-sum_mu1)/(k_B*T))
omega -= k_B*T*multiplicity[p_r] * \
exp(-(dE[epi,p_r]-sum_mu)/(k_B*T))
used_dEs.append(dE[epi,p_r])
# Compute composition ranges
max_host_specie_concen = 1-solute_concen
mult = multiplicity
specie_concen = [
sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
host_specie_concen_ratio = [specie_concen[i]/sum(specie_concen)* \
max_host_specie_concen for i in range(m)]
host_specie_concen_ratio[-1] = solute_concen
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
comp1_min = sum(multiplicity[li:hi])/sum(multiplicity)* \
max_host_specie_concen - 0.01
comp1_max = sum(multiplicity[li:hi])/sum(multiplicity)* \
max_host_specie_concen + 0.01
delta = (comp1_max - comp1_min)/50.0
#def reduce_mu():
# omega = [e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:]) for i in range(n)])]
# x = solve(omega)
# return x
def reduce_mu():
host_concen = 1-solute_concen
new_c0 = c0.astype(float)
for i in range(n):
new_c0[i,i] = host_concen*c0[i,i]
new_c0[n,n] = 2*solute_concen
omega = [
e0-sum([mu[site_mu_map[i]]*sum(new_c0[i,:])
for i in range(n+1)])]
x = solve(omega)
return x
def compute_solute_mu_by_lin_search(host_mu_vals):
# Compute trial mu
mu_red = reduce_mu()
mult = multiplicity
specie_concen = [sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
max_host_specie_concen = 1-solute_concen
host_specie_concen_ratio = [specie_concen[i]/sum(specie_concen)* \
max_host_specie_concen for i in range(m)]
host_specie_concen_ratio[-1] = solute_concen
y_vect = host_specie_concen_ratio
vector_func = [y_vect[i]-c_ratio[i] for i in range(m)]
vector_func.append(omega)
min_diff = 1e10
mu_vals = None
c_val = None
m1_min = -20.0
if e0 > 0:
m1_max = 10 # Search space needs to be modified
else:
m1_max = 0
for m1 in np.arange(m1_min,m1_max,0.1):
trial_mus = host_mu_vals+[m1]
try:
x = nsolve(vector_func,mu,trial_mus,module="numpy")
if x:
mu_vals = [float(mu_val) for mu_val in x]
break
except:
continue
else:
raise ValueError()
return mu_vals
def compute_mus():
# Compute trial mu
mu_red = reduce_mu()
mult = multiplicity
specie_concen = [
sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
max_host_specie_concen = 1-solute_concen
host_specie_concen_ratio = [specie_concen[i]/sum(specie_concen)* \
max_host_specie_concen for i in range(m)]
host_specie_concen_ratio[-1] = solute_concen
y_vect = host_specie_concen_ratio
vector_func = [y_vect[i]-c_ratio[i] for i in range(m)]
vector_func.append(omega)
mu_vals = None
c_val = None
m_min = -15.0
if e0 > 0:
m_max = 10 # Search space needs to be modified
else:
m_max = 0
for m1 in np.arange(m_min,m_max,0.3):
for m2 in np.arange(m_min,m_max,0.3):
m0 = mu_red[mu[0]].subs([(mu[1],m1),(mu[2],m2)])
try:
mu_vals = nsolve(vector_func,mu,[m0,m1,m2],module="numpy")
# Line needs to be modified to include all mus when n > 2
except:
continue
break
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
break
else:
raise ValueError("Couldn't find mus")
return mu_vals
if not trial_chem_pot:
# Try computing mus by assuming one of the defects is dominant at 0.01
# concen. First vacancy is tried and then antisite
# Generate trial mus assuming vacancy as dominant defect
#for specie-0 at lower yval
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
li1 = specie_site_index_map[1][0]
hi1 = specie_site_index_map[1][1]
spec_mult = [sum(multiplicity[li:hi]), sum(multiplicity[li1:hi1])]
ln_def_conc = 4.60517
for i in range(li,hi):
vac_flip_en = vac_defs[i]['energy']
mu_vals = [ln_def_conc*k_B*T -vac_flip_en]
mu_vals.append((e0 - spec_mult[0]*mu_vals[0]) / spec_mult[1])
comp_ratio = comp1_min
# Test if the trial mus are good
vector_func = [comp_ratio - host_c_ratio[0]]
vector_func.append(omega1)
try:
host_mu_vals = nsolve(vector_func,mu[:-1],mu_vals)
if host_mu_vals:
host_mu_vals = [float(mu_val) for mu_val in host_mu_vals]
                    mu_vals = compute_solute_mu_by_lin_search(host_mu_vals)
break
except: # Go for antisite as dominant defect
mu_gs = [Symbol('mu_gs'+j.__str__()) for j in range(m-1)]
eqs = [mu_gs[0]-mu_gs[1] - (ln_def_conc*k_B*T-antisite_defs[i][
'energy'])]
eqs.append(spec_mult[0]*mu_gs[0] + spec_mult[1]*mu_gs[1] - e0)
x = solve(eqs, mu_gs)
host_mu_vals = []
for key in sorted(x.keys(),key=lambda inp: inp.name):
host_mu_vals.append(x[key])
vector_func = [comp_ratio - host_c_ratio[0]]
vector_func.append(omega1)
try:
host_mu_vals = nsolve(vector_func,mu[:-1],host_mu_vals)
if host_mu_vals:
host_mu_vals = [float(mu_val) for mu_val in host_mu_vals]
mu_vals = compute_solute_mu_by_lin_search(host_mu_vals)
break
except: # Go to the default option (search the space)
pass
else:
mu_vals = compute_mus()
else:
try:
mu_vals = [trial_chem_pot[element] for element in specie_order]
except:
mu_vals = compute_mus()
# Compile mu's for all composition ratios in the range
#+/- 1% from the stoichiometry
result = {}
for y in np.arange(comp1_min,comp1_max+delta,delta):
y_vect = []
y_vect.append(y)
y2 = max_host_specie_concen - y
y_vect.append(y2)
y_vect.append(solute_concen)
vector_func = [y_vect[i]-c_ratio[i] for i in range(1,m)]
vector_func.append(omega)
try:
x = nsolve(vector_func,mu,mu_vals)
if x:
mu_vals = [float(mu_val) for mu_val in x]
except:
continue
result[y] = mu_vals
res = []
# Compute the concentrations for all the compositions
for key in sorted(result.keys()):
mu_val = result[key]
total_c_val = [total_c[i].subs(dict(zip(mu,mu_val))) \
for i in range(len(total_c))]
c_val = c.subs(dict(zip(mu,mu_val)))
# Concentration of first element/over total concen
res1 = []
res1.append(float(total_c_val[0]/sum(total_c_val)))
sum_c0 = sum([c0[i,i] for i in range(n)])
for i in range(n+1):
for j in range(n):
if i == j: # Vacancy
vac_conc = float(exp(-(mu_val[site_mu_map[i]]+dE[i,i])/(k_B*T)))
res1.append(vac_conc)
else: # Antisite
res1.append(float(c_val[i,j]/c0[j,j]))
res.append(res1)
res = np.array(res)
dtype = [(str('x'),np.float64)]+[(str('y%d%d' % (i, j)), np.float64) \
for i in range(n+1) for j in range(n)]
res1 = np.sort(res.view(dtype),order=[str('x')],axis=0)
conc = []
for i in range(n+1):
conc.append([])
for j in range(n):
conc[i].append([])
for i in range(n+1): # Append vacancies
for j in range(n):
y1 = [dat[0][i*n+j+1] for dat in res1]
conc[i][j] = y1
# Compute solute site preference
# Removing the functionality
#site_pref_data = {}
"""Because all the plots have identical x-points storing it in a
single array"""
#site_pref_data['x'] = [dat[0][0] for dat in res1] # x-axis data
# Element whose composition is varied. For x-label
#site_pref_data['x_label'] = els[0]+ "_mole_fraction"
#site_pref_data['y_label'] = "$"+solute_specie+"_{"+els[0]+"}/("+\
# solute_specie+"_{"+els[0]+"}+"+solute_specie+"_{"+els[1]+"})$"
#y_data = []
#inds = specie_site_index_map[m-1]
#data1 = np.sum([multiplicity[0]*conc[ind][0] for ind in range(*inds)],axis=0)
#data2 = np.sum([multiplicity[1]*conc[ind][1] for ind in range(*inds)],axis=0)
#frac_data = data1/(data1+data2)
#frac_data = frac_data.tolist()
#y_data.append({'data':frac_data})
#site_pref_data['y'] = y_data
# Return all defect concentrations
conc_data = {}
"""Because all the plots have identical x-points storing it in a
single array"""
conc_data['x'] = [dat[0][0] for dat in res1] # x-axis data
# Element whose composition is varied. For x-label
conc_data['x_label'] = els[0]+ " mole fraction"
conc_data['y_label'] = "Point defect concentration"
y_data = []
# Vacancy
for i in range(n):
data = conc[i][i]
specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
vac_string = "$Vac_{"
if not specie_ind_del-1:
label = vac_string+specie+'}$'
else:
label = vac_string+specie+'_'+str(cur_ind)+'}$'
# Plot data and legend info
y_data.append({'data':data,'name':label})
# Antisites and solute
for i in range(n):
site_specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
for j in range(m):
sub_specie = specie_order[j]
if sub_specie == site_specie:
continue
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
inds = specie_site_index_map[j]
# TODO: Investigate the value below
data = np.sum([conc[ind][i] for ind in range(*inds)],axis=0)
data = data.tolist()
y_data.append({'data':data,'name':label})
conc_data['y'] = y_data
#return site_pref_data, conc_data
return conc_data
@requires(sympy_found,
"solute_defect_density requires Sympy module. Please install it.")
def solute_defect_density(structure, e0, vac_defs, antisite_defs, solute_defs,
solute_concen=0.01, T=800, trial_chem_pot = None,
plot_style="highchargs"):
"""
Wrapper for the solute_site_preference_finder.
The computed plot data is prepared based on plot_style.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite defect
are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
solute_defs: List of solute defect parameters in the dictionary
            format. Similar to that of antisite_defs, with the solute specie
            specified in substitution_specie.
solute_concen: Solute concentration (in fractional value)
T: Temperature in Kelvin
trial_chem_pot (optional): Trial chemical potentials to speedup
the plot generation. Format is {el1:mu1,...}
plot_style (string): Allowed options are
1) highcharts (default)
2) gnuplot
Returns:
The plot data is generated and returned in asked format.
"""
#solute_site_pref_data, def_conc_data = solute_site_preference_finder(
def_conc_data = solute_site_preference_finder(
structure, e0, T, vac_defs, antisite_defs, solute_defs,
solute_concen=solute_concen, trial_chem_pot=trial_chem_pot)
if plot_style == 'highcharts':
"Energy data is ignored in this mode"
hgh_chrt_data = {}
hgh_chrt_data['xAxis'] = def_conc_data['x_label']
hgh_chrt_data['yAxis'] = def_conc_data['y_label']
series = []
x = def_conc_data['x']
for y_data in def_conc_data['y']:
y = y_data['data']
xy = zip(x,y)
xy = [list(el) for el in xy]
name = y_data['name'].strip('$')
flds= name.split('_')
def_string = flds[0]
site_string = flds[1].strip('{}')
name = def_string+"<sub>"+site_string+"</sub>"
#series.append({'data':xy, 'name':y_data['name']})
series.append({'data':xy, 'name':name})
hgh_chrt_data['series'] = series
return hgh_chrt_data
elif plot_style == 'gnuplot':
def data_to_rows(inp_data, y_lbl_flg):
rows = []
labels = []
labels.append(inp_data['x_label'])
if y_lbl_flg:
labels.append(inp_data['y_label'])
else:
labels += [y['name'] for y in inp_data['y']]
rows.append('#'+'\t'.join(labels))
m = len(inp_data['x'])
for i in range(m):
data = []
data.append(inp_data['x'][i])
data += [y['data'][i] for y in inp_data['y']]
data = [float(x) for x in data]
rows.append('\t'.join(list(map(str,data))))
return rows
#solute_site_pref_rows = data_to_rows(solute_site_pref_data, True)
pt_def_conc_rows = data_to_rows(def_conc_data, False)
#return solute_site_pref_rows, pt_def_conc_rows
return pt_def_conc_rows
|
mbkumar/pydii
|
pydii/dilute_solution_model.py
|
Python
|
mit
| 53,799
|
[
"CRYSTAL",
"VASP",
"pymatgen"
] |
bd68cd12fa00b7dc820b70be6b93adf6d62bffa184ba770ffacf82112cbd08a9
|
"""Simple Python lib for the ISY home automation netapp
This is a Python interface to the ISY rest interface
providomg simple commands to query and control registared Nodes and Scenes
and well as a method of setting or querying vars
"""
__author__ = 'Peter Shipley <peter.shipley@gmail.com>'
__copyright__ = "Copyright (C) 2015 Peter Shipley"
__license__ = "BSD"
__version__ = "0.1.20160710"
#from xml.dom.minidom import parse, parseString
# from StringIO import StringIO
# import xml.etree.ElementTree as # ET
# import base64
import re
import os
import sys
#import string
import time
from warnings import warn
import logging
import xml.etree.ElementTree as ET
import json
#logging.basicConfig(level=logging.INFO)
import collections
#try:
# from suds.client import Client
# suds_import = 1
#except ImportError:
# suds_import = 0
from ISY.IsyUtilClass import IsyUtil, IsySubClass, et2d
# from ISY.IsyNodeClass import IsyNode, IsyScene, IsyNodeFolder, _IsyNodeBase
from ISY.IsyProgramClass import *
#from ISY.IsyVarClass import IsyVar
from ISY.IsyExceptionClass import *
from ISY.IsyEvent import ISYEvent
from ISY.IsyDebug import *
import pprint
if sys.hexversion < 0x3000000:
import urllib2 as URL
# HTTPPasswordMgrWithDefaultRealm = URL.HTTPPasswordMgrWithDefaultRealm
# Request, build_opener, request, HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm, URLError, HTTPError
else:
import urllib as URL
from urllib.request import HTTPPasswordMgrWithDefaultRealm
# import netrc
# Debug Flags:
# 0x0001 = report loads
# 0x0002 = report urls call
# 0x0004 = report func call
# 0x0008 = Dump loaded data
#
# 0x0010 = report changes to nodes
# 0x0020 = report soap web
# 0x0040 = report events
# 0x0080 = print __del__()
#
# 0x0100 =
# 0x0200 = report responce data
# 0x0400 = report raw events
# 0x0800 =
#
# 0x1000 =
# 0x2000 =
# 0x4000 =
# 0x8000 =
#
#
# EventUpdate Mask:
# 0x00 = update all
# 0x01 = Ignore Node events
# 0x02 = Ignore Var events
# 0x04 = Ignore Program events
# 0x08 =
# 0x10 = Ignore Climate events
# 0x20 =
# 0x40 =
# 0x80 =
#
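# Flags combine with bitwise OR, e.g. Isy(debug=0x0001 | 0x0002) reports
# both loads and URL calls (illustrative).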
_pro_models = [1100, 1110, 1040, 1050]
__all__ = ['Isy', 'IsyGetArg']
# if hasattr(instance, 'tags') and isinstance(instance.tags, dict):
# for tag in instance.tags:
# def batch .write
# _nodedict dictionary of node data indexed by node ID
# node2addr dictionary mapping node names to node ID
# nodeCdict dictionary cache or node objects indexed by node ID
class Isy(IsyUtil):
""" Obj class the represents the ISY device
Keyword Args:
addr : IP address of ISY
userl/userp : User Login / Password
debug : Debug flags (default 0)
cachetime : cache experation time [NOT USED] (default 0)
faststart : ( ignored if eventupdate is used )
0=preload cache as startup
1=load cache on demand
eventupdates: run a sub-thread and stream events updates from ISY
same effect as calling Isy().start_event_thread()
"""
# import functions
from ISY._isyclimate import load_clim, clim_get_val, clim_query, clim_iter
from ISY._isyvar import load_vars, \
var_get_value, var_set_value, _var_set_value, \
var_addrs, var_ids, get_var, _var_get_id, \
var_get_type, var_iter, var_add, \
var_delete, _var_delete, \
var_rename, _var_rename, \
var_refresh_value
from ISY._isyprog import load_prog, get_prog, _prog_get_id, \
prog_iter, prog_get_src, prog_addrs, \
prog_comm, _prog_comm, \
prog_get_path, _prog_get_path, \
prog_rename, _prog_rename
from ISY._isynode import load_nodes, _gen_member_list, _gen_folder_list, \
_gen_nodegroups, _gen_nodedict, node_names, scene_names, \
node_addrs, scene_addrs, get_node, _node_get_id, node_get_prop, \
node_set_prop, _node_send, node_comm, _updatenode, \
load_node_types, node_get_type, node_iter, _updatenode, \
node_get_path, _node_get_path, _node_get_name, \
node_set_powerinfo, node_enable, \
node_del, _node_remove, \
node_restore, node_restore_all, \
node_get_notes
# node_rename,
from ISY._isynet_resources import _load_networking, load_net_resource, \
_net_resource_get_id, net_resource_run, \
net_resource_names, net_resource_iter, \
load_net_wol, net_wol, _net_wol_get_id, net_wol_names, net_wol_iter, \
net_wol_ids, net_resource_ids
# from ISY._isyzb import load_zb, zb_scannetwork, zb_ntable, zb_ping_node, \
# zbnode_addrs, zbnode_names, zbnode_iter
## set_var_value, _set_var_value, var_names
if sys.hexversion < 0x3000000:
_password_mgr = URL.HTTPPasswordMgrWithDefaultRealm()
_handler = URL.HTTPBasicAuthHandler(_password_mgr)
_opener = URL.build_opener(_handler)
#_opener = URL.build_opener(_handler, URL.HTTPHandler(debuglevel=1))
# URL.HTTPHandler(debuglevel=1)
else:
_password_mgr = URL.request.HTTPPasswordMgrWithDefaultRealm()
_handler = URL.request.HTTPBasicAuthHandler(_password_mgr)
_opener = URL.request.build_opener(_handler)
def __init__(self, **kwargs):
#
# Keyword args
#
self.userl = kwargs.get("userl", os.getenv('ISY_USER', "admin"))
self.userp = kwargs.get("userp", os.getenv('ISY_PASS', "admin"))
self.addr = kwargs.get("addr", os.getenv('ISY_ADDR', None))
# (self.userl, self.userp, self.addr) = authtuple
# print "AUTH: ", self.addr, self.userl, self.userp
self.debug = kwargs.get("debug", 0)
if "ISY_DEBUG" in os.environ:
self.debug = self.debug & int(os.environ["ISY_DEBUG"])
# self.cachetime = kwargs.get("cachetime", 0)
self.faststart = kwargs.get("faststart", 1)
self.eventupdates = kwargs.get("eventupdates", 0)
        # an experimental alternative to IsyGetArg
self.parsearg = kwargs.get("parsearg", False)
if self.parsearg:
self.parse_args()
self._isy_event = None
        self.event_heartbeat = 0
self.error_str = ""
self.callbacks = None
self._is_pro = True
# data dictionaries for ISY state
self._name2id = dict()
self.controls = None
self.name2control = None
self._nodefolder = None
self._folder2addr = None
self._progdict = None
self._nodedict = None
self._nodegroups = None
self._groups2addr = None
self._node2addr = None
self._nodeCategory = None
self._vardict = None
self._wolinfo = None
self._net_resource = None
self.climateinfo = None
self.isy_status = dict()
self.zigbee = dict()
if self.addr is None:
from ISY.IsyDiscover import isy_discover
units = isy_discover(count=1)
for device in units.values():
self.addr = device['URLBase'][7:]
self.baseurl = device['URLBase']
else:
self.baseurl = "http://" + self.addr
if self.addr is None:
warn("No ISY address : guessing \"isy\"")
self.addr = "isy"
# print "\n\taddr", "=>", self.addr, "\n\n"
# if ( not self.userl or not self.userp ):
# netrc_info = netrc.netrc()
# login, account, password = netrc_info.authenticators(self.addr)
# print "login", "=>", repr(login)
# print "account", "=>", repr(account)
# print "password", "=>", repr(password)
# self.userl = "admin"
# self.userp = "admin"
if self.debug & _debug_loads_:
print("class Isy __init__")
print("debug ", self.debug)
# print("cachetime ", self.cachetime)
print("faststart ", self.faststart)
print("address ", self.addr)
# parse ISY_AUTH as LOGIN:PASS
#
# general setup logic
#
Isy._handler.add_password(None, self.addr, self.userl, self.userp)
# self._opener = URL.build_opener(Isy._handler, URL.HTTPHandler(debuglevel=1))
# self._opener = URL.build_opener(Isy._handler)
if self.debug & 0x02:
print("baseurl: " + self.baseurl + " : " + self.userl + " : " + self.userp)
if self.faststart < 2:
try:
self.load_conf()
except URL.URLError as e:
print("Unexpected error:", sys.exc_info()[0])
print 'Problem connecting with ISY device :', self.addr
print e
raise IsyCommunicationError(e)
if not self.faststart:
self.load_nodes()
# There for id's to Node/Var/Prog objects
self.nodeCdict = dict()
self.varCdict = dict()
self.progCdict = dict()
self.folderCdict = dict()
if self.eventupdates:
if not self._progdict:
self.load_prog()
if not self._nodedict:
self.load_nodes()
self.start_event_thread()
    # an experimental alternative to IsyGetArg
def parse_args(self):
"""
Use argparse to extract common options
unused options placed in self.unknown_args
        this is an alternative to IsyGetArg
"""
import argparse
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-d", "--debug", dest="debug",
default=self.debug,
type=int,
# action="count",
nargs='?',
help="debug options")
parser.add_argument("-a", "--address", dest="addr",
default=os.getenv('ISY_ADDR', None),
help="hostname or IP device")
parser.add_argument("-u", "--user", dest="user",
default=os.getenv('ISY_USER', None),
help="Admin Username")
parser.add_argument("-p", "--pass", dest="passw",
default=os.getenv('ISY_PASS', None),
help="Admin Password")
args, self.unknown_args = parser.parse_known_args()
if args.addr:
self.addr = args.addr
if args.user:
self.userl = args.user
if args.passw:
self.userp = args.passw
if args.debug:
self.debug = args.debug
self.parser = parser
#
# Event Subscription Code
# Allows for treaded realtime node status updating
#
def start_event_thread(self, mask=0):
""" starts event stream update thread
mask will eventually be used to "masking" events
"""
from threading import Thread
if (self.debug & 0x40):
print "start_event_thread"
# if thread already runing we should update mask
if hasattr(self, 'event_thread') and isinstance(self.event_thread, Thread):
if self.event_thread.is_alive():
print "Thread already running ?"
return
#st = time.time()
#print("start preload")
self._preload(rload=0)
#sp = time.time()
#print("start complete")
#print "load in ", (sp - st)
self._isy_event = ISYEvent(debug=self.debug)
self._isy_event.subscribe(addr=self.addr, userp=self.userp, userl=self.userl)
self._isy_event.set_process_func(self._read_event, self)
self.event_thread = Thread(target=self._isy_event.events_loop, name="event_looper")
self.event_thread.daemon = True
self.event_thread.start()
self.eventupdates = True
# print(self.event_thread)
def stop_event_tread(self):
""" Stop update thread """
if hasattr(self._isy_event, "_shut_down"):
self._isy_event._shut_down = 1
self.eventupdates = False
# @staticmethod
def _read_event(self, evnt_dat, *arg):
""" read event stream data and copy into internal state cache
internal function call
"""
# print("_read_event")
skip_default = [
# "_0", "_2", "_4", "_5", "_6", "_7", "_8",
# "_9", "_10", "_11", "_12", "_13", "_14",
# "_15", "_16", "_17", "_18", "_19", "_20",
"DON", "DOF",
]
skip = skip_default
        assert isinstance(evnt_dat, dict), "_read_event Arg must be a dict"
# event_targ holds the node address or var id
# for the current event ( if applicable )
event_targ = None
#if evnt_dat["control"] in skip:
# return
# print "evnt_dat ", evnt_dat
#
# Status/property changed
#
if evnt_dat["control"] in ["ST", "RR", "OL","DON"]:
if evnt_dat["node"] in self._nodedict:
# ADD LOCK ON NODE DATA
# print("===evnt_dat :", evnt_dat)
# print("===a :", ar)
#print(self._nodedict[evnt_dat["node"]])
target_node = self._nodedict[evnt_dat["node"]]
event_targ = evnt_dat["node"]
# create property if we do not have it yet
if not evnt_dat["control"] in target_node["property"]:
target_node["property"][evnt_dat["control"]] = dict()
target_node["property"][evnt_dat["control"]]["value"] \
= evnt_dat["action"]
target_node["property"][evnt_dat["control"]]["formatted"] \
= self._format_val(evnt_dat["action"])
if (self.debug & 0x10):
print("_read_event :", evnt_dat["node"], evnt_dat["control"], evnt_dat["action"])
print(">>>", self._nodedict[evnt_dat["node"]]["property"])
else:
warn("Event for Unknown node : {0}".format(evnt_dat["node"]), \
IsyRuntimeWarning)
elif evnt_dat["control"] == "_0" : # HeartBeat
#self.event_heartbeat = time.gmtime()
pass
#
# handle VAR value change
#
elif evnt_dat["control"] == "_1" : # Trigger Events
#
# action = "0" -> Event Status
# action = "1" -> Client Should Get Status
# action = "2" -> Key Changed
# action = "3" -> Info String
# action = "4" -> IR Learn Mode
# action = "5" -> Schedule (schedule status changed)
# action = "6" -> Variable Status (status of variable changed)
# action = "7" -> Variable Initialized (initial value of a variable )
#
if evnt_dat["action"] == "0" and 'nr' in evnt_dat['eventInfo']:
prog_id = '{0:0>4}'.format(evnt_dat['eventInfo']['id'])
event_targ = prog_id
if (self.debug & 0x40):
print "Prog Change/Updated :\t{0}".format(evnt_dat['eventInfo']['id'])
print "Prog Id :\t", prog_id
print "evnt_dat :\t", evnt_dat
if self._progdict is None:
self.load_prog(prog_id)
elif prog_id in self._progdict:
prog_dict = self._progdict[prog_id]
if 'on' in evnt_dat['eventInfo']:
prog_dict['enabled'] = 'true'
elif 'off' in evnt_dat['eventInfo']:
prog_dict['enabled'] = 'false'
else:
pass
if 'rr' in evnt_dat['eventInfo']:
prog_dict['runAtStartup'] = 'true'
elif 'nr' in evnt_dat['eventInfo']:
prog_dict['runAtStartup'] = 'false'
else:
pass
# not all prog change events have time Info
if 'r' in evnt_dat['eventInfo']:
prog_dict['lastRunTime'] = evnt_dat['eventInfo']['r']
if 'f' in evnt_dat['eventInfo']:
prog_dict['lastFinishTime'] = evnt_dat['eventInfo']['f']
if 'nsr' in evnt_dat['eventInfo']:
prog_dict['nextScheduledRunTime'] = evnt_dat['eventInfo']['nsr']
ev_status = int(evnt_dat['eventInfo']['s'])
if ev_status & 0x01:
prog_dict['running'] = 'idle'
elif ev_status & 0x02:
prog_dict['running'] = 'then'
elif ev_status & 0x03:
prog_dict['running'] = 'else'
if ev_status & 0x10:
prog_dict['status'] = 'unknown'
elif ev_status & 0x20:
prog_dict['status'] = 'true'
elif ev_status & 0x30:
prog_dict['status'] = 'false'
elif ev_status & 0xF0:
prog_dict['status'] = 'not_loaded'
else:
# TODO : Figure out why we are here...
pass
# '0002': { 'enabled': 'true',
# 'folder': 'false',
# 'id': '0002',
# 'lastFinishTime': '2013/03/30 15:11:25',
# 'lastRunTime': '2013/03/30 15:11:25',
# 'name': 'QueryAll',
# 'nextScheduledRunTime': '2013/03/31 03:00:00',
# 'parentId': '0001',
# 'runAtStartup': 'false',
# 'running': 'idle',
# 'status': 'false'},
if evnt_dat["action"] == "6" or evnt_dat["action"] == "7":
var_eventInfo = evnt_dat['eventInfo']['var']
vid = var_eventInfo['var-type'] + ":" + var_eventInfo['var-id']
                # check if the event var exists in our world
if vid in self._vardict:
# ADD LOCK ON VAR DATA
# copy var properties from event
event_targ = vid
self._vardict[vid].update(var_eventInfo)
self._vardict[vid]["val"] = int(self._vardict[vid]["val"])
self._vardict[vid]["init"] = int(self._vardict[vid]["init"])
else:
warn("Event for Unknown Var : {0}".format(vid), IsyRuntimeWarning)
elif evnt_dat["control"] == "_2" : # Driver Specific Events
pass
elif evnt_dat["control"] == "_3" : # Node Change/Updated Event
if (self.debug & 0x40):
print("Node Change/Updated Event : {0}".format(evnt_dat["node"]))
print("evnt_dat : ", evnt_dat)
#
# action = "NN" -> Node Renamed
# action = "NR" -> Node Removed
# action = "ND" -> Node Added
# action = "NR" -> Node Revised
# action = "MV" -> Node Moved (into a scene)
# action = "CL" -> Link Changed (in a scene)
# action = "RG" -> Removed From Group (scene)
# action = "EN" -> Enabled
# action = "PC" -> Parent Changed
# action = "PI" -> Power Info Changed
# action = "DI" -> Device ID Changed
# action = "DP" -> Device Property Changed
# action = "GN" -> Group Renamed
# action = "GR" -> Group Removed
# action = "GD" -> Group Added
# action = "FN" -> Folder Renamed
# action = "FR" -> Folder Removed
# action = "FD" -> Folder Added
# action = "NE" -> Node Error (Comm. Errors)
# action = "CE" -> Clear Node Error (Comm. Errors Cleared)
# action = "SN" -> Discovering Nodes (Linking)
# action = "SC" -> Node Discovery Complete
# action = "WR" -> Network Renamed
# action = "WH" -> Pending Device Operation
# action = "WD" -> Programming Device
# action = "RV" -> Node Revised (UPB)
if evnt_dat['action'] == 'EN' : # Enable
if evnt_dat['node'] in self._nodedict:
self._nodedict[evnt_dat['node']]['enabled'] = evnt_dat['eventInfo']['enabled']
elif evnt_dat['action'] == 'GN' : # Group Renamed
if evnt_dat['node'] in self._nodegroups:
oldname = self._nodegroups[evnt_dat['node']]['name']
self._nodegroups[evnt_dat['node']]['name'] = evnt_dat['eventInfo']['newName']
self._groups2addr[evnt_dat['eventInfo']['newName']] = evnt_dat['node']
del self._groups2addr[oldname]
if evnt_dat['eventInfo']['newName'] in self._name2id:
# warn Dup ID
if self._name2id[evnt_dat['eventInfo']['newName']][0] == "group":
self._name2id[evnt_dat['eventInfo']['newName']] = ("group", evnt_dat['node'])
else:
self._name2id[evnt_dat['eventInfo']['newName']] = ("group", evnt_dat['node'])
# Delete old entery if it is 'ours'
if oldname in self._name2id and self._name2id[oldname][0] == "group":
del self._name2id[oldname]
elif evnt_dat['action'] == 'GR' : # Group Removed/Deleted
if (self.debug & 0x40):
print("evnt_dat :", evnt_dat)
pass
elif evnt_dat['action'] == 'GD' : # New Group Added
if (self.debug & 0x40):
print("evnt_dat :", evnt_dat)
pass
elif evnt_dat['action'] == 'ND':
node_id = evnt_dat["node"]
node_dat = evnt_dat['eventInfo']['node']
if node_id in self.nodedict:
self.nodedict[node_id].update(node_dat)
else:
self.nodedict[node_id] = node_dat
#
# At this time results are undefined for
# Node class objects that represent a deleted node
#
elif evnt_dat['action'] == 'NR':
node_id = evnt_dat["node"]
if node_id in self._nodedict:
    node_name = self._nodedict[node_id]["name"]
    if "property" in self._nodedict[node_id]:
        self._nodedict[node_id]["property"].clear()
        del self._nodedict[node_id]["property"]
if self._node2addr and node_name in self._node2addr:
del self._node2addr[node_name]
if self._name2id and node_name in self._name2id:
del self._name2id[node_name]
if node_id in self.nodeCdict:
del self.nodeCdict[node_id]
elif evnt_dat['action'] == 'FD':
if 'folder' in evnt_dat['eventInfo'] and isinstance(evnt_dat['eventInfo']['folder'], dict):
self._nodefolder[evnt_dat['node']] = evnt_dat['eventInfo']['folder']
self._folder2addr[evnt_dat['eventInfo']['folder']['name']] = evnt_dat['node']
elif evnt_dat['action'] == 'FR':
if evnt_dat['node'] in self._nodefolder:
if evnt_dat['node'] in self.nodeCdict:
# this is tricky if the user has an IsyNodeFolder obj;
# more has to be done to tell the Obj it's dead
del self.nodeCdict[evnt_dat['node']]
del self._nodefolder[evnt_dat['node']]
elif evnt_dat['action'] == 'FN':
if evnt_dat['node'] in self._nodefolder:
oldname = self._nodefolder[evnt_dat['node']]['name']
self._nodefolder[evnt_dat['node']]['name'] = evnt_dat['eventInfo']['newName']
self._folder2addr[evnt_dat['eventInfo']['newName']] = evnt_dat['node']
del self._folder2addr[oldname]
elif evnt_dat["control"] == "_4" : # System Configuration Updated
pass
#
# action = "0" -> Time Changed
# action = "1" -> Time Configuration Changed
# action = "2" -> NTP Settings Updated
# action = "3" -> Notifications Settings Updated
# action = "4" -> NTP Communications Error
# action = "5" -> Batch Mode Updated
# node = null
# <eventInfo>
# <status>"1"|"0"</status>
# </eventInfo>
# action = "6" Battery Mode Programming Updated
# node = null
# <eventInfo>
# <status>"1"|"0"</status>
# </eventInfo>
if evnt_dat['action'] == '5':
if 'status' in evnt_dat['eventInfo']:
if evnt_dat['eventInfo']['status'] == "1":
self.isy_status['batchmode'] = True
else:
self.isy_status['batchmode'] = False
# self.isy_status['batchmode'] = (evnt_dat['eventInfo']['status'] == "1")
elif evnt_dat['action'] == '6':
if 'status' in evnt_dat['eventInfo']:
if evnt_dat['eventInfo']['status'] == "1":
self.isy_status['battery_mode_prog_update'] = True
else:
self.isy_status['battery_mode_prog_update'] = False
#self.isy_status['battery_mode_prog_update'] = (evnt_dat['eventInfo']['status'] == "1")
# status_battery_mode_prog_update
elif evnt_dat["control"] == "_5" : # System Status Updated
pass
#
# node = null
# action = "0" -> Not Busy
# action = "1" -> Busy
# action = "2" -> Idle
# action = "3" -> Safe Mode
#
elif evnt_dat["control"] == "_6" : # Internet Access Status
pass
#
# action = "0" -> Disabled
# action = "1" -> Enabled
# node = null
# <eventInfo>external URL</eventInfo>
# action = "2" -> Failed
#
elif evnt_dat["control"] == "_7" : # Progress Report
pass
elif evnt_dat["control"] == "_8" : # Security System Event
pass
elif evnt_dat["control"] == "_9" : # System Alert Event
pass
elif evnt_dat["control"] == "_10" : # OpenADR and Flex Your Power Events
pass
elif evnt_dat["control"] == "_11" : # Climate Events
pass
elif evnt_dat["control"] == "_12" : # AMI/SEP Events
pass
# if evnt_dat['action'] == '1':
# if 'ZBNetwork' in evnt_dat['eventInfo']:
# self.zigbee['network'] = evnt_dat['eventInfo']['ZBNetwork']
# elif evnt_dat['action'] == '10':
# if 'MeterFormat' in evnt_dat['eventInfo']:
# self.zigbee['MeterFormat'] = evnt_dat['eventInfo']['MeterFormat']
#
elif evnt_dat["control"] == "_13" : # External Energy Monitoring Events
pass
elif evnt_dat["control"] == "_14" : # UPB Linker Events
pass
elif evnt_dat["control"] == "_15" : # UPB Device Adder State
pass
elif evnt_dat["control"] == "_16" : # UPB Device Status Events
pass
elif evnt_dat["control"] == "_17" : # Gas Meter Events
pass
elif evnt_dat["control"] == "_18" : # Zigbee Events
pass
elif evnt_dat["control"] == "_19" : # Elk Events
pass
# if evnt_dat["action"] == "6":
# if 'se" in evnt_dat['eventInfo']:
# if evnt_dat['eventInfo']['se']['se-type'] == '156':
# print "Elk Connection State : ", evnt_dat['eventInfo']['se']['se-val']
# elif evnt_dat['eventInfo']['se']['se-type'] == '157':
# print "Elk Enable State : ", evnt_dat['eventInfo']['se']['se-val']
elif evnt_dat["control"] == "_20" : # Device Linker Events
pass
else:
if (self.debug & 0x40):
print("evnt_dat :", evnt_dat)
print("Event fall though : '{0}'".format(evnt_dat["node"]))
if self.callbacks != None:
call_targ = None
if event_targ in self.callbacks:
call_targ = event_targ
elif evnt_dat["control"] in self.callbacks:
call_targ = evnt_dat["control"]
if call_targ != None:
cb = self.callbacks[call_targ]
if isinstance(cb[0], collections.Callable):
try:
cb[0](evnt_dat, *cb[1])
except Exception as e:
print "e=",e
print "sys.exc_info()=",sys.exc_info()
print("Callback Error:", sys.exc_info()[0])
else:
warn("callback for {!s} not callable, deleting callback".format(call_targ),
IsyRuntimeWarning)
del self.callbacks[call_targ]
return
def _format_val(self, vs):
try:
if isinstance(vs, dict):
if "#val" in vs:
v = int(vs["#val"])
else:
return None
else:
v = int(vs)
except ValueError:
return "0"
else:
if v == 0:
    return "off"
elif v == 255:
    return "on"
else:
    return str((v * 100) // 255)
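    # A minimal worked example of _format_val() (values are illustrative,
    # not from the ISY docs): raw ISY levels run 0-255.
    #   _format_val(0)               -> "off"
    #   _format_val(255)             -> "on"
    #   _format_val({'#val': '128'}) -> "50"    ( (128*100)//255 == 50 )
    #   _format_val("bogus")         -> "0"     (unparsable input)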
def addnode(self, id=None, nname=None, ntype=None, flag="0"):
"""
Adds a predefined node for a device with a given address
args:
id
nname
ntype
flag
"""
if id is None:
    raise IsyValueError("invalid node id : " + str(id))
if ntype is None:
    raise IsyValueError("invalid node type : " + str(ntype))
if nname is None:
    nname = id
return self.soapcomm("AddNode", id=id, name=nname, type=ntype, flag=flag)
def getsystemdatetime(self):
"""
current system date and time of the ISY
"""
r = self.soapcomm("GetSystemDateTime")
return (r)
def startuptime(self):
"""
timestamp of when ISY was last started
"""
r = self.soapcomm("GetStartupTime")
return (r)
def webcam_get(self):
"""
get webcam list available in ISY's ajax web UI
returns dict
"""
#campath="/WEB/CONF/cams.jsn"
r = self.soapcomm("GetSysConf", name="/WEB/CONF/cams.jsn")
return json.loads(r)
def webcam_add(self, brand=None, num=None, ip=None, model='1', name=None, passwd='', port='80', user=''):
"""
Add webcam to UI
args:
brand brand of cam (one of : Foscam Smarthome Axis Panasonic MJPGstreamer)
ip IP of cam
port TCP port for cam (default = 80)
model
name
user
passwd
"""
if brand is None or brand.lower() not in ["foscam", "smarthome", "axis", "panasonic", "mjpgstreamer"]:
    raise IsyValueError("webcam_add : invalid value for arg 'brand' ")
brand = brand.lower()
if ip is None:
raise IsyValueError("webcam_add : invalid ip")
if name is None:
name = brand
camlist = self.webcam_get()
if 'lastId' in camlist:
maxid = int( camlist['lastId']) + 2
else:
maxid = len(camlist) + 2
if num is None:
for i in range(1, maxid):
if str(i) not in camlist:
num = str(i)
break
else:
raise RuntimeError( "webcam_add : failed cam index")
elif isinstance(num, int):
num = str(num)
if self.debug & 0x100:
print "using num : ", num
newcam = {'brand': brand, 'ip': ip, 'model': model, 'name': name, 'pass': passwd, 'port': port, 'user': user}
camlist[num] = newcam
if self.debug & 0x100:
print "webcam_add : ",
pprint.pprint(camlist)
if int(num) > int(camlist['lastId']):
if self.debug & 0x100:
print "new lastId = ", num, ":", camlist['lastId']
camlist['lastId'] = num
return self._webcam_set(camlist)
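    # Hedged usage sketch for webcam_add() (IP and name are made up):
    #   isy.webcam_add(brand='Foscam', ip='10.0.0.20', name='porch')
    # This picks the first free index in cams.jsn, appends the new entry,
    # and pushes the updated list back to the ISY via _webcam_set().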
def webcam_del(self, camid=None):
"""
delete an entry from UI's webcam list
arg:
camid index for camera in camlist
"""
if camid is None:
raise IsyValueError("webcam_del : arg camid is None")
camlist = self.webcam_get()
if self.debug & 0x100:
pprint.pprint(camlist)
if isinstance(camid, int):
camid = str(camid)
if camid not in camlist:
raise IsyValueError("webcam_del : invalid camid")
del camlist[camid]
if 'lastId' in camlist:
maxid = int( camlist['lastId']) + 2
else:
maxid = len(camlist) + 2
lastid = -1
for i in range(1, maxid):
if str(i) in camlist and lastid < i:
lastid = i
camlist['lastId'] = str(lastid)
return self._webcam_set(camlist)
def _webcam_set(self, camdict=None):
if camdict is None:
raise IsyValueError("_webcam_set : arg camdict invalid")
camjson = json.dumps(camdict, sort_keys=True)
r = self._sendfile(data=camjson, filename="/WEB/CONF/cams.jsn", load="n")
return r
def set_debug_level(self, level=1):
"""
Sets the debug options and current level
args:
option value 0 -> 3
"""
ret = self.soapcomm("SetDebugLevel", option=level)
return ret
def get_debug_level(self, level=1):
"""
Gets the debug options and current level
"""
ret = self.soapcomm("GetDebugLevel",)
return ret
def node_discover_start(self, nodetype=None):
soapargs = dict()
if nodetype is not None:
soapargs['type'] = nodetype
ret = self.soapcomm("StartNodesDiscovery", **soapargs)
return ret
def node_discover_stop(self, flag="1"):
"""
Puts ISY out of discovery (linking) mode
The flag decides the operations (reset, crawl, spider)
to be performed after device(s) are discovered
args:
NodeOperationsFlag enum value '1', '2', '3' or '4'
Valid values
1 = add the node and reset all previous setting if any
2 = unused
3 = add the node, find all the associated nodes, and create all the linkages thereto
4 = add the node, find all the associated nodes, but do not create any linkages
"""
flag = str(flag)
if flag not in ['1', '2', '3', '4']:
raise IsyValueError("invalid flag value : " + flag)
# if code == 501 then device was already not in link/Discovery mode
ret = self.soapcomm("CancelNodesDiscovery", flag=flag)
return ret
# def node_get_props(self, naddr):
# """"
# Soap call GetNodeProps
# """
# (nodetype, node_id) = self._node_get_id(naddr)
#
# if self.debug & 0x04:
# print("node_get_props", naddr)
#
# if not node_id:
# raise LookupError(
# "node_del: {0} not a node ( {1}={2} )".format(
# naddr, node_id, nodetype))
#
# try:
# r = self.soapcomm("GetNodeProps", node=node_id)
# except IsySoapError, se:
#
# # if error code is 404 then Node did not exist or was already deleted
# # this is messy and needs to change or be removed
# code = se.code()
# if code == 404:
# return None
# raise
# else:
# return et2d( ET.fromstring(r))
#
# need to add code to update name2id and *2addr lookup arrays
#
def rename(self, objid, nname):
""" rename
args:
id = Node/Scene/Folder name or ID
name = new name
calls SOAP RenameNode() / RenameGroup() / RenameFolder()
"""
(idtype, nid) = self._node_get_id(objid)
if nid is None:
raise IsyValueError("unknown node/obj : " + objid)
if idtype == "node":
return self.soapcomm("RenameNode", id=nid, name=nname)
elif idtype == "group":
return self.soapcomm("RenameGroup", id=fid, name=nname)
elif idtype == "folder":
return self.soapcomm("RenameFolder", id=fid, name=nname)
elif idtype == "var":
# return self.var_rename(var=nid, name=nname)
raise IsyValueError("can not rename var, use var_rename() ")
elif idtype == "prog":
raise IsyValueError("can not rename prog use prog_rename() ")
else:
raise IsyValueError("node/obj " + objid + " not node (" + idtype + ")" )
#
# need to add code to update name2id and *2addr lookup arrays
#
def node_rename(self, nodeid, nname):
""" rename Node
args:
id = Node ID
name = new Node name
calls SOAP RenameNode()
"""
(idtype, nid) = self._node_get_id(nodeid)
if nid is None:
raise IsyValueError("unknown node/obj : " + nodeid)
print "nodeid ", nodeid
print "nid ", nid
return self.soapcomm("RenameNode", id=nid, name=nname)
# def node_new(self, sid, nname):
# """ create new Folder """
# return self.soapcomm("AddNode", id=1234, name=nname, type="T", flag="Y")
## scene
#
# need to add code to update name2id and *2addr lookup arrays
#
def scene_rename(self, sid, fname):
""" rename Scene/Group
args:
sid = a Scene/Group id
name = new name
calls SOAP RenameGroup()
"""
(idtype, grid) = self._node_get_id(sid)
return self.soapcomm("RenameGroup", id=grid, name=fname)
#
# need to add code to update name2id and *2addr lookup arrays
#
def scene_del(self, sid=None):
""" delete Scene/Group
args:
id : Scene address, name or Folder Obj
calls SOAP RemoveGroup()
"""
(idtype, sceneid) = self._node_get_id(sid)
if sceneid is None:
raise IsyValueError("no such Scene : " + str(sid))
#
# add code to update self._nodegroups
#
return self.soapcomm("RemoveGroup", id=sceneid)
#
# need to add code to update name2id and *2addr lookup arrays
#
def scene_new(self, nid=0, sname=None):
""" new Scene/Group
args:
id = a unique (unused) Group ID
name = name for new Scene/Group
***No error is given if Scene/Group ID is already in use***
calls SOAP AddGroup()
"""
if nid == 0:
    iid = 30001
    nid = str(iid)
    while nid in self._nodefolder or nid in self._nodegroups:
        iid += 1
        nid = str(iid)
if sname is None:
    sname = nid
if not isinstance(sname, str) or not len(sname):
    raise IsyValueError("scene name must be non zero length string")
self.soapcomm("AddGroup", id=nid, name=sname)
#
# add code to update self._nodegroups
#
return nid
def scene_add_node(self, groupid, nid, nflag=0x10):
""" add node to Scene/Group
args:
group = a unique (unused) scene_id ID
node = id, name or Node Obj
flag = set to 0x10 if node is a controller for Scene/Group
       set to 0x20 if node is a responder for Scene/Group
Add new Node to Scene/Group
calls SOAP MoveNode()
"""
(idtype, nodeid) = self._node_get_id(nid)
if nodeid is None:
raise IsyValueError("no such Node : " + str(nid))
r = self.soapcomm("MoveNode", group=groupid, node=nodeid, flag=nflag)
return r
def scene_del_node(self, groupid, nid):
""" Remove Node from Scene/Group
args:
group = address, name or Scene Obj
id = address, name or Node Obj
calls SOAP RemoveFromGroup()
"""
(idtype, nodeid) = self._node_get_id(nid)
if nodeid is None:
raise IsyValueError("no such Node : " + str(nid))
r = self.soapcomm("RemoveFromGroup", group=groupid, id=nodeid)
return r
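    # Hedged sketch of a scene workflow built from the calls above
    # (node addresses are made up):
    #   sid = isy.scene_new(sname='Movie Time')        # AddGroup
    #   isy.scene_add_node(sid, '11 22 33 1', 0x10)    # as controller
    #   isy.scene_add_node(sid, '11 22 44 1', 0x20)    # as responder
    #   isy.scene_del_node(sid, '11 22 44 1')          # RemoveFromGroup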
## folder
#
# need to add code to update name2id and *2addr lookup arrays
#
def folder_rename(self, fid, fname):
""" rename Folder
args:
id = folder ID
name = new folder name
calls SOAP RenameFolder()
"""
(idtype, fid) = self._node_get_id(fid)
r = self.soapcomm("RenameFolder", id=fid, name=fname)
return r
def folder_new(self, fid, fname):
""" create new Folder
args:
folder_id = a unique (unused) folder ID
folder name = name for new folder
returns error if folder ID is already in use
calls SOAP AddFolder()
"""
if fid == 0:
iid = 50001
fid = str(iid)
while fid in self._nodefolder or fid in self._nodegroups:
iid += 1
fid = str(iid)
r = self.soapcomm("AddFolder", fid=1234, name=fname)
if isinstance(r, tuple) and r[0] == '200':
self._nodefolder[fid] = dict()
self._nodefolder[fid]['address'] = fid
self._nodefolder[fid]['folder-flag'] = '0'
self._nodefolder[fid]['name'] = fname
return r
def folder_del(self,fid):
""" delete folder
args:
fid : folder address, name or Folder Obj
calls SOAP RemoveFolder()
"""
(idtype, fid) = self._node_get_id(fid)
if fid is None:
raise IsyValueError("Unknown Folder : " + str(fid))
r = self.soapcomm("RemoveFolder", id=fid)
if isinstance(r, tuple) and r[0] == '200':
if fid in self._nodefolder:
    del self._nodefolder[fid]
# SetParent(node, nodeType, parent, parentType)
def folder_add_node(self, nid, nodeType=1, parent="", parentType=3):
""" move node/scene from folder
Named args:
node
nodeType
parent
parentType
sets Parent for node/scene
calls SOAP SetParent()
"""
(idtype, nodeid) = self._node_get_id(nid)
if nodeid is None:
raise IsyValueError("no such Node/Scene : " + str(nid))
if parent != "":
(idtype, fldid) = self._node_get_id(parent)
if fldid is None:
raise IsyValueError("no such Folder : " + str(parent))
parentid = fldid
else:
parentid = parent
r = self.soapcomm("SetParent", node=nodeid, nodeType=nodeType, parent=parentid, parentType=parentType)
return r
def folder_del_node(self, nid, nodeType=1):
""" remove node from folder
args:
node
nodeType
remove node/scene from folder ( moves to default/main folder)
calls SOAP SetParent()
"""
return self.folder_add_node(nid, nodeType=nodeType, \
parent="", parentType=3)
def set_user_credentials(self, name=None, password=None):
"""
Changes the userid and password for a user ( admin )
args:
name user name
password user password
"""
if name is None:
raise IsyValueError("set_user_credentials : name argument required ")
if password is None:
raise IsyValueError("set_user_credentials : pass argument required ")
return self.soapcomm("SetUserCredentials", name=name, password=password)
def reboot(self):
""" Reboot ISY Device
args: none
calls SOAP Reboot()
"""
return self.soapcomm("Reboot")
#
# User web commands
#
def user_fsstat(self):
""" ISY Filesystem Status
calls SOAP GetFSStat()
"""
r = self.soapcomm("GetFSStat")
return et2d( ET.fromstring(r))
def user_dir(self, name="", pattern=""):
""" Get User Folder/Directory Listing
Named args:
name
pattern
call SOAP GetUserDirectory()
"""
r = self.soapcomm("GetUserDirectory", name=name, pattern=pattern)
# print "GetUserDirectory : ", r
return et2d( ET.fromstring(r))
def user_mkdir(self, name=None):
""" Make new User Folder/Directory
Named args:
name
call SOAP MakeUserDirectory()
"""
if name is None:
raise IsyValueError("user_mkdir : invalid dir name")
if name[0] != "/":
name = "/USER/WEB/" + name
r = self.soapcomm("MakeUserDirectory", name=name)
return et2d( ET.fromstring(r))
def user_rmdir(self, name=None):
""" Remove User Folder/Directory
Named args:
name
call SOAP RemoveUserDirectory()
"""
if name is None:
raise IsyValueError("user_rmdir : invalid dir name")
name = name.rstrip('/')
if name[0] != "/":
name = "/USER/WEB/" + name
r = self.soapcomm("RemoveUserDirectory", name=name)
return et2d( ET.fromstring(r))
def user_mv(self, name=None, newName=None):
""" Move/Rename User Object (File or Directory)
Named args:
oldn
newn
call SOAP MoveUserObject()
"""
if name is None or newName is None:
raise IsyValueError("user_mv : invalid name")
if name[0] != "/":
name = "/USER/WEB/" + name
if newName[0] != "/":
newName = "/USER/WEB/" + newName
r = self.soapcomm("MoveUserObject", name=name, newName=newName)
return r
def user_rm(self, name=None):
""" Remove User File
Named args:
name
call SOAP RemoveUserFile()
"""
if name is None:
raise IsyValueError("user_mkdir : invalid name")
if name[0] != "/":
name = "/USER/WEB/" + name
r = self.soapcomm("RemoveUserFile", name=name)
return(r)
def user_getfile(self, name=None):
""" Get User File
Named args:
name
call SOAP GetUserFile()
"""
if name is None or not len(name):
    raise IsyValueError("user_getfile : invalid name")
if name[0] != "/":
name = "/USER/WEB/" + name
r = self.soapcomm("GetUserFile", name=name)
return r
def user_uploadfile(self, srcfile="", name=None, data=""):
""" upload User File
Named args:
name : name of file after upload
data : data to upload
srcfile : file containing data to upload
srcfile is used only if data is not set
if both data & srcfile are not set then
the file "name" is used
calls /file/upload/...
"""
if name is None:
raise IsyValueError("user_uploadfile : invalid name")
r = self.sendfile(src=srcfile, filename=name, data=data)
return r
def queryall(self, node=None, flag=None):
"""
Queries a node, a scene, or even the whole network
Named args:
node : name of node or scene to query (optional)
flag : enum { '1', '4', '8' }
"""
soapargs = dict()
if node is not None:
soapargs['node'] = node
if flag is not None:
soapargs['flag'] = flag
r = self.soapcomm("QueryAll", **soapargs)
#
# Util Functions
#
def _preload(self, rload=0):
""" Internal function
preload all data tables from ISY device into cache
normally this is done "on demand" as needed
"""
if rload or not self.controls:
self.load_conf()
if rload or not self._nodedict:
self.load_nodes()
# self._gen_member_list()
# if rload or not self.climateinfo:
# self.load_clim()
if rload or not self._vardict:
self.load_vars()
if rload or not self._progdict:
self.load_prog()
# if rload or not self._wolinfo:
#self.load_wol()
if rload or not self._nodeCategory:
self.load_node_types()
def _savedict(self):
""" internal debug command """
self._preload()
# self._writedict(self._wolinfo, "wolinfo.txt")
self._writedict(self._nodedict, "nodedict.txt")
self._writedict(self._nodegroups, "nodegroups.txt")
self._writedict(self._nodefolder, "folderlist.txt")
self._writedict(self._vardict, "vardict.txt")
# self._writedict(self.climateinfo, "climateinfo.txt")
self._writedict(self.controls, "controls.txt")
self._writedict(self._progdict, "progdict.txt")
self._writedict(self._nodeCategory, "nodeCategory.txt")
##
## Load System config / info and command information
##
def load_conf(self):
""" Load configuration of the system with permissible commands
args : none
internal function call
"""
if self.debug & 0x01:
print("load_conf")
configinfo = self._getXMLetree("/rest/config")
# Isy._printXML(configinfo)
# IsyCommunicationError
if configinfo is None:
raise IsyCommunicationError("Load Configuration Fail : " \
+ self.error_str)
self.name2control = dict()
self.controls = dict()
for ctl in configinfo.iter('control'):
# self._printXML(ctl)
# self._printinfo(ctl, "configinfo : ")
cprop = dict()
for child in list(ctl):
# print("child.tag " + str(child.tag) + "\t=" + str(child.text))
if child.tag == "actions":
adict = dict()
for act in child.iter('action'):
n = act.find('label').text
v = act.find('name').text
adict[n] = v
cprop[child.tag] = adict
else:
# self._printinfo(child, "child")
cprop[child.tag] = child.text
for n, v in child.items():
cprop[n] = v
# print("cprop ", cprop)
if "name" in cprop:
self.controls[cprop["name"].upper()] = cprop
if "label" in cprop:
self.name2control[cprop["label"].upper()] \
= cprop["name"].upper()
self.config = dict()
for v in ( "platform", "app_version", "driver_timestamp",
"app", " build_timestamp"):
n = configinfo.find(v)
if n is not None:
if isinstance(n.text, str):
self.config[v] = n.text
n = configinfo.find("root/id")
if n is not None:
if isinstance(n.text, str):
self.config['id'] = n.text
xelm = configinfo.find("product/id")
if xelm is not None:
if hasattr(xelm, 'text'):
self.config["product_id"] = xelm.text
# print("self.controls : ", self.controls)
#self._printdict(self.controls)
#print("self.name2control : ", self.name2control)
def _get_control_id(self, comm):
""" command name to command ID """
if not self.controls:
self.load_conf()
c = comm.strip().upper()
if c in self.controls:
return c
if c in self.name2control:
return self.name2control[c]
return None
##
## property
##
def _get_platform(self):
""" name of ISY platform (readonly) """
return self.config["platform"]
platform = property(_get_platform)
def _get_id(self):
""" id of ISY (readonly) """
return self.config["id"]
id = property(_get_id)
def _get_app_version(self):
""" name of ISY app_version (readonly) """
return self.config["app_version"]
app_version = property(_get_app_version)
# def _get_debug(self):
# """ debug flag for Obj """
# return self._debug
# def _set_debug(self, val):
# self._debug = val
# debug = property(_get_debug,_set_debug)
##
## Logs
##
def load_log_type(self):
""" load log type tables
args: None
**not implemented **
"""
if self.debug & 0x01:
print("load_log_type")
pass
def load_log_id(self):
""" load log id tables
**not implemented **
"""
if self.debug & 0x01:
print("load_log_id")
pass
def log_reset(self, errorlog = 0):
""" clear log lines in ISY
args:
errorlog = flag clear error
"""
self.log_query(errorlog, 1)
def log_iter(self, error = 0):
""" iterate though log lines
args:
error : return error logs or not
returns:
an iterator over log entries
"""
for l in self.log_query(error):
yield l
def log_query(self, errorlog = 0, resetlog = 0):
""" get log from ISY """
xurl = self.baseurl + "/rest/log"
if errorlog:
xurl += "/error"
if resetlog:
xurl += "?reset=true"
if self.debug & 0x02:
print("xurl = " + xurl)
req = URL.Request(xurl)
try:
res = self._opener.open(req)
except URL.URLError as e:
# Error log can return a 404 is there are not logs ( yet )
return [ ]
else:
data = res.read()
res.close()
return data.splitlines()
def log_format_line(self, line):
""" format a ISY log line into a more human readable form
** not implemented **
"""
pass
##
## X10 Code
##
_x10re = re.compile(r'([a-pA-P]\d{,2})')
_x10comm = { 'alllightsoff' : 1,
'status off' : 2,
'on' : 3,
'Preset dim' : 4,
'alllightson' : 5,
'hail ack' : 6,
'bright' : 7,
'status on' : 8,
'extended code' : 9,
'status request' : 10,
'off' : 11,
'preset dim' : 12,
'alloff' : 13,
'hail req' : 14,
'dim' : 15,
'extended data' : 16 }
def _get_x10_comm_id(self, comm):
""" X10 command name to id """
comm = str(comm).strip().lower()
if comm.isdigit():
if int(comm) >= 1 and int(comm) <= 16:
return comm
else:
raise IsyValueError("bad x10 command digit : " + comm)
if comm in self._x10comm:
return self._x10comm[comm]
else:
raise IsyValueError("unknown x10 command : " + comm)
def x10_comm(self, unit, cmd):
""" direct send x10 command """
xcmd = self._get_x10_comm_id(str(cmd))
unit = unit.strip().upper()
if not re.match("[A-P]\d{,2}", unit):
raise IsyValueError("bad x10 unit name : " + unit)
# print("X10 sent : " + str(unit) + " : " + str(xcmd))
xurl = "/rest/X10/" + str(unit) + "/" + str(xcmd)
if self.debug & 0x02 : print("xurl = " + xurl)
resp = self._getXMLetree(xurl)
#self._printXML(resp)
#self._printinfo(resp)
if resp.attrib["succeeded"] != 'true':
raise IsyResponseError("X10 command error : unit=" + str(unit) + " cmd=" + str(cmd))
# /rest/time
# Returns system time
#
#/rest/network
# Returns network configuration
# /rest/sys
# returns system configuration
#
# /rest/subscriptions
# Returns the state of subscriptions
def subscriptions(self):
""" get event subscriptions list and states
args: none
Returns the state of subscriptions
calls : /rest/subscriptions
"""
xurl = "/rest/subscriptions"
if self.debug & 0x02 : print("xurl = " + xurl)
resp = self._getXMLetree(xurl)
#self._printXML(resp)
return et2d(resp)
def network(self):
""" network configuration
args: none
Returns network configuration
calls /rest/network
"""
xurl = "/rest/network"
if self.debug & 0x02 : print("xurl = " + xurl)
resp = self._getXMLetree(xurl)
#self._printXML(resp)
return et2d(resp)
def sys(self):
""" system configuration
args: none
calls : /rest/sys
"""
xurl = "/rest/sys"
if self.debug & 0x02 : print("xurl = " + xurl)
resp = self._getXMLetree(xurl)
#self._printXML(resp)
return et2d(resp)
def time(self):
""" system time of ISY
args: none
calls : /rest/time
"""
xurl = "/rest/time"
resp = self._getXMLetree(xurl)
#self._printXML(resp)
return et2d(resp)
def batch(self, on=-1):
""" Batch mode
args values:
1 = Turn Batch mode on
0 = Turn Batch mode off
-1 or None = Return Batch mode status
calls /rest/batchmode
"""
xurl = "/rest/batchmode"
if on == 0:
xurl += "/off"
elif on == 1:
xurl += "/on"
if self.debug & 0x02 : print("xurl = " + xurl)
resp = self._getXMLetree(xurl)
if resp is None:
print 'The server couldn\'t fulfill the request.'
raise IsyResponseError("Batch")
else:
#self._printXML(resp)
return resp
#/rest/batterypoweredwrites
def batterypoweredwrites(self, on=-1):
""" Battery Powered Writes
args values:
1 = Turn Battery Powered Writes on
0 = Turn Battery Powered Writes off
-1 or None = Return Battery Powered Writes status
returns status of Battery Powered device operations
calls /rest/batteryPoweredWrites/
"""
xurl = "rest/batteryPoweredWrites/"
if on == 0:
xurl += "/off"
elif on == 1:
xurl += "/on"
if self.debug & 0x02 : print("xurl = " + xurl)
resp = self._getXMLetree(xurl)
if resp != None:
#self._printXML(resp)
return et2d(resp)
def electricity(self):
"""
electricity status
args: none
Returns electricity module info, "Energy Monitor",
"Open ADR" and "Flex Your Power" status
Only applicable to 994 Z Series.
calls: /rest/electricity
"""
xurl = "/rest/electricity"
if self.debug & 0x02:
print("xurl = " + xurl)
resp = self._getXMLetree(xurl)
if resp != None:
#self._printXML(resp)
return et2d(resp)
##
## Callback functions
##
def callback_set(self, nid, func, *args):
"""set a callback function for a Node
args:
node id
reference to a function
* arg list
Sets up a callback function that will be called whenever there
is a change event for the specified node
Only one callback per node is supported;
if a callback function is already registered for a
node or var id it will be replaced
requires IsyClass option "eventupdates" to be set
"""
if not isinstance(func, collections.Callable):
raise IsyValueError("callback_set : Invalid Arg, function not callable")
# func.__repr__()
if self.callbacks is None:
self.callbacks = dict()
(idtype, nodeid) = self._node_get_id(nid)
if nodeid is None:
# raise LookupError("no such Node : " + str(nodeid) )
self.callbacks[nid] = (func, args)
else:
self.callbacks[nodeid] = (func, args)
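    # Hedged callback usage sketch (node address and handler are made up):
    #   def on_change(evnt, tag):
    #       print "event for", tag, ":", evnt["control"]
    #   isy.callback_set('11 22 33 1', on_change, 'porch')
    # The event thread then calls on_change() with the raw evnt_dat dict
    # plus the extra args captured at registration time.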
def callback_get(self, nid):
"""get a callback funtion for a Nodes
args:
node id
returns referance to registared callback function for a node
no none exist then value "None" is returned
"""
if self.callbacks != None:
(idtype, nodeid) = self._node_get_id(nid)
if nodeid != None and nodeid in self.callbacks:
return self.callbacks[nodeid]
return None
def callback_del(self, nid):
"""delete a callback funtion
args:
node id
delete a callback funtion for a Node, if exists.
no error is raised if callback does not exist
"""
if self.callbacks != None:
(idtype, nodeid) = self._node_get_id(nid)
if nodeid != None and nodeid in self.callbacks:
del self.callbacks[nodeid]
##
## support functions
##
def _printinfolist(self, uobj, ulabel="_printinfo"):
print("\n\n" + ulabel + " : ")
for attr in dir(uobj):
print(" obj.%s = %s" % (attr, getattr(uobj, attr)))
print("\n\n")
##
## the following are obj independent get methods
##
#
# Untested
#
def gettype(self, nobj):
if isinstance(nobj, IsySubClass):
return nobj.objtype()
(idtype, nid) = self._node_get_id(nobj)
return(idtype)
#
# Untested
#
def getid(self, objaddr):
(idtype, nid) = self._node_get_id(objaddr)
return(nid)
#
# Untested
#
def getobj(self, objaddr):
""" access node obj line a dictionary entery """
(idtype, nid) = self._node_get_id(objid)
if nid is None:
raise IsyValueError("unknown node/obj : " + objid)
if nid in self.nodeCdict:
return self.nodeCdict[nid]
if idtype in ['node', 'group', 'folder']:
return self.get_node(nid)
elif idtype == "var":
return self.get_var(nid)
elif idtype == "prog":
return self.get_prog(nid)
else:
raise IsyValueError("don't know how to get obj for type : " + idtype)
##
## Special Methods
##
# Design question:
# __get/setitem__ returns a node obj ?
def __getitem__(self, nodeaddr):
""" access node obj line a dictionary entery """
if nodeaddr in self.nodeCdict:
return self.nodeCdict[str(nodeaddr)]
else:
return self.get_node(nodeaddr)
def __setitem__(self, nodeaddr, val):
""" This allows you to set the status of a Node by
addressing it as dictionary entery """
val = int(val)
if val > 0:
self.node_comm(nodeaddr, "DON", val)
else:
self.node_comm(nodeaddr, "DOF")
def __delitem__(self, nodeaddr):
raise IsyPropertyError("__delitem__ : can't delete nodes : " + str(nodeaddr) )
def __iter__(self):
""" iterate though Node Obj (see: node_iter() ) """
return self.node_iter()
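    # Hedged sketch of the dictionary-style access defined above
    # (node name is made up):
    #   isy['Porch Light'] = 255   # __setitem__ -> node_comm(..., "DON", 255)
    #   isy['Porch Light'] = 0     # __setitem__ -> node_comm(..., "DOF")
    #   node = isy['Porch Light']  # __getitem__ -> cached node obj
    #   for n in isy: pass         # __iter__ -> node_iter()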
def __del__(self):
if self.debug & 0x80:
print "__del__ ", self.__repr__()
#if isinstance(self._isy_event, ISYEvent):
# #ISYEvent._stop_event_loop()
if hasattr(self, "_isy_event"):
if hasattr(self._isy_event, "_shut_down"):
self._isy_event._shut_down = 1
if hasattr(self, "nodeCdict" ):
self.nodeCdict.clear()
if hasattr(self, "varCdict" ):
self.varCdict.clear()
if hasattr(self, "progCdict" ):
self.progCdict.clear()
if hasattr(self, "folderCdict" ):
self.folderCdict.clear()
# the reason for this is that
#for k in self.nodeCdict.keys():
# del self.nodeCdict[k]
#for k in self.varCdict.keys():
# del self.varCdict[k]
#for k in self.progCdict.keys():
# del self.progCdict[k]
#for k in self.folderCdict.keys():
# del self.folderCdict[k]
def __repr__(self):
return "<Isy %s at 0x%x>" % (self.addr, id(self))
# def debugerror(self):
# print("debugerror")
# raise IsyPropertyError("debugerror : test IsyPropertyError ")
def _printdict(self, dic):
""" Pretty Print dictionary """
print("===START===")
pprint.pprint(dic)
print("===END===")
def _writedict(self, d, filen):
""" Pretty Print dict to file """
with open(filen, 'w') as fi:
pprint.pprint(d, fi)
def IsyGetArg(lineargs):
"""
takes argv and extracts name/pass/ipaddr options
"""
# print "IsyGetArg ", lineargs
addr=""
upass=""
uname=""
i = 0
while i < len(lineargs):
#print "len = ", len(lineargs)
#print "lineargs =", lineargs
#print "check :", i, ":", lineargs[i],
if lineargs[i] in ['--isyaddress', '-isyaddress', '--isyaddr', '-isyaddr']:
lineargs.pop(i)
addr = lineargs.pop(i)
continue
elif lineargs[i] in ['--isypass', '-isypass']:
lineargs.pop(i)
upass = lineargs.pop(i)
continue
elif lineargs[i] in ['--isyuser', '-isyuser']:
lineargs.pop(i)
uname = lineargs.pop(i)
continue
i += 1
# if not addr:
# addr = os.getenv('ISY_ADDR', "isy")
# if not uname:
# userl = os.getenv('ISY_USER', "admin")
# if not upass:
# userp = os.getenv('ISY_PASS', "admin")
return(addr, uname, upass)
def log_time_offset():
""" calculates time format offset used in ISY event logs to localtime format """
lc_time = time.localtime()
gm_time = time.gmtime()
return ((lc_time[3]) - (gm_time[3] - gm_time[8])) * 60 * 60
# index 3 represents the hours
# index 8 represents isdst (daylight saving time boolean (0/1))
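# Worked example for log_time_offset() (the timezone is an assumption):
# for a host in US/Eastern during DST, localtime() reports hour H-4 while
# gmtime() reports hour H, and gmtime()[8] is always 0, so the function
# returns (-4 - 0) * 60 * 60 == -14400 seconds.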
#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
import __main__
print(__main__.__file__)
print("syntax ok")
exit(0)
|
evilpete/ISYlib-python
|
ISY/IsyClass.py
|
Python
|
bsd-2-clause
| 68,214
|
[
"Elk"
] |
e470a613d0d88b93dc2eff64d9841f7cb130d041475f2ef8e6065506934a0cf3
|
import math
import numpy as np
import matplotlib.pyplot as plt
import skimage.transform as sktr
from unsharp import *
def get_points(im1, im2):
print('Please select 2 points in each image for alignment.')
plt.imshow(im1)
p1, p2 = plt.ginput(2)
plt.close()
plt.imshow(im2)
p3, p4 = plt.ginput(2)
plt.close()
return (p1, p2, p3, p4)
def recenter(im, r, c):
R, C, _ = im.shape
rpad = np.abs(2*r+1 - R)
cpad = np.abs(2*c+1 - C)
return np.pad(
im, [(0 if r > (R-1)/2 else rpad, 0 if r < (R-1)/2 else rpad),
(0 if c > (C-1)/2 else cpad, 0 if c < (C-1)/2 else cpad),
(0, 0)], 'constant')
def find_centers(p1, p2):
cx = np.round(np.mean([p1[0], p2[0]]))
cy = np.round(np.mean([p1[1], p2[1]]))
return cx, cy
def align_images(im1, im2, pts):
p1, p2, p3, p4 = pts
h1, w1, b1 = im1.shape
h2, w2, b2 = im2.shape
cx1, cy1 = find_centers(p1, p2)
cx2, cy2 = find_centers(p3, p4)
im1 = recenter(im1, cy1, cx1)
im2 = recenter(im2, cy2, cx2)
return im1, im2
def rescale_images(im1, im2, pts):
p1, p2, p3, p4 = pts
len1 = np.sqrt((p2[1] - p1[1])**2 + (p2[0] - p1[0])**2)
len2 = np.sqrt((p4[1] - p3[1])**2 + (p4[0] - p3[0])**2)
dscale = len2/len1
if dscale < 1:
im1 = sktr.rescale(im1, dscale)
else:
im2 = sktr.rescale(im2, 1./dscale)
return im1, im2
def rotate_im1(im1, im2, pts):
p1, p2, p3, p4 = pts
theta1 = math.atan2(-(p2[1] - p1[1]), (p2[0] - p1[0]))
theta2 = math.atan2(-(p4[1] - p3[1]), (p4[0] - p3[0]))
dtheta = theta2 - theta1
im1 = sktr.rotate(im1, dtheta*180/np.pi)
return im1, dtheta
def match_img_size(im1, im2, dims1, dims2):
    # Note: the original signature used Python-2-only tuple-unpacking
    # parameters ((oh1, ow1), (oh2, ow2)); dims1/dims2 keep the arity but
    # are unused. Slice bounds are cast to int since np.floor/np.ceil
    # return floats.
    # Make images the same size
    h1, w1, c1 = im1.shape
    h2, w2, c2 = im2.shape
    if h1 < h2:
        im2 = im2[int(np.floor((h2-h1)/2.)) : -int(np.ceil((h2-h1)/2.)), :, :]
    elif h1 > h2:
        im1 = im1[int(np.floor((h1-h2)/2.)) : -int(np.ceil((h1-h2)/2.)), :, :]
    if w1 < w2:
        im2 = im2[:, int(np.floor((w2-w1)/2.)) : -int(np.ceil((w2-w1)/2.)), :]
    elif w1 > w2:
        im1 = im1[:, int(np.floor((w1-w2)/2.)) : -int(np.ceil((w1-w2)/2.)), :]
assert im1.shape == im2.shape
return im1, im2
def combine_freq(im1, im2, sigma1, sigma2):
high = Laplacian(im1, sigma1)
low = Gaussian(im2, sigma2)
return (high + low)/2.
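# A minimal, hedged usage sketch of the pipeline above (file names and
# sigma values are made up; Gaussian/Laplacian come from unsharp.py):
#   im1 = plt.imread('near.jpg') / 255.
#   im2 = plt.imread('far.jpg') / 255.
#   pts = get_points(im1, im2)
#   im1, im2 = rescale_images(im1, im2, pts)
#   im1, _ = rotate_im1(im1, im2, pts)
#   im1, im2 = align_images(im1, im2, pts)
#   im1, im2 = match_img_size(im1, im2, im1.shape[:2], im2.shape[:2])
#   hybrid = combine_freq(im1, im2, sigma1=5, sigma2=5)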
|
rachelalbert/CS294-26_code
|
project3_code/part_1/align_images.py
|
Python
|
mit
| 2,363
|
[
"Gaussian"
] |
e3f8d595c6d915f37dcb6e34245792556dbe0568ce2c8af5582e366ee385cd8d
|
import ast
import os
import sys
from .python.ast import Visitor
from .python.debug import dump
def transpile(input, prefix='.', outdir=None, namespace='python', verbosity=0):
transpiler = Transpiler(namespace=namespace, verbosity=verbosity)
for file_or_dir in input:
if os.path.isfile(file_or_dir):
if verbosity:
print("Compiling %s ..." % file_or_dir)
with open(file_or_dir) as source:
ast_module = ast.parse(source.read(), mode='exec')
transpiler.transpile(file_or_dir, ast_module, prefix)
elif os.path.isdir(file_or_dir):
for root, dirs, files in os.walk(file_or_dir, followlinks=True):
for filename in files:
if os.path.splitext(filename)[1] == '.py':
source_file = os.path.join(root, filename)
if verbosity:
print("Compiling %s ..." % source_file)
with open(source_file) as source:
ast_module = ast.parse(source.read(), mode='exec')
transpiler.transpile(source_file, ast_module, prefix)
else:
print("Unknown source file: %s" % file_or_dir, file=sys.stderr)
transpiler.write(outdir)
class Transpiler:
def __init__(self, namespace="python", verbosity=0):
self.namespace = namespace
self.classfiles = []
self.verbosity = verbosity
def write(self, outdir):
# Create directory tree to store classfile
if outdir:
basedir = [outdir]
else:
basedir = []
for namespace, class_name, javaclassfile in self.classfiles:
dirname = os.path.join(*(basedir + namespace.split('.')))
classfilename = os.path.join(dirname, '%s.class' % class_name)
try:
os.makedirs(os.path.dirname(classfilename))
except FileExistsError:
pass
if self.verbosity:
print("Writing %s ..." % classfilename)
with open(classfilename, 'wb') as out:
javaclassfile.write(out)
def transpile(self, filename, ast_module, prefix):
"Transpile a Python source file into class files"
# Determine what portion of the filename is part of the
# common source directory, and which is namespace.
common = os.path.commonprefix([
os.path.abspath(prefix),
os.path.abspath(filename)
])
self.transpile_code(os.path.abspath(filename)[len(common):], ast_module)
def transpile_string(self, filename, code_string):
"Transpile a string containing Python code into class files"
ast_module = ast.parse(code_string, mode='exec')
self.transpile_code(filename, ast_module)
def transpile_code(self, filename, ast_module):
"Transpile a code object into class files"
# Convert the AST into Java opcodes
if self.verbosity > 1:
print('=' * 75)
print(dump(ast_module))
print('=' * 75)
module = Visitor(self.namespace, filename, verbosity=self.verbosity).visit(ast_module)
# Transpile the module code, adding any classfiles generated
# to the list to be exported.
self.classfiles.extend(module.transpile())
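    # Hedged usage sketch (file name and source text are made up):
    #   transpiler = Transpiler(namespace='python', verbosity=1)
    #   transpiler.transpile_string('hello.py', 'print("hello")')
    #   transpiler.write('build')   # writes the generated .class files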
|
Felix5721/voc
|
voc/transpiler.py
|
Python
|
bsd-3-clause
| 3,397
|
[
"VisIt"
] |
fb58dba365d6d82ec9beb3b9a27b0721c0085207dd5573c175f5dd3911efe0d9
|
#!/usr/bin/env python
# encoding: utf-8
import inspect
from functools import wraps
from decorator import decorator
import pytest
def use_bintypes(*bintypes):
"""Decorate test to run only for the given bintypes."""
def check_bintype(f):
@wraps(f)
def decorated_function(self, *args, **kwargs):
if kwargs['galaxy'].bintype not in bintypes:
pytest.skip('Only use {}'.format(', '.join(bintypes)))
return f(self, *args, **kwargs)
return decorated_function
return check_bintype
def use_releases(*releases):
"""Decorate test to run only for the given releases."""
def check_release(f):
@wraps(f)
def decorated_function(self, *args, **kwargs):
if 'release' in kwargs.keys():
release = kwargs['release']
elif 'galaxy' in kwargs.keys():
release = kwargs['galaxy'].release
if release not in releases:
pytest.skip('Only use {}'.format(', '.join(releases)))
return f(self, *args, **kwargs)
return decorated_function
return check_release
class MetaUse(object):
"""Meta class to define a testing class that decorates all tests to use the specified fxn."""
def __init__(self, *args):
self.args = args
def __call__(self, decorated_class):
for attr in inspect.getmembers(decorated_class, inspect.isfunction):
# only decorate public functions
if attr[0][0] != '_':
setattr(decorated_class, attr[0],
self.fxn(*self.args)(attr[1]))
return decorated_class
class UseBintypes(MetaUse):
def __init__(self, *args):
self.args = args
self.fxn = use_bintypes
class UseReleases(MetaUse):
def __init__(self, *args):
self.args = args
self.fxn = use_releases
# These decorators for functions and classes allow to skip or run tests only for galaxies
# that have certain bintypes, templates, or releases
def marvin_test_if(mark='skip', **kfilter):
"""Decorate test to skip/include certain parameters.
Parameters:
mark ({'skip', 'include', 'xfail'}):
Whether the decorator should skip the test if it matches the filter
conditions, include it only if it matches the conditions, or mark
it as an expected failure.
kfilter (kwargs):
A keyword argument whose name should match one of the fixtures in
the test. If the fixture returns a single value, the keyword must
define a list of the fixture values to skip, include, or xfail.
If the fixture returns an object, the value of the kwarg must be a
dictionary of the object attributes to filter on. The ``mark`` is
applied to all the attributes in the dictionary equally.
Examples:
If you want to only test for galaxies with bintype ``'STON'`` and
template ``'MILES-THIN'`` you can do::
@marvin_test_if(mark='include', galaxy=dict(bintype=['STON'], template=['MILES-THIN']))
You can also mark all tests with ``data_origin=['file']`` as expected
failure::
@marvin_test_if(mark='xfail', data_origin=['file'])
``marvin_test_if`` decorators can be concatenated::
@marvin_test_if(mark='xfail', data_origin=['file'])
@marvin_test_if(mark='skip', galaxy=dict(bintype=['SPX']))
will skip ``'SPX'`` bintypes and expect a failure on ``'file'``
data_origins.
"""
def _should_skip(filter_values, fixture_value, prop_name):
ll = ', '.join(filter_values)
if mark == 'skip' and fixture_value in filter_values:
return pytest.skip('Skipping {0}={1!r}'.format(prop_name, ll))
elif mark == 'include' and fixture_value not in filter_values:
return pytest.skip('Skipping all {0} except {1!r}'.format(prop_name, ll))
elif mark == 'xfail' and fixture_value in filter_values:
return pytest.xfail('Expected failure if {0}={1!r}'.format(prop_name, ll))
return False
@decorator
def decorated_function(ff, *args, **kwargs):
ff_attr_names = inspect.getargspec(ff).args
ff_attrs = {}
for ii in range(len(args)):
ff_attrs[ff_attr_names[ii]] = args[ii]
assert mark in ['skip', 'include', 'xfail'], \
'valid marks are \'skip\', \'include\', and \'xfail\''
if len(kfilter) > 1:
raise ValueError('marvin_test_if only accepts one filter condition.')
fixture_to_filter, filter_attributes = list(kfilter.items())[0]
if fixture_to_filter not in ff_attrs:
return ff(*args, **kwargs)
if not isinstance(filter_attributes, dict):
_should_skip(filter_attributes, ff_attrs[fixture_to_filter], fixture_to_filter)
else:
for filter_attribute, filter_values in filter_attributes.items():
fixture = ff_attrs[fixture_to_filter]
if not hasattr(fixture, filter_attribute):
continue
fixture_value = getattr(fixture, filter_attribute)
if _should_skip(filter_values, fixture_value, filter_attribute):
break
return ff(*args, **kwargs)
return decorated_function
class marvin_test_if_class(object):
"""Decorate all tests in a class to run only for, or skip, certain parameters.
See ``marvin_test_if``. This decorator is the equivalent for decorating
classes instead of functions.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __call__(self, decorated_class):
for attr in inspect.getmembers(decorated_class, inspect.isfunction):
# only decorate public functions
if attr[0][0] != '_':
setattr(decorated_class, attr[0],
marvin_test_if(*self.args,
**self.kwargs)(getattr(decorated_class, attr[0])))
return decorated_class
def skipIfNoDB(test):
"""Decorate a test to skip if DB ``session`` is ``None``."""
@wraps(test)
def wrapper(self, db, *args, **kwargs):
if db.session is None:
pytest.skip('Skip because no DB.')
else:
return test(self, db, *args, **kwargs)
return wrapper
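# Hedged usage sketch of the decorators above (fixture and attribute
# names are illustrative, not part of this module):
#   @marvin_test_if(mark='include', galaxy=dict(bintype=['SPX']))
#   def test_spx_only(self, galaxy): ...
#
#   @skipIfNoDB
#   def test_needs_db(self, db): ...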
|
sdss/marvin
|
tests/__init__.py
|
Python
|
bsd-3-clause
| 6,457
|
[
"Galaxy"
] |
e94b459b8aa847b2c6c5326f6b1424f5ea94eb4c157eaa2f637a75773b5501cf
|
from __main__ import vtk, qt, ctk, slicer
import string
import numpy
import collections
class NodeInformation:
def __init__(self, dataNode, labelNode, allKeys):
self.nodeInformation = collections.OrderedDict()
self.nodeInformation["Node"] = "self.nodeName(self.dataNode)"
self.dataNode = dataNode
self.labelNode = labelNode
self.keys = set(allKeys).intersection(self.nodeInformation.keys())
def nodeName (self, dataNode):
return (dataNode.GetName())
def EvaluateFeatures(self):
# Evaluate dictionary elements corresponding to user-selected keys
if not self.keys:
return(self.nodeInformation)
for key in self.keys:
self.nodeInformation[key] = eval(self.nodeInformation[key])
return(self.nodeInformation)
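# Hedged usage sketch (the node variables are assumptions; inside a Slicer
# session one might write):
#   info = NodeInformation(dataNode, labelNode, allKeys=['Node'])
#   print(info.EvaluateFeatures())   # eval()s the 'Node' entry -> node name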
|
vnarayan13/Slicer-OpenCAD
|
HeterogeneityCAD/FeatureExtractionLib/NodeInformation.py
|
Python
|
mit
| 803
|
[
"VTK"
] |
20f020261beacbc62b8290e2a7f5688e8ebe79e39111688b1df943e16bc35d77
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from pymatgen.core.libxcfunc import LibxcFunc
from pymatgen.util.testing import PymatgenTest
class LibxcFuncTest(PymatgenTest):
def test_libxcfunc_api(self):
"""Testing libxcfunc_api."""
# LDA correlation: Hedin & Lundqvist
xc = LibxcFunc.LDA_C_HL
print(xc)
assert not xc.is_x_kind and xc.is_c_kind and not xc.is_xc_kind
assert xc.is_lda_family and not xc.is_gga_family
print(xc.info_dict)
assert xc.family in LibxcFunc.all_families()
assert xc.kind in LibxcFunc.all_kinds()
# Test if object can be serialized with Pickle.
self.serialize_with_pickle(xc, test_eq=True)
# Test if object supports MSONable
self.assertMSONable(xc, test_if_subclass=False)
|
vorwerkc/pymatgen
|
pymatgen/core/tests/test_libxcfunc.py
|
Python
|
mit
| 859
|
[
"pymatgen"
] |
65b6924ba278f3327bcbe1eafc3fb7ccdfb44c9e6e209cf7c1d779b6e4b48b67
|
from __future__ import (absolute_import, division, print_function)
import sys
import unittest
import numpy as np
from mantid.simpleapi import *
from mantid.api import *
from testhelpers import run_algorithm
class MatchPeaksTest(unittest.TestCase):
_args = {}
def setUp(self):
func0 = "name=Gaussian, PeakCentre=3.2, Height=10, Sigma=0.3"
func1 = "name=Gaussian, PeakCentre=6, Height=10, Sigma=0.3"
func2 = "name=Gaussian, PeakCentre=4, Height=10000, Sigma=0.01"
_input_ws_0 = 'spectrum0' # Gaussian
_input_ws_1 = 'spectrum1' # Gaussian outside tolerance interval
_input_ws_2 = 'spectrum2' # Gaussian, too narrow peak
_input_ws_3 = 'spectrum3' # Flat background
self._ws_shift = 'to_be_shifted'
spectrum_0 = CreateSampleWorkspace(Function='User Defined',
WorkspaceType='Histogram',
UserDefinedFunction=func0,
NumBanks=1, BankPixelWidth=1,
XUnit='DeltaE', XMin=0, XMax=7, BinWidth=0.099,
OutputWorkspace=_input_ws_0)
spectrum_1 = CreateSampleWorkspace(Function='User Defined',
WorkspaceType='Histogram',
UserDefinedFunction=func1,
NumBanks=1, BankPixelWidth=1,
XUnit='DeltaE', XMin=0, XMax=7, BinWidth=0.099,
OutputWorkspace=_input_ws_1)
spectrum_2 = CreateSampleWorkspace(Function='User Defined',
WorkspaceType='Histogram',
UserDefinedFunction=func2,
NumBanks=1, BankPixelWidth=1,
XUnit='DeltaE', XMin=0, XMax=7, BinWidth=0.099,
OutputWorkspace=_input_ws_2)
spectrum_3 = CreateSampleWorkspace(Function='Flat background',
WorkspaceType='Histogram',
NumBanks=1, BankPixelWidth=1,
XUnit='DeltaE', XMin=0, XMax=7, BinWidth=0.099,
OutputWorkspace=_input_ws_3)
AppendSpectra(InputWorkspace1=spectrum_0, InputWorkspace2=spectrum_1, OutputWorkspace=self._ws_shift)
AppendSpectra(InputWorkspace1=self._ws_shift, InputWorkspace2=spectrum_2, OutputWorkspace=self._ws_shift)
AppendSpectra(InputWorkspace1=self._ws_shift, InputWorkspace2=spectrum_3, OutputWorkspace=self._ws_shift)
# Input workspace 2
self._ws_in_2 = 'in_2'
func3 = "name=LinearBackground, A0=0.3; name=Gaussian, PeakCentre=4.2, Height=10, Sigma=0.3"
CreateSampleWorkspace(Function='User Defined',
WorkspaceType='Histogram',
UserDefinedFunction=func3,
NumBanks=4, BankPixelWidth=1,
XUnit='DeltaE', XMin=0, XMax=7, BinWidth=0.099,
OutputWorkspace=self._ws_in_2)
# Input workspace 3
self._ws_in_3 = 'in_3'
func4 = "name=LinearBackground, A0=0.3; name=Gaussian, PeakCentre=2.5, Height=7, Sigma=0.15"
CreateSampleWorkspace(Function='User Defined',
WorkspaceType='Histogram',
UserDefinedFunction=func4,
NumBanks=4, BankPixelWidth=1,
XUnit='DeltaE', XMin=0, XMax=7, BinWidth=0.099,
OutputWorkspace=self._ws_in_3)
# Input workspaces that are incompatible
self._in1 = 'wrong_number_of_histograms'
CreateSampleWorkspace(Function='Flat background',
WorkspaceType='Histogram',
NumBanks=1, BankPixelWidth=1,
XUnit='DeltaE', XMin=0, XMax=7, BinWidth=0.1,
OutputWorkspace=self._in1)
self._in2 = 'wrong_number_of_bins'
CreateSampleWorkspace(Function='Flat background',
WorkspaceType='Histogram',
NumBanks=4, BankPixelWidth=1,
XUnit='DeltaE', XMin=0, XMax=8, BinWidth=0.1,
OutputWorkspace=self._in2)
# mtd[self._ws_shift].blocksize() = 70
# mid = 35
# Details:
# workspace has peak positions at : [32 35(mid) 40 35(mid)]
# the corresponding Y-values are (rounded) : [10 0 3.4 1.0]
#
# -> test shifting to the right and to the left
# -> test options to use FindEPP, maximum peak position or no shifting
def tearDown(self):
if AnalysisDataService.doesExist('to_be_shifted'):
DeleteWorkspace(self._ws_shift)
if AnalysisDataService.doesExist('in_2'):
DeleteWorkspace(self._ws_in_2)
if AnalysisDataService.doesExist('output'):
DeleteWorkspace(mtd['output'])
if AnalysisDataService.doesExist('wrong_number_of_histograms'):
DeleteWorkspace(self._in1)
if AnalysisDataService.doesExist('wrong_number_of_bins'):
DeleteWorkspace(self._in2)
def testValidateInputWorkspace(self):
self._args['OutputWorkspace'] = 'output'
self.assertTrue(sys.version_info >= (2, 7))
with self.assertRaises(RuntimeError) as contextManager:
self._args['InputWorkspace'] = self._in1
run1 = run_algorithm('MatchPeaks', **self._args)
self.assertTrue(run1.isExecuted())
self.assertEqual('Some invalid Properties found', str(contextManager.exception))
with self.assertRaises(RuntimeError) as contextManager:
self._args['InputWorkspace'] = self._in2
run2 = run_algorithm('MatchPeaks', **self._args)
self.assertTrue(run2.isExecuted())
self.assertEqual('Some invalid Properties found', str(contextManager.exception))
def testValidateInputWorkspace2(self):
self._args['InputWorkspace'] = self._ws_shift
self._args['OutputWorkspace'] = 'output'
self.assertTrue(sys.version_info >= (2, 7))
with self.assertRaises(RuntimeError) as contextManager:
self._args['InputWorkspace2'] = self._in1
run_algorithm('MatchPeaks', **self._args)
self.assertEqual('Some invalid Properties found', str(contextManager.exception))
with self.assertRaises(RuntimeError) as contextManager:
self._args['InputWorkspace2'] = self._in2
run_algorithm('MatchPeaks', **self._args)
self.assertEqual('Some invalid Properties found', str(contextManager.exception))
def testValidateInputWorkspace3(self):
self._args['InputWorkspace'] = self._ws_shift
self._args['InputWorkspace3'] = self._ws_in_3
self._args['OutputWorkspace'] = 'output'
self.assertTrue(sys.version_info >= (2, 7))
with self.assertRaises(RuntimeError) as contextManager:
run_algorithm('MatchPeaks', **self._args)
self.assertEqual('Some invalid Properties found', str(contextManager.exception))
def testMatchCenter(self):
# Input workspace should match its center
# Bin ranges of each spectrum:
# spectrum 0 : [(32-35), 70] => [3, 70] right shift
# spectrum 1 : [0, 70] no shift
# spectrum 2 : [0, 70 - (40-35)] => [0, 65] left shift
# spectrum 3 : [0, 70] no shift
# Final bin range is [3, 65-1]
self._args['InputWorkspace'] = self._ws_shift
self._args['OutputWorkspace'] = 'output'
alg_test = run_algorithm('MatchPeaks', **self._args)
self.assertTrue(alg_test.isExecuted())
shifted = AnalysisDataService.retrieve('output')
fit_table = FindEPP(shifted)
self.assertEqual(35, shifted.binIndexOf(fit_table.row(0)["PeakCentre"]))
self.assertEqual(35, np.argmax(shifted.readY(2)))
self._workspace_properties(shifted)
DeleteWorkspace(shifted)
DeleteWorkspace(fit_table)
def testBinRangeTable(self):
self._args['InputWorkspace'] = self._ws_shift
self._args['OutputWorkspace'] = 'output'
self._args['BinRangeTable'] = 'bin_range'
alg_test = run_algorithm('MatchPeaks', **self._args)
self.assertTrue(alg_test.isExecuted())
bin_range_table = AnalysisDataService.retrieve('bin_range')
# Size of the table and its column names
self.assertEqual(1, bin_range_table.rowCount())
self.assertEqual(2, bin_range_table.columnCount())
columns = ['MinBin', 'MaxBin']
self.assertEqual(columns, bin_range_table.getColumnNames())
# Bin range
self.assertEqual(3, bin_range_table.row(0)["MinBin"])
self.assertEqual(64, bin_range_table.row(0)["MaxBin"])
DeleteWorkspace(bin_range_table)
def testMasking(self):
self._args['InputWorkspace'] = self._ws_shift
self._args['OutputWorkspace'] = 'output'
self._args['MaskBins'] = True
alg_test = run_algorithm('MatchPeaks', **self._args)
self.assertTrue(alg_test.isExecuted())
masked = AnalysisDataService.retrieve('output')
for i in range(4):
for k in range(3):
self.assertEqual(0.0, masked.readY(i)[k], 'Mask spectrum {0} bin {1} failed'.format(i, k))
for k in range(65, 70):
self.assertEqual(0.0, masked.readY(i)[k], 'Mask spectrum {0} bin {1} failed'.format(i, k))
DeleteWorkspace(masked)
def testNoMasking(self):
self._args['InputWorkspace'] = self._ws_shift
self._args['OutputWorkspace'] = 'output'
self._args['MaskBins'] = False # this is the default behaviour
alg_test = run_algorithm('MatchPeaks', **self._args)
self.assertTrue(alg_test.isExecuted())
not_masked = AnalysisDataService.retrieve('output')
self.assertNotEqual(0, not_masked.readY(0)[0])
DeleteWorkspace(not_masked)
def testMatchInput2(self):
# Input workspace should match the peak of input workspace 2:
# has its peaks at bin 42
# shifts: 32-42 = -10 (right shift)
# 35-42 = -7 (right shift)
# 40-42 = -2 (right shift)
# 35-42 = -7 (right shift)
# new bin range [10, 70]
self._args['InputWorkspace'] = self._ws_shift
self._args['OutputWorkspace'] = 'output'
self._args['InputWorkspace2'] = self._ws_in_2
self._args['BinRangeTable'] = 'bin_range'
alg_test = run_algorithm('MatchPeaks', **self._args)
self.assertTrue(alg_test.isExecuted())
shifted = AnalysisDataService.retrieve('output')
bin_range_table = AnalysisDataService.retrieve('bin_range')
fit_table = FindEPP(shifted)
self.assertEqual(42, shifted.binIndexOf(fit_table.row(0)["PeakCentre"]))
self.assertEqual(42, np.argmax(shifted.readY(2)))
# Bin range
self.assertEqual(10, bin_range_table.row(0)["MinBin"])
self.assertEqual(70, bin_range_table.row(0)["MaxBin"])
self._workspace_properties(shifted)
DeleteWorkspace(shifted)
DeleteWorkspace(fit_table)
DeleteWorkspace(bin_range_table)
def testMatchInput2MatchOption(self):
# match option true:
# left shifts
# spectrum 0: 35 - 35 = 0 (no shift)
# spectrum 1: 42 - 35 = 7 (left shift)
# spectrum 2: 42 - 35 = 7 (left shift)
# spectrum 3: 42 - 35 = 7 (left shift)
# new bin range [0, 70-7-1]
self._args['InputWorkspace'] = self._ws_shift
self._args['InputWorkspace2'] = self._ws_in_2
self._args['MatchInput2ToCenter'] = True
alg_test = run_algorithm('MatchPeaks', **self._args)
self.assertTrue(alg_test.isExecuted())
shifted = AnalysisDataService.retrieve('output')
bin_range_table = AnalysisDataService.retrieve('bin_range')
fit_table = FindEPP(shifted)
self.assertEqual(32-7, shifted.binIndexOf(fit_table.row(0)["PeakCentre"]))
self.assertEqual(40-7, np.argmax(shifted.readY(2)))
# Bin range
self.assertEqual(0, bin_range_table.row(0)["MinBin"])
self.assertEqual(62, bin_range_table.row(0)["MaxBin"])
self._workspace_properties(shifted)
DeleteWorkspace(shifted)
DeleteWorkspace(fit_table)
DeleteWorkspace(bin_range_table)
def testMatchInput3(self):
# right shifts
# spectrum 0: 25 - 42 = -17 (right shift)
# spectrum 1: 25 - 42 = -17 (right shift)
# spectrum 2: 25 - 42 = -17 (right shift)
# spectrum 3: 25 - 42 = -17 (right shift)
self._args['InputWorkspace'] = self._ws_shift
self._args['InputWorkspace2'] = self._ws_in_2
self._args['InputWorkspace3'] = self._ws_in_3
alg_test = run_algorithm('MatchPeaks', **self._args)
self.assertTrue(alg_test.isExecuted())
shifted = AnalysisDataService.retrieve('output')
bin_range_table = AnalysisDataService.retrieve('bin_range')
fit_table = FindEPP(shifted)
self.assertEqual(32+17, shifted.binIndexOf(fit_table.row(0)["PeakCentre"]))
self.assertEqual(40+17, np.argmax(shifted.readY(2)))
# Bin range
self.assertEqual(17, bin_range_table.row(0)["MinBin"])
self.assertEqual(70, bin_range_table.row(0)["MaxBin"])
self._workspace_properties(shifted)
DeleteWorkspace(shifted)
DeleteWorkspace(fit_table)
DeleteWorkspace(bin_range_table)
def testOverride(self):
self._args['InputWorkspace'] = self._ws_shift
self._args['OutputWorkspace'] = self._ws_shift
alg_test = run_algorithm('MatchPeaks', **self._args)
self.assertTrue(alg_test.isExecuted())
shifted = AnalysisDataService.retrieve('to_be_shifted')
self.assertFalse(np.all(mtd['to_be_shifted'].extractY() - shifted.extractY()))
DeleteWorkspace(shifted)
def _workspace_properties(self, test_ws):
self.assertTrue(isinstance(test_ws, MatrixWorkspace), "Should be a matrix workspace")
self.assertTrue(test_ws.getRun().getLogData(), "Should have SampleLogs")
self.assertTrue(test_ws.getHistory().lastAlgorithm(), "Should have AlgorithmsHistory")
if __name__=="__main__":
unittest.main()
|
wdzhou/mantid
|
Framework/PythonInterface/test/python/plugins/algorithms/MatchPeaksTest.py
|
Python
|
gpl-3.0
| 14,834
|
[
"Gaussian"
] |
bec514bd51f3b3bc83de3dedb60a54feb73ed5372987ca04051e17d89a94c0f2
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_certificatemanagementprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of CertificateManagementProfile Avi RESTful Object
description:
- This module is used to configure CertificateManagementProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
name:
description:
- Name of the pki profile.
required: true
script_params:
description:
- List of customparams.
script_path:
description:
- Script_path of certificatemanagementprofile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create CertificateManagementProfile object
avi_certificatemanagementprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_certificatemanagementprofile
"""
RETURN = '''
obj:
description: CertificateManagementProfile (api/certificatemanagementprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
name=dict(type='str', required=True),
script_params=dict(type='list',),
script_path=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'certificatemanagementprofile',
set([]))
if __name__ == '__main__':
main()
|
kbrebanov/ansible
|
lib/ansible/modules/network/avi/avi_certificatemanagementprofile.py
|
Python
|
gpl-3.0
| 3,654
|
[
"VisIt"
] |
41d747838fc8e73de5dff2fb1202db3e60538781ecdbbef37f71da40828f9a15
|
#!/usr/bin/env python2
# Copyright (C) 2016, 2017
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#########################################################################################
# #
# ESPResSo++ Python script for F-AdResS protein in rigid water simulation including #
# a selfadjusting atomistic region (on the fly) #
# #
#########################################################################################
import mpi4py.MPI as MPI
import espressopp
from espressopp import Real3D
from espressopp.tools import gromacs
import math
import os
import time
import sys
from math import sqrt
import random
import logging
from datetime import datetime
# Performs simulation of fully atomistic peptide in aqueous solution, with a self-adjusting atomistic region
# Reads in peptide coord file (.gro) and topology (topol.top) written in gromacs format
# Assumes that in input file, peptide is listed before water
# Assumes there are no ions
# Uses force-based AdResS and thermodynamic force
# Assumes the atomistic region is defined such that the entire protein is always completely inside it
# Atomistic region is formed of a series of overlapping spheres
# The particles are stored in memory as follows:
# particles in protein each correspond to one coarse-grained particle and one atomistic particle (this is just because of the way particles are stored in espressopp, the protein is fully atomistic all the time anyway)
# solvent (water) molecules each correspond to one coarse-grained particle which maps to three atomistic particles
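# A worked example of the resulting id layout (a sketch for orientation only,
# derived from the index ranges defined just below):
#   ids 1..93        atomistic protein (93 atoms)
#   ids 94..30627    atomistic water (30534 atoms, 3 per molecule)
#   ids 30628..30720 CG copies of the 93 protein atoms
#   ids 30721..40898 CG water beads (10178 molecules)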
########################################################################
# 1. specification of the main system setup and simulation parameters #
########################################################################
# protein indices
atProtIndices = [x for x in range(1,94)] #1 to 93 inclusive
nProtAtoms = len(atProtIndices)
# indices of atoms in water molecules with adaptive resolution
atWaterIndices = [x for x in range(94,30628)] #water atoms, 94 to 30627 inclusive
nWaterAtoms = len(atWaterIndices)
nWaterAtomsPerMol = 3 #number of atoms per cg water bead
nWaterMols = nWaterAtoms/nWaterAtomsPerMol
particlePIDsADR = atProtIndices #atomistic indices of atoms at centres of spheres forming AdResS region
# input coordinates
inputcrdfile = "peptide.gro"
# atomistic forcefield
aatopfile = "topol.top"
# system parameters
# NB cutoff
nbCutoff = 1.25
# Interaction cutoff
intCutoff = 1.0
# VerletList skin size (also used for domain decomposition)
skin = 0.2 #nm
# the temperature of the system
temperatureConvFactor = 120.27239 # 1/(kB in kJ K-1 mol-1) (input vel should be in nm/ps), for converting from reduced units to K
temperature = 300.0 # Kelvin
temperature = float(temperature)/temperatureConvFactor #in units of kJ mol-1
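# e.g. 300 K / 120.27239 ~ 2.494, i.e. the thermostat below runs at ~2.494 kJ/mol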
# time step for the velocity verlet integrator
dt = 0.001 #ps
nSteps = 1000 #total number of steps
nStepsPerOutput = 100 #frequency for printing energies and trajectory
nOutput = nSteps/nStepsPerOutput
# Parameters for size of AdResS dimensions
ex_size = 1.00
hy_size = 1.00
print '# radius of atomistic region = ',ex_size
print '# thickness of hybrid region = ',hy_size
trjfile = "trj.gro"
# print ESPResSo++ version and compile info
print '# ',espressopp.Version().info()
# print simulation parameters (useful to have them in a log file)
print "# nbCutoff = ", nbCutoff
print "# intCutoff = ", intCutoff
print "# skin = ", skin
print "# dt = ", dt
print "# nSteps = ", nSteps
print "# output every ",nStepsPerOutput," steps"
########################################################################
# 2. read in coordinates and topology
########################################################################
## get info on (complete) atomistic system ##
print '# Reading gromacs top and gro files...'
# call gromacs parser for processing the top file (and included files) and the gro file
defaults, atTypes, atomtypesDict, atMasses, atCharges, atomtypeparameters, atBondtypes, bondtypeparams, atAngletypes, angletypeparams, atDihedraltypes, dihedraltypeparams, atImpropertypes, impropertypeparams, atExclusions, atOnefourpairslist, atX, atY, atZ, atVX, atVY, atVZ, atResnames, atResid, Lx, Ly, Lz = gromacs.read(inputcrdfile,aatopfile)
#initialize a map between atomtypes as integers and as strings
reverseAtomtypesDict = dict([(v, k) for k, v in atomtypesDict.iteritems()])
# delete from atomtypeparams any types not in system, so as not to conflict with any new types created later
for k in list(atomtypeparameters):
if k not in atTypes:
print "# Deleting unused type ",k,"/",reverseAtomtypesDict[k]," from atomtypeparameters, atomtypesDict and reverseAtomtypesDict"
del atomtypeparameters[k]
atomtypekey = reverseAtomtypesDict[k]
del reverseAtomtypesDict[k]
del atomtypesDict[atomtypekey]
# system box size
box = (Lx, Ly, Lz)
print "# Box size = ", box
nParticlesRead=len(atX)
print "# total number of particles read from atomistic config file = ",nParticlesRead
print "# number of atomistic particles in protein = ",nProtAtoms
print "# number of coarse-grained particles in protein = ",nProtAtoms
print "# number of atomistic particles in solvent = ",nWaterAtoms
print "# number of coarse-grained particles in solvent = ",nWaterMols
nParticlesTotal=nProtAtoms*2+nWaterAtoms+nWaterMols
print "# total number of particles after setup = ",nParticlesTotal
if (nParticlesRead != (nProtAtoms+nWaterAtoms)):
print "problem: no. particles in crd file != np. of atomistic particles specified"
print "values: ",nParticlesRead,nProtAtoms+nWaterAtoms
quit()
particleX=[]
particleY=[]
particleZ=[]
particlePID=[]
particleTypes=[]
particleMasses=[]
particleCharges=[]
particleTypestring=[]
particleVX=[]
particleVY=[]
particleVZ=[]
#atomistic particles (protein and water)
for i in range(nProtAtoms+nWaterAtoms):
particlePID.append(i+1)
particleMasses.append(atMasses[i])
particleCharges.append(atCharges[i])
particleTypes.append(atTypes[i])
particleTypestring.append('atomistic__')
particleX.append(atX[i])
particleY.append(atY[i])
particleZ.append(atZ[i])
particleVX.append(atVX[i])
particleVY.append(atVY[i])
particleVZ.append(atVZ[i])
#cg protein particles (same as atomistic)
for i in range(nProtAtoms):
particlePID.append(i+1+nProtAtoms+nWaterAtoms)
particleMasses.append(atMasses[i])
particleCharges.append(atCharges[i])
particleTypes.append(atTypes[i])
particleTypestring.append('cg_protein_')
particleX.append(atX[i])
particleY.append(atY[i])
particleZ.append(atZ[i])
particleVX.append(atVX[i])
particleVY.append(atVY[i])
particleVZ.append(atVZ[i])
#cg water particles
typeCG = max(reverseAtomtypesDict.keys())+2
reverseAtomtypesDict[typeCG]='WCG'
for i in range(nWaterMols):
particlePID.append(i+1+nProtAtoms*2+nWaterAtoms)
indexO=atWaterIndices[3*i]-1
particleMasses.append(atMasses[indexO]+atMasses[indexO+1]+atMasses[indexO+2])
particleCharges.append(0.0)
particleTypes.append(typeCG)
particleTypestring.append('adres_cg___')
particleX.append(atX[indexO]) # put CG particle on O for the moment, later CG particle will be positioned in centre
particleY.append(atY[indexO])
particleZ.append(atZ[indexO])
particleVX.append(atVX[indexO]) # give CG particle velocity of O for the moment
particleVY.append(atVY[indexO])
particleVZ.append(atVZ[indexO])
print '# system total charge = ',sum(particleCharges[:nProtAtoms+nWaterAtoms])
########################################################################
# 3. setup of the system, random number generator and parallelisation     #
########################################################################
# create the basic system
system = espressopp.System()
# use the random number generator that is included within the ESPResSo++ package
xs = time.time()
seed = int(xs % int(xs) * 10000000000)
print "RNG Seed:", seed
rng = espressopp.esutil.RNG()
rng.seed(seed)
system.rng = rng
# use orthorhombic periodic boundary conditions
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
# set the skin size used for verlet lists and cell sizes
system.skin = skin
# get the number of CPUs to use
NCPUs = espressopp.MPI.COMM_WORLD.size
# calculate a regular 3D grid according to the number of CPUs available
nodeGrid = espressopp.tools.decomp.nodeGrid(NCPUs,box,nbCutoff,skin)
# calculate a 3D subgrid to speed up verlet list builds and communication
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, nbCutoff, skin)
# create a domain decomposition particle storage with the calculated nodeGrid and cellGrid
system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
print "# NCPUs = ", NCPUs
print "# nodeGrid = ", nodeGrid
print "# cellGrid = ", cellGrid
########################################################################
# 4. adding the particles and building the structure                       #
########################################################################
properties = ['id', 'type', 'pos', 'v', 'mass', 'q', 'adrat']
allParticles = []
tuples = []
#add particles in order CG1,AA11,AA12,AA13...CG2,AA21,AA22,AA23... etc.
mapAtToCgIndex = {}
#first adres particles
for i in range(nWaterMols):
cgindex = i + nProtAtoms*2 + nWaterAtoms
tmptuple = [particlePID[cgindex]]
# first CG particle
allParticles.append([particlePID[cgindex],
particleTypes[cgindex],
Real3D(particleX[cgindex],particleY[cgindex],particleZ[cgindex]),
Real3D(particleVX[cgindex],particleVY[cgindex],particleVZ[cgindex]),
particleMasses[cgindex],particleCharges[cgindex],0])
# then AA particles
for j in range(nWaterAtomsPerMol):
aaindex = i*nWaterAtomsPerMol + j + nProtAtoms
tmptuple.append(particlePID[aaindex])
allParticles.append([particlePID[aaindex],
particleTypes[aaindex],
Real3D(particleX[aaindex],particleY[aaindex],particleZ[aaindex]),
Real3D(particleVX[aaindex],particleVY[aaindex],particleVZ[aaindex]),
particleMasses[aaindex],particleCharges[aaindex],1])
mapAtToCgIndex[particlePID[aaindex]]=particlePID[cgindex]
tuples.append(tmptuple)
# then protein
for i in range(nProtAtoms):
allParticles.append([particlePID[i]+nProtAtoms+nWaterAtoms,particleTypes[i], #offsetting by nProtAtoms+nWaterAtoms gives CG protein ids beyond all atomistic ids
Real3D(particleX[i],particleY[i],particleZ[i]),
Real3D(particleVX[i],particleVY[i],particleVZ[i]),
particleMasses[i],particleCharges[i],0])
allParticles.append([particlePID[i],particleTypes[i],
Real3D(particleX[i],particleY[i],particleZ[i]),
Real3D(particleVX[i],particleVY[i],particleVZ[i]),
particleMasses[i],particleCharges[i],1])
tuples.append([particlePID[i]+nProtAtoms+nWaterAtoms,particlePID[i]])
mapAtToCgIndex[particlePID[i]] = particlePID[i]+nProtAtoms+nWaterAtoms
system.storage.addParticles(allParticles, *properties)
# create FixedTupleList object
ftpl = espressopp.FixedTupleListAdress(system.storage)
ftpl.addTuples(tuples)
system.storage.setFixedTuplesAdress(ftpl)
system.storage.decompose()
########################################################################
# 5. setup of the integrator and simulation ensemble                       #
########################################################################
# use a velocity Verlet integration scheme
integrator = espressopp.integrator.VelocityVerlet(system)
# set the integration step
integrator.dt = dt
# use a thermostat if the temperature is set
if (temperature != None):
# create Langevin thermostat
thermostat = espressopp.integrator.LangevinThermostat(system)
# set Langevin friction constant
thermostat.gamma = 5.0 # units ps-1
print "# gamma for langevin thermostat = ",thermostat.gamma
# set temperature
thermostat.temperature = temperature
# switch on for adres
thermostat.adress = True
print "# thermostat temperature = ", temperature*temperatureConvFactor
# tell the integrator to use this thermostat
integrator.addExtension(thermostat)
else:
print "#No thermostat"
########################################################################
# 6. define atomistic and adres interactions
########################################################################
## adres interactions ##
print '# moving atomistic region composed of multiple spheres centered on each protein cg particle'
particlePIDsADR = [mapAtToCgIndex[pid] for pid in particlePIDsADR]
verletlist = espressopp.VerletListAdress(system, cutoff=nbCutoff, adrcut=nbCutoff,
dEx=ex_size, dHy=hy_size,
pids=particlePIDsADR, sphereAdr=True)
# set up LJ interaction according to the parameters read from the .top file
lj_adres_interaction=gromacs.setLennardJonesInteractions(system, defaults, atomtypeparameters, verletlist, intCutoff, adress=True, ftpl=ftpl)
# set up coulomb interactions according to the parameters read from the .top file
print '#Note: Reaction Field method is used for Coulomb interactions'
qq_adres_interaction=gromacs.setCoulombInteractions(system, verletlist, intCutoff, atTypes, epsilon1=1, epsilon2=67.5998, kappa=0, adress=True, ftpl=ftpl)
# set the CG potential for water. Set for LJ interaction, and QQ interaction has no CG equivalent, also prot has no CG potential, is always in adres region
# load CG interaction from table
fe="table_CGwat_CGwat.tab"
gromacs.convertTable("table_CGwat_CGwat.xvg", fe, 1, 1, 1, 1)
potCG = espressopp.interaction.Tabulated(itype=3, filename=fe, cutoff=intCutoff)
lj_adres_interaction.setPotentialCG(type1=typeCG, type2=typeCG, potential=potCG)
## bonded (fixed list) interactions for protein (actually between CG particles in AA region) ##
## set up LJ 1-4 interactions
cgOnefourpairslist=[]
for (a1,a2) in atOnefourpairslist:
cgOnefourpairslist.append((mapAtToCgIndex[a1],mapAtToCgIndex[a2]))
print '# ',len(cgOnefourpairslist),' 1-4 pairs in aa-hybrid region'
onefourlist = espressopp.FixedPairList(system.storage)
onefourlist.addBonds(cgOnefourpairslist)
lj14interaction=gromacs.setLennardJones14Interactions(system, defaults, atomtypeparameters, onefourlist, intCutoff)
# set up coulomb 1-4 interactions
qq14_interactions=gromacs.setCoulomb14Interactions(system, defaults, onefourlist, intCutoff, atTypes)
## set up bond interactions according to the parameters read from the .top file
# only for protein, not for water
cgBondtypes={}
for btkey in atBondtypes.keys():
newBondtypes=[]
for (a1,a2) in atBondtypes[btkey]:
if (a1 in atProtIndices) and (a2 in atProtIndices):
newBondtypes.append((mapAtToCgIndex[a1],mapAtToCgIndex[a2]))
cgBondtypes[btkey]=newBondtypes
bondedinteractions=gromacs.setBondedInteractions(system, cgBondtypes, bondtypeparams)
# set up angle interactions according to the parameters read from the .top file
# only for protein, not for water
cgAngletypes={}
for atkey in atAngletypes.keys():
newAngletypes=[]
for (a1,a2,a3) in atAngletypes[atkey]:
if (a1 in atProtIndices) and (a2 in atProtIndices) and (a3 in atProtIndices):
newAngletypes.append((mapAtToCgIndex[a1],mapAtToCgIndex[a2],mapAtToCgIndex[a3]))
cgAngletypes[atkey]=newAngletypes
angleinteractions=gromacs.setAngleInteractions(system, cgAngletypes, angletypeparams)
# set up dihedral interactions according to the parameters read from the .top file
cgDihedraltypes={}
for atkey in atDihedraltypes.keys():
newDihedraltypes=[]
for (a1,a2,a3,a4) in atDihedraltypes[atkey]:
newDihedraltypes.append((mapAtToCgIndex[a1],mapAtToCgIndex[a2],mapAtToCgIndex[a3],mapAtToCgIndex[a4]))
cgDihedraltypes[atkey]=newDihedraltypes
dihedralinteractions=gromacs.setDihedralInteractions(system, cgDihedraltypes, dihedraltypeparams)
# set up improper interactions according to the parameters read from the .top file
cgImpropertypes={}
for atkey in atImpropertypes.keys():
newImpropertypes=[]
for (a1,a2,a3,a4) in atImpropertypes[atkey]:
newImpropertypes.append((mapAtToCgIndex[a1],mapAtToCgIndex[a2],mapAtToCgIndex[a3],mapAtToCgIndex[a4]))
cgImpropertypes[atkey]=newImpropertypes
improperinteractions=gromacs.setImproperInteractions(system, cgImpropertypes, impropertypeparams)
cgExclusions = [] #the atExclusions list from the topology uses atomistic indices, so map it
#in espressopp, exclusions are handled at the CG (virtual) particle level
for pair in atExclusions:
vp1 = mapAtToCgIndex[pair[0]]
vp2 = mapAtToCgIndex[pair[1]]
if vp1 == vp2: continue #all at interactions within one cg particle are excluded anyway
cgExclusions.append((vp1,vp2))
verletlist.exclude(cgExclusions)
print '# ',len(cgExclusions),' exclusions'
count = system.getNumberOfInteractions()
print '# ',count,' interactions defined'
# SETTLE water for rigid water
print '#Warning: settle set-up assumes water was listed first when tuples were constructed'
molidlist=[]
for wm in range(nWaterMols): #assuming water==adres part, and water is listed first
molidlist.append(tuples[wm][0])
settlewaters = espressopp.integrator.Settle(system, ftpl, mO=15.9994, mH=1.008, distHH=0.1633, distOH=0.1)
settlewaters.addMolecules(molidlist)
integrator.addExtension(settlewaters)
print '# Settling ',len(molidlist), ' waters'
# calculate number of degrees of freedom, for temperature calculation
# note that this will only work in a fully atomistic system
# espressopp doesn't calculate the number of dof correctly in force-based Adress
nconstr = nWaterAtoms
nAtoms = nWaterAtoms + nProtAtoms
ndof_unconstr = nAtoms*3-3
ndof_constr = ndof_unconstr-nconstr
dofTemperatureCorrFactor = float(ndof_unconstr)/float(ndof_constr)
print "# Correcting temperature for constraints, using factor: ",dofTemperatureCorrFactor
print "# calculated using nAtoms = ",nAtoms, "nconstraints = ",nconstr," and ndof_constr = ",ndof_constr
# add AdResS
adress = espressopp.integrator.Adress(system,verletlist,ftpl)
integrator.addExtension(adress)
# add thermodynamic force
print "# Adding Extension: external thermodynamic force using TDforce module..."
tabTF="tabletf-1-1.xvg"
thdforce = espressopp.integrator.TDforce(system,verletlist,startdist = 0.9, enddist = 2.1, edgeweightmultiplier = 20)
thdforce.addForce(itype=3,filename=tabTF,type=typeCG)
integrator.addExtension(thdforce)
# distribute atoms and CG molecules according to AdResS domain decomposition, place CG molecules in the center of mass
print '# Decomposing...'
espressopp.tools.AdressDecomp(system, integrator)
########################################################################
# 7. run #
########################################################################
temperature = espressopp.analysis.Temperature(system)
print "# starting run..."
#try:
# os.remove(trjfile)
#except OSError:
# pass
dump_conf_gro = espressopp.io.DumpGROAdress(system, ftpl, integrator, filename=trjfile,unfolded=True)
start_time = time.clock()
print 'Start time: ', str(datetime.now())
print "i*dt,Eb, EAng, Edih, EImp, ELj, Elj14, EQQ, EQQ14, Etotal, T"
fmt='%5.5f %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8f %15.8f\n'
integrator.run(0)
for k in range(nOutput):
i=k*nStepsPerOutput
EQQ=0.0
EQQ14=0.0
ELj=0.0
ELj14=0.0
Eb = 0.0
EAng = 0.0
EDih = 0.0
EImp = 0.0
for bd in bondedinteractions.values(): Eb+=bd.computeEnergy()
for ang in angleinteractions.values(): EAng+=ang.computeEnergy()
for dih in dihedralinteractions.values(): EDih+=dih.computeEnergy()
for imp in improperinteractions.values(): EImp+=imp.computeEnergy()
ELj= lj_adres_interaction.computeEnergy()
ELj14 = lj14interaction.computeEnergy()
EQQ = qq_adres_interaction.computeEnergy()
EQQ14 = qq14_interactions.computeEnergy()
T = temperature.compute()
Etotal = Eb+EAng+EDih+EImp+EQQ+EQQ14+ELj+ELj14
print (fmt%(i*dt,Eb, EAng, EDih, EImp, ELj, ELj14, EQQ, EQQ14, Etotal, T*temperatureConvFactor*dofTemperatureCorrFactor))
sys.stdout.flush()
integrator.run(nStepsPerOutput)
particle = system.storage.getParticle(1)
if math.isnan(particle.pos[0]):
quit()
dump_conf_gro.dump()
end_time = time.clock()
|
kkreis/espressopp
|
examples/adress/fadress_selfadjusting/peptide-adres-selfadjusting.py
|
Python
|
gpl-3.0
| 21,548
|
[
"ESPResSo",
"Gromacs"
] |
b19fd9c4e9ca8fef93a95bf15c41f07e3ddc04ee40b02b23cbacf6c64b486ffb
|
import unittest
import os
from core.data import DataReader
class DataReaderTest(unittest.TestCase):
def setUp(self):
self.reader = DataReader()
def tearDown(self):
del self.reader
def testDataReader(self):
path = os.path.dirname(os.path.abspath(__file__))
fileName = path + "/data/hi-3.mhd"
imageData = self.reader.GetImageData(fileName)
self.assertIsNotNone(imageData)
dimensions = imageData.GetDimensions()
self.assertEqual(dimensions, (21, 15, 9))
def testUnsupportedDataTypes(self):
self.assertRaises(Exception, self.reader.GetImageData, "data/hi-3.mrb")
def testSupportedDataTypes(self):
self.assertTrue(self.reader.IsExtensionSupported("mhd"))
self.assertTrue(self.reader.IsExtensionSupported("vti"))
self.assertTrue(self.reader.IsExtensionSupported("dcm"))
self.assertFalse(self.reader.IsExtensionSupported("mrb"))
self.assertFalse(self.reader.IsExtensionSupported("vtk"))
self.assertFalse(self.reader.IsExtensionSupported("raw"))
self.assertFalse(self.reader.IsExtensionSupported("dat"))
# def testDatFileFormat(self):
# path = os.path.dirname(os.path.abspath(__file__))
# fileName = path + "/data/present492x492x442.dat"
# imageData = self.reader.GetImageData(fileName)
# dimensions = imageData.GetDimensions()
# self.assertEquals(dimensions, (492, 492, 442))
def testVTIFileFormat(self):
path = os.path.dirname(os.path.abspath(__file__))
fileName = path + "/data/modelSegmentation.vti"
imageData = self.reader.GetImageData(fileName)
dimensions = imageData.GetDimensions()
self.assertEqual(dimensions, (376, 245, 206))
def testEmptyDirectory(self):
path = os.path.dirname(os.path.abspath(__file__))
fileName = path + "/data"
imageData = self.reader.GetImageData(fileName)
self.assertIsNone(imageData)
def testDICOMFileFormat(self):
path = os.path.dirname(os.path.abspath(__file__))
fileName = path + "/data/DICOM"
imageData = self.reader.GetImageData(fileName)
self.assertIsNotNone(imageData)
dimensions = imageData.GetDimensions()
self.assertEqual(dimensions, (320, 384, 11))
|
berendkleinhaneveld/Registrationshop
|
tests/test_DataReader.py
|
Python
|
mit
| 2,084
|
[
"VTK"
] |
5333f28d237428e141f0f444f768831e26c205cab2759d4d4341134892980ecc
|
from pylab import *
from plotly.tools import FigureFactory as FF
import plotly.graph_objs as go
from scipy.spatial.distance import pdist, squareform, cdist
from .riemannian_manifold import RManifold
from ..data_attachment.measures import Measures, Measure
class Landmarks(RManifold) :
"""
Encodes a Landmarks manifold :
self = {(x_1,...,x_n) in R^d, x_i != x_j} ~ R^(nd)
endowed with an appropriate (kernel) metric.
"""
def __init__(self, npoints = 1, dimension = 2, kernel = ('gaussian', 1), dt=0.1) :
"""
Creates a Landmarks manifold.
"""
RManifold.__init__(self, npoints * dimension, g=None, dt=dt)
self.npoints = npoints
self.dimension = dimension
assert(kernel[0] == 'gaussian'), 'The gaussian kernel is the only one implemented so far.'
if kernel[0] == 'gaussian' :
self.kernel_scale = kernel[1]
# These three functions will typically account for 90% of the overall computation time
#self.kernel = lambda x : exp(- x / (2* self.kernel_scale ** 2)) # kernel is given |x|^2 as input
#self.kernelp = lambda x : - exp(- x / (2* self.kernel_scale ** 2)) / (2* self.kernel_scale ** 2)
#self.kernelpp = lambda x : + exp(- x / (2* self.kernel_scale ** 2)) / (4* self.kernel_scale ** 4)
def precompute_kernels(self, q) :
"""
Returns a tuple of kernel, kernel', kernel'' matrices at position q.
"""
x = q.reshape((self.npoints, self.dimension))
dists = squareform(pdist(x, 'sqeuclidean'))
K = exp(- dists / (2* self.kernel_scale ** 2))
return ( K,
- K / (2* self.kernel_scale ** 2),
K / (4* self.kernel_scale ** 4))
def K(self,q,p, kernels) :
"""
Kernel representation of a cotangent momentum p at position q
in the tangent space.
"""
m = p.reshape((self.npoints, self.dimension))
K = kernels[0] # K_ij = k(|x_i-x_j|^2)
# K = kron(K, eye(self.dimension)) # hugely inefficient, but whatever...
# return p @ K
Kq_p = zeros((self.npoints, self.dimension))
for d in range(self.dimension) :
Kq_p[:,d] = m[:,d] @ K # v_nd = (Kq_p)_nd = sum_i k(|x_i-x_j|^2) p_i^d
return Kq_p.ravel()
def L2_repr_p(self,q,p, kernels) :
"""
Mapping from the cotangent plane endowed with Kernel metric
to R^2 endowed with the standard dot product.
K(r, theta)^.5 = ...
"""
raise(NotImplementedError)
def upP(self,q,p, kernels) :
"""
Returns an update step of the momentum p in the geodesic equations.
-.5*d_q (p, K_q p) = ...
"""
x = q.reshape((self.npoints, self.dimension))
p = p.reshape((self.npoints, self.dimension))
K = kernels[1] # K_ij = k'(|x_i-x_j|^2)
L2prods = p @ p.T # L2prods_ij = (p_i . p_j) : isotropic kernels
pKqp_p = K * L2prods # pKqp_p_ij = (p_i . p_j) * k'(|x_i-x_j|^2)
grad = zeros((self.npoints, self.dimension))
for d in range(self.dimension) :
diffs = atleast_2d(x[:,d]).T - x[:,d] # diffs_ij = x_i^d - x_j^d
# grad_nd = 2*sum_i (x_i^d - x_n^d) * (p_i . p_n) * k'(|x_i-x_n|^2)
# = -.5 * ( sum_j 2*(x_n^d - x_j^d) * (p_n . p_j) * k'(|x_n-x_j|^2)
# - sum_i 2*(x_i^d - x_n^d) * (p_i . p_n) * k'(|x_i-x_n|^2) )
grad[:,d] = 2*sum( diffs * pKqp_p, 0)
return grad.reshape((self.npoints * self.dimension,))
def gradq_pKqz(self, p, q, z, kernels) :
"""
Useful for the adjoint integration scheme.
d_q (p, K_q z) = ...
"""
x = q.reshape((self.npoints, self.dimension))
p = p.reshape((self.npoints, self.dimension))
z = z.reshape((self.npoints, self.dimension))
K = kernels[1] # K_ij = k'(|x_i-x_j|^2)
L2prods = p @ z.T # L2prods_ij = (p_i . z_j) : isotropic kernels
pKqp_z = K * L2prods # pKqp_p_ij = (p_i . z_j) * k'(|x_i-x_j|^2)
grad = zeros((self.npoints, self.dimension))
for d in range(self.dimension) :
diffs = atleast_2d(x[:,d]).T - x[:,d] # diffs_ij = x_i^d - x_j^d
# grad_nd = sum_i 2*(x_i^d - x_n^d) * (p_i . z_n) * k'(|x_i-x_n|^2)
# + sum_j 2*(x_n^d - x_j^d) * (p_n . z_j) * k'(|x_n-x_j|^2)
grad[:,d] = - sum( 2*diffs * pKqp_z, 0) + sum( 2*diffs * pKqp_z, 1)
return grad.reshape((self.npoints * self.dimension,))
def dq_gradq_pKqp_a(self, q, p, a, kernels) :
"""
Useful for the adjoint integration scheme :
d_q [ d_q (p, K_q p) ] . a = ...
"""
h = 1e-8
Q0phA = q + h*a
Q0mhA = q - h*a
update_emp = ( Landmarks.gradq_pKqz(self, p, Q0phA, p, Landmarks.precompute_kernels(self, Q0phA))
- Landmarks.gradq_pKqz(self, p, Q0mhA, p, Landmarks.precompute_kernels(self, Q0mhA))) / (2*h)
return update_emp
"""
x = q.reshape((self.npoints, self.dimension))
p = p.reshape((self.npoints, self.dimension))
a = a.reshape((self.npoints, self.dimension))
L2prods = p @ p.T # L2prods_ij = (p_i . p_j) : isotropic kernels
grad = zeros((self.npoints, self.dimension))
for d in range(self.dimension) :
diffs = atleast_2d(x[:,d]).T - x[:,d] # diffs_ij = x_i^d - x_j^d
# K_ij = 2*[ k'(|x_i-x_j|^2) + 2* (x_i^d - x_j^d)^2 * k''(|x_i-x_j|^2) ]
K = 2*( kernels[1] \
+ 2 * kernels[2] * (diffs**2)) # The two '2' come from the fact that d(x-y)^2 / dx = 2 * (x-y)
# We have :
# [ d_q (p, K_q p) ]_nd = 2* sum_j (p_n . p_j) * 2*(x_n^d - x_j^d) * k'(|x_n-x_j|^2)
# = 2* sum_j (p_n . p_j) * f(x_n^d, x_j)
# --> the first factor '2' because we are actually
# doing a summation over i + a summation over j,
# which can be identified by symmetry.
# with :
# f(x_n^d, x_j) = 2* (x_n^d - x_j^d) * k'( |x_n - x_j|^2)
# df/d(x_n^d) = 2* [ k'( |x_n - x_j|^2) + 2 * (x_n^d - x_j^d)^2 * k''( |x_n - x_j|^2) ]
# If we note F(q,p) = [ d_q (p, K_q p) ], we have :
# d_q [ d_q (p, K_q p) ] . a ~= (F(q + dt.a, p) - F(q,p)) / dt
# (Gateau derivative in the direction "a" over the variable "q")
#
#
# So that :
# grad_nd = a_nd * 2 * sum_j (p_n . p_j) * f'(x_n^d, x_j)
# grad_nd = 2 * a_nd
# * sum_i [ (p_i . p_j) * 2* (k'(|x_i-x_j|^2)
# + 2* (x_i^d - x_j^d)^2 * k''(|x_i-x_j|^2) ) ]
grad[:,d] = a[:,d] * 2 * sum( K * L2prods , 0 )
# The factor '2' comes from the fact that we identify the summation over i with the summation over j
return grad.reshape((self.npoints * self.dimension,))
"""
def dq_Kqp_a(self,q,p,a, kernels) :
"""
Useful for the adjoint integration scheme.
d_q (K_q p) . a = ...
"""
h = 1e-8
Q0phA = q + h*a
Q0mhA = q - h*a
update_emp = ( Landmarks.K(self, Q0phA, p, Landmarks.precompute_kernels(self, Q0phA))
- Landmarks.K(self, Q0mhA, p, Landmarks.precompute_kernels(self, Q0mhA))) / (2*h)
return update_emp
"""x = q.reshape((self.npoints, self.dimension))
p = p.reshape((self.npoints, self.dimension))
a = a.reshape((self.npoints, self.dimension))
dists = squareform(pdist(x, 'sqeuclidean')) # dists_ij = |x_i-x_j|^2
# We have :
# [K_q p]_nd = sum_j { k(|x_n - x_j|^2) * p_j^d }
#
# So that :
# grad_nd = a_nd * sum_j { 2 * (x_n^d - x_j^d) * k'(|x_n - x_j|^2) * p_j^d }
grad = zeros((self.npoints, self.dimension))
for d in range(self.dimension) :
diffs = atleast_2d(x[:,d]).T - x[:,d] # diffs_ij = x_i^d - x_j^d
# K_ij = 2 * (x_i^d - x_j^d) * k'(|x_i - x_j|^2) * p_j^d
K = 2 * dists * kernels[1] * p[:,d]
# grad_nd = a_nd * sum_j { 2 * (x_n^d - x_j^d) * k'(|x_n - x_j|^2) * p_j^d }
grad[:,d] = a[:,d] * sum( K , 1 )
return grad.reshape((self.npoints * self.dimension,))"""
""" Distances """
def squared_distance(self, Q, Xt, *args) :
"""Returns 1/2 * |I(Q) - Xt|^2 and its Q-gradient."""
return (.5*sum( (Q-Xt)**2) , Q - Xt)
def distance(self, Q, Xt, *args) :
"""Returns |I(Q) - Xt| and its Q-gradient."""
raise(NotImplementedError)
def kernel_matchings(self, start_scale, end_scale) :
def curryfied (Q,Xt,progress) :
return self.kernel_matching(Q, Xt, start_scale + (end_scale - start_scale) * progress ) # Coarse to fine scheme
return curryfied
def kernel_matching(self, Q, Xt, s = 0.3) :
"""
Implementation of the kernel data attachment term :
d(Q, Xt) = .5 * sum_{i,j} k( | Q_i - Q_j | ) / nobs^2
- .5 * 2*sum_{i,j} k( | Q_i - Xt_j | ) / nobs^2
+ .5 * sum_{i,j} k( | Xt_i - Xt_j | ) / nobs^2
where k( d ) = exp( - d^2/(2*s^2) ) is a gaussian kernel
with std = s.
See the PhD thesis of Joan Glaunes (2005), Chapter 4, for reference.
This is the most rudimentary tool for the matching of unlabelled data :
Landmarks are simply seen as sums of dirac measures,
with *same weight* and *total mass 1*.
More sophisticated attachment terms such as 'varifold', 'currents'
or 'optimal transport'/'gromov-wasserstein' are implemented by
the Curves2D class.
"""
(C, dMu) = Measures.kernel_matching( Measure( Q.reshape((self.npoints, self.dimension))),
Measure(Xt.reshape((self.npoints, self.dimension))),
s )
return (C, dMu.points ) # throw away the information about the weights variations
def sinkhorn_matchings(self, sinkhorn_options = None) :
def curryfied (Q,Xt,progress) :
return self.sinkhorn_matching(Q, Xt, sinkhorn_options )
return curryfied
def sinkhorn_matching(self, Q, Xt, sinkhorn_options) :
(C, dMu) = Measures.sinkhorn_matching( Measure( Q.reshape((self.npoints, self.dimension))),
Measure(Xt.reshape((self.npoints, self.dimension))),
sinkhorn_options )
return (C, dMu.points ) # throw away the information about the weights variations
def I(self, q) :
return vstack(q)
def show(self, mode='', ax = None) :
"Manifold display."
self.layout = go.Layout(
title='',
width=800,
height=800,
legend = dict( x = .8, y = 1),
xaxis = dict(range = [-3,3]),
yaxis = dict(range = [-3,3])
)
def plot_traj(self, qt, **kwargs) :
"Trajectory display. qt can be an array of coordinates, or a list of such arrays."
if type(qt) is not list :
qt = [qt]
points = array([]).reshape((0,self.dimension)) # we should pre-allocate...
separator = array([None]* self.dimension).reshape((1,self.dimension))
for traj in qt :
traj = atleast_2d(traj)
ntimes = traj.shape[0]
for landmark in range(self.npoints) :
traj_landmark = traj[:, landmark*(self.dimension) : landmark*(self.dimension) + self.dimension]
points = vstack((points, traj_landmark, separator))
points = go.Scatter(x = array(points[:,0]), y = array(points[:,1]), mode = 'markers+lines', hoverinfo='name', **kwargs)
self.current_axis.append(points)
def quiver(self, qt, vt, **kwargs) :
"Vector field display"
self.marker(qt, **kwargs)
def marker(self, q, **kwargs) :
"""Marker field display"""
q = atleast_2d(q)
list_points = []
separator = array([None]* self.dimension)
for l in range(q.shape[0]) :
list_points.append(q[l].reshape((self.npoints, self.dimension)))
list_points.append( separator )
points = vstack(list_points)
points = go.Scatter(x = array(points[:,0]), y = array(points[:,1]), mode = 'markers', hoverinfo='name', **kwargs)
self.current_axis.append(points)
|
jeanfeydy/lddmm-ot
|
LDDMM_Python/lddmm_python/modules/manifolds/landmarks.py
|
Python
|
mit
| 11,202
|
[
"DIRAC",
"Gaussian"
] |
3417d3981e4383ee8cb554396d75bbc05d2dde4378190ce3ab9d8095d28034ed
|
"""
dip CLI tool main entrypoint
"""
import json
import subprocess
import click
import docker
from dip import __version__
from dip import colors
from dip import errors
from dip import options
from dip import settings
from dip import utils
def clickerr(func):
""" Decorator to catch errors and re-raise as ClickException. """
# pylint: disable=missing-docstring
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except errors.DipError as err:
raise click.ClickException(str(err))
wrapper.__doc__ = func.__doc__
return wrapper
def warnsleep(app):
""" Warn about app divergence and sleep. """
# Warn about divergence
warn = '\n'\
'Local service has diverged from remote or is inaccessible.\n'\
'Sleeping for {}s\n'\
'CTRL-C to exit\n'.format(app.repo.sleeptime)
click.echo(colors.amber(warn), err=True)
# Give hint to upgrade
upgrade = 'dip upgrade {}'.format(app.name)
hint = 'Run `{}` to git-pull updates from remote\n'\
.format(colors.teal(upgrade))
click.echo(hint, err=True)
# Sleep
app.repo.sleep()
def warnask(app):
""" Warn about app divergence and ask to upgrade. """
# Warn about divergence
warn = '\nLocal service has diverged from remote or is inaccessible.'
click.echo(colors.amber(warn), err=True)
# Ask to upgrade
upgrade = colors.teal('Attempt to upgrade before continuing?')
if click.confirm(upgrade):
# Upgrade
app.repo.pull()
click.echo(err=True)
else:
override = colors.teal('Continue without upgrading?')
if not click.confirm(override):
goodbye = 'Please resolve these changes before re-attempting.\n'
click.echo(goodbye, err=True)
raise SystemExit(1)
def warnupgrade(app):
""" Warn about app divergence and do upgrade. """
# Warn about divergence
warn = '\nLocal service has diverged from remote or is inaccessible.'
click.echo(colors.amber(warn), err=True)
# Ask to upgrade
click.echo(colors.teal('Attempting to auto-upgrade'), err=True)
app.repo.pull()
@click.group(context_settings={'help_option_names': ['-h', '--help']})
@click.version_option(__version__, '-v', '--version')
def dip():
""" Install CLIs using docker-compose.
The following ENV variables are supported by `dip`:
\b
:DIP_HOME: The location of the dip settings.json file
:DIP_PATH: The default location of installed executables
See https://github.com/amancevice/dip for more information.
"""
@dip.command('completion')
def dip_completion():
""" Print bash completion script. """
pipe = subprocess.Popen('_DIP_COMPLETE=source dip',
stdout=subprocess.PIPE,
shell=True)
out, _ = pipe.communicate()  # stderr is not piped, so it would be None
# pylint: disable=superfluous-parens
print(out.decode('utf-8').strip())
return
@dip.command('config')
@options.EDIT
@options.KEYS
@clickerr
def dip_config(edit, keys):
""" Show current dip configuration.
\b
dip config NAME # Get NAME config dict
dip config NAME git remote # Get name of remote
"""
with settings.load() as cfg:
if edit:
try:
subprocess.call([utils.editor(), cfg.filepath])
except KeyError:
raise click.ClickException('EDITOR value not defined in ENV')
else:
working = cfg.data
for key in keys:
try:
working = working[key]
except (KeyError, TypeError):
raise SystemExit(1)
if isinstance(working, dict):
click.echo(json.dumps(working, indent=4, sort_keys=True))
else:
click.echo(working)
@dip.command('diff')
@options.NAME
@options.QUIET
def dip_diff(name, quiet):
""" Run diff against remote. """
with settings.diffapp(name, quiet=quiet) as app_diff:
_, diff = app_diff
if diff:
raise SystemExit(1)
@dip.command('install')
@options.NAME
@options.HOME
@options.PATH
@options.REMOTE
@options.DOTENV
@options.ENV
@options.SECRET
@options.SLEEP
@options.AUTO_UPGRADE
@options.NO_EXE
@clickerr
def dip_install(name, home, path, remote, dotenv, env, secret, sleep,
auto_upgrade, no_exe):
""" Install CLI by name.
\b
dip install fizz . # Relative path
dip install fizz /path/to/dir # Absolute path
dip install fizz . -r origin/master # Tracking git remote/branch
"""
# pylint: disable=too-many-arguments
with settings.saveonexit() as cfg:
# Interactively set ENV
for sec in secret:
env[sec] = click.prompt(sec, hide_input=True) # pragma: no cover
# Parse git config
remote, branch = remote
git = {'remote': remote,
'branch': branch,
'sleep': sleep,
'auto_upgrade': auto_upgrade}
# Install
if no_exe:
app = cfg[name] = settings.Dip(name, home, path, env, git, dotenv)
else:
app = cfg.install(name, home, path, env, git, dotenv)
# Validate configuration
app.validate()
# Finish
click.echo("Installed {name} to {path}".format(
name=colors.teal(app.name),
path=colors.blue(app.path)))
@dip.command('list')
@clickerr
def dip_list():
""" List installed CLIs. """
with settings.load() as cfg:
if any(cfg):
click.echo()
homes = [utils.contractuser(cfg[x].home) for x in cfg]
maxname = max(len(x) for x in cfg)
maxhome = max(len(x) for x in homes)
for key in sorted(cfg):
app = cfg[key]
name = colors.teal(app.name.ljust(maxname))
home = colors.blue(utils.contractuser(app.home).ljust(maxhome))
remote = branch = None
tpl = "{name} {home}"
if app.repo:
try:
remote = app.repo.remotename
branch = app.repo.branch
tpl += " {remote}/{branch}"
except Exception: # pylint: disable=broad-except
tpl += colors.red(' [git error]')
click.echo(tpl.format(name=name,
home=home,
remote=remote,
branch=branch))
click.echo()
@dip.command('pull')
@options.NAME
@clickerr
def dip_pull(name):
""" Pull updates from docker-compose. """
with settings.diffapp(name) as app_diff:
app, diff = app_diff
if diff and app.git.get('sleep'):
warnsleep(app)
elif diff:
warnask(app)
try:
return app.service.pull()
except docker.errors.APIError:
err = "Could not pull '{}' image".format(name)
click.echo(colors.red(err), err=True)
raise SystemExit(1)
@dip.command('reset')
@options.FORCE
@clickerr
def dip_reset(force):
""" Reset dip configuration to defaults. """
if force:
settings.reset()
@dip.command('run')
@options.NAME
@options.QUICK
@options.ARGS
@clickerr
def dip_run(name, quick, args):
""" Run dip CLI. """
if quick:
with settings.getapp(name) as app:
app.run(*args)
else:
with settings.diffapp(name) as app_diff:
app, diff = app_diff
if diff and app.sleep:
warnsleep(app)
elif diff and app.auto_upgrade:
warnupgrade(app)
elif diff:
warnask(app)
app.run(*args)
@dip.command('show')
@options.NAME
@clickerr
def dip_show(name):
""" Show service configuration. """
with settings.diffapp(name) as app_diff:
app, diff = app_diff
if diff and app.git.get('sleep'):
warnsleep(app)
elif diff:
warnask(app)
for definition in app.definitions:
click.echo("\n{}\n".format(definition.strip()))
@dip.command('uninstall')
@options.NAMES
@clickerr
def dip_uninstall(names):
""" Uninstall CLI by name. """
for name in names:
with settings.saveonexit() as cfg:
try:
cfg.uninstall(name)
click.echo("Uninstalled {name}".format(name=colors.red(name)))
except KeyError:
pass
@dip.command('upgrade')
@options.NAMES
@clickerr
def dip_upgrade(names):
""" Upgrade CLI by pulling from git remote. """
for name in names:
with settings.getapp(name) as app:
try:
app.repo.pull()
except AttributeError:
pass
if __name__ == '__main__':
dip() # pragma: no cover
|
amancevice/dip
|
dip/main.py
|
Python
|
mit
| 9,032
|
[
"Amber"
] |
5412cc5655c95f3c3327724591c1b3d5335e0dd0f524386f71ce20d12bb63059
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
##############################################################################
# Built-in
import gc
import logging
# Third-party
import numpy
import vigra
import psutil
# Lazyflow
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.roi import enlargeRoiForHalo, TinyVector
# ilastik
from lazyflow.utility.timer import Timer
logger = logging.getLogger(__name__)
def getMemoryUsageMb():
"""
Get the current memory usage for the whole system (not just python).
"""
# Collect garbage first
gc.collect()
vmem = psutil.virtual_memory()
mem_usage_mb = (vmem.total - vmem.available) / 1e6
return mem_usage_mb
class OpAnisotropicGaussianSmoothing5d(Operator):
# raw volume, in 5d 'txyzc' order
Input = InputSlot()
Sigmas = InputSlot(value={'x': 1.0, 'y': 1.0, 'z': 1.0})
Output = OutputSlot()
def setupOutputs(self):
self.Output.meta.assignFrom(self.Input.meta)
self.Output.meta.dtype = numpy.float32 # vigra gaussian only supports float32
self._sigmas = self.Sigmas.value
assert isinstance(self.Sigmas.value, dict), "Sigmas slot expects a dict"
assert set(self._sigmas.keys()) == set('xyz'), "Sigmas slot expects three key-value pairs for x,y,z"
def execute(self, slot, subindex, roi, result):
assert all(roi.stop <= self.Input.meta.shape),\
"Requested roi {} is too large for this input image of shape {}.".format(roi, self.Input.meta.shape)
# Determine how much input data we'll need, and where the result will be
# relative to that input roi
# inputRoi is a 5d roi, computeRoi depends on the number of singletons
# in shape, but is at most 3d
inputRoi, computeRoi = self._getInputComputeRois(roi)
# Obtain the input data
with Timer() as resultTimer:
data = self.Input(*inputRoi).wait()
logger.debug("Obtaining input data took {} seconds for roi {}".format(
resultTimer.seconds(), inputRoi))
data = vigra.taggedView(data, axistags='txyzc')
# input is in txyzc order
tIndex = 0
cIndex = 4
# Must be float32
if data.dtype != numpy.float32:
data = data.astype(numpy.float32)
# we need to remove a singleton z axis, otherwise we get
# 'kernel longer than line' errors
ts = self.Input.meta.getTaggedShape()
tags = [k for k in 'xyz' if ts[k] > 1]
sigma = [self._sigmas[k] for k in tags]
# Check if we need to smooth
if any([x < 0.1 for x in sigma]):
# just pipe the input through
result[...] = data
return
for i, t in enumerate(xrange(roi.start[tIndex], roi.stop[tIndex])):
for j, c in enumerate(xrange(roi.start[cIndex], roi.stop[cIndex])):
# prepare the result as an argument
resview = vigra.taggedView(result[i, ..., j],
axistags='xyz')
dataview = data[i, ..., j]
# TODO make this general, not just for z axis
resview = resview.withAxes(*tags)
dataview = dataview.withAxes(*tags)
# Smooth the input data
vigra.filters.gaussianSmoothing(
dataview, sigma, window_size=2.0,
roi=computeRoi, out=resview)
def _getInputComputeRois(self, roi):
shape = self.Input.meta.shape
start = numpy.asarray(roi.start)
stop = numpy.asarray(roi.stop)
n = len(stop)
spatStart = [roi.start[i] for i in range(n) if shape[i] > 1]
spatStop = [roi.stop[i] for i in range(n) if shape[i] > 1]
sigma = [0] + map(self._sigmas.get, 'xyz') + [0]
spatialRoi = (spatStart, spatStop)
inputSpatialRoi = enlargeRoiForHalo(roi.start, roi.stop, shape,
sigma, window=2.0)
# Determine the roi within the input data we're going to request
inputRoiOffset = roi.start - inputSpatialRoi[0]
computeRoi = [inputRoiOffset, inputRoiOffset + stop - start]
for i in (0, 1):
computeRoi[i] = [computeRoi[i][j] for j in range(n)
if shape[j] > 1 and j not in (0, 4)]
# make sure that vigra understands our integer types
computeRoi = (tuple(map(int, computeRoi[0])),
tuple(map(int, computeRoi[1])))
inputRoi = (list(inputSpatialRoi[0]), list(inputSpatialRoi[1]))
return inputRoi, computeRoi
def propagateDirty(self, slot, subindex, roi):
if slot == self.Input:
# Halo calculation is bidirectional, so we can re-use the function
# that computes the halo during execute()
inputRoi, _ = self._getInputComputeRois(roi)
self.Output.setDirty(inputRoi[0], inputRoi[1])
elif slot == self.Sigmas:
self.Output.setDirty(slice(None))
else:
assert False, "Unknown input slot: {}".format(slot.name)
class OpAnisotropicGaussianSmoothing(Operator):
Input = InputSlot()
Sigmas = InputSlot( value={'x':1.0, 'y':1.0, 'z':1.0} )
Output = OutputSlot()
def setupOutputs(self):
self.Output.meta.assignFrom(self.Input.meta)
#if there is a time axis of dim 1, the output won't have it
timeIndex = self.Output.meta.axistags.index('t')
if timeIndex<len(self.Output.meta.shape):
newshape = list(self.Output.meta.shape)
newshape.pop(timeIndex)
self.Output.meta.shape = tuple(newshape)
del self.Output.meta.axistags[timeIndex]
self.Output.meta.dtype = numpy.float32 # vigra gaussian only supports float32
self._sigmas = self.Sigmas.value
assert isinstance(self.Sigmas.value, dict), "Sigmas slot expects a dict"
assert set(self._sigmas.keys()) == set('xyz'), "Sigmas slot expects three key-value pairs for x,y,z"
print("Assigning output: {} ====> {}".format(self.Input.meta.getTaggedShape(), self.Output.meta.getTaggedShape()))
#self.Output.setDirty( slice(None) )
def execute(self, slot, subindex, roi, result):
assert all(roi.stop <= self.Input.meta.shape), "Requested roi {} is too large for this input image of shape {}.".format( roi, self.Input.meta.shape )
# Determine how much input data we'll need, and where the result will be relative to that input roi
inputRoi, computeRoi = self._getInputComputeRois(roi)
# Obtain the input data
with Timer() as resultTimer:
data = self.Input( *inputRoi ).wait()
logger.debug("Obtaining input data took {} seconds for roi {}".format( resultTimer.seconds(), inputRoi ))
xIndex = self.Input.meta.axistags.index('x')
yIndex = self.Input.meta.axistags.index('y')
zIndex = self.Input.meta.axistags.index('z') if self.Input.meta.axistags.index('z')<len(self.Input.meta.shape) else None
cIndex = self.Input.meta.axistags.index('c') if self.Input.meta.axistags.index('c')<len(self.Input.meta.shape) else None
# Must be float32
if data.dtype != numpy.float32:
data = data.astype(numpy.float32)
axiskeys = self.Input.meta.getAxisKeys()
spatialkeys = filter( lambda k: k in 'xyz', axiskeys )
# we need to remove a singleton z axis, otherwise we get
# 'kernel longer than line' errors
reskey = [slice(None, None, None)]*len(self.Input.meta.shape)
reskey[cIndex]=0
if zIndex and self.Input.meta.shape[zIndex]==1:
removedZ = True
data = data.reshape((data.shape[xIndex], data.shape[yIndex]))
reskey[zIndex]=0
spatialkeys = filter( lambda k: k in 'xy', axiskeys )
else:
removedZ = False
sigma = map(self._sigmas.get, spatialkeys)
#Check if we need to smooth
if any([x < 0.1 for x in sigma]):
if removedZ:
resultXY = vigra.taggedView(result, axistags="".join(axiskeys))
resultXY = resultXY.withAxes(*'xy')
resultXY[:] = data
else:
result[:] = data
return result
# Smooth the input data
smoothed = vigra.filters.gaussianSmoothing(data, sigma, window_size=2.0, roi=computeRoi, out=result[tuple(reskey)]) # FIXME: Assumes channel is last axis
expectedShape = tuple(TinyVector(computeRoi[1]) - TinyVector(computeRoi[0]))
assert tuple(smoothed.shape) == expectedShape, "Smoothed data shape {} didn't match expected shape {}".format( smoothed.shape, roi.stop - roi.start )
return result
def _getInputComputeRois(self, roi):
axiskeys = self.Input.meta.getAxisKeys()
spatialkeys = filter( lambda k: k in 'xyz', axiskeys )
sigma = map( self._sigmas.get, spatialkeys )
inputSpatialShape = self.Input.meta.getTaggedShape()
spatialRoi = ( TinyVector(roi.start), TinyVector(roi.stop) )
tIndex = None
cIndex = None
zIndex = None
if 'c' in inputSpatialShape:
del inputSpatialShape['c']
cIndex = axiskeys.index('c')
if 't' in inputSpatialShape.keys():
assert inputSpatialShape['t'] == 1
tIndex = axiskeys.index('t')
if 'z' in inputSpatialShape.keys() and inputSpatialShape['z']==1:
#2D image, avoid kernel longer than line exception
del inputSpatialShape['z']
zIndex = axiskeys.index('z')
indices = [tIndex, cIndex, zIndex]
indices = sorted(indices, reverse=True)
for ind in indices:
if ind:
spatialRoi[0].pop(ind)
spatialRoi[1].pop(ind)
inputSpatialRoi = enlargeRoiForHalo(spatialRoi[0], spatialRoi[1], inputSpatialShape.values(), sigma, window=2.0)
# Determine the roi within the input data we're going to request
inputRoiOffset = spatialRoi[0] - inputSpatialRoi[0]
computeRoi = (inputRoiOffset, inputRoiOffset + spatialRoi[1] - spatialRoi[0])
# For some reason, vigra.filters.gaussianSmoothing will raise an exception if this parameter doesn't have the correct integer type.
# (for example, if we give it as a numpy.ndarray with dtype=int64, we get an error)
computeRoi = ( tuple(map(int, computeRoi[0])),
tuple(map(int, computeRoi[1])) )
inputRoi = (list(inputSpatialRoi[0]), list(inputSpatialRoi[1]))
for ind in reversed(indices):
if ind:
inputRoi[0].insert( ind, 0 )
inputRoi[1].insert( ind, 1 )
return inputRoi, computeRoi
def propagateDirty(self, slot, subindex, roi):
if slot == self.Input:
# Halo calculation is bidirectional, so we can re-use the function that computes the halo during execute()
inputRoi, _ = self._getInputComputeRois(roi)
self.Output.setDirty( inputRoi[0], inputRoi[1] )
elif slot == self.Sigmas:
self.Output.setDirty( slice(None) )
else:
assert False, "Unknown input slot: {}".format( slot.name )
## Combine high and low threshold
# This operator combines the thresholding results. We want the resulting labels to be
# the ones that passed the lower threshold AND that have at least one pixel that passed
# the higher threshold. E.g.:
#
# Thresholds: High=4, Low=1
#
# 0 2 0 0 2 0
# 2 5 2 2 3 2
# 0 2 0 0 2 0
#
# Results:
#
# 0 1 0 0 0 0
# 1 1 1 0 0 0
# 0 1 0 0 0 0
#
#
# Given two label images, produce a copy of BigLabels, EXCEPT first remove all labels
# from BigLabels that do not overlap with any labels in SmallLabels.
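# A minimal numpy sketch of that selection (hypothetical arrays 'small' and
# 'big'; OpSelectLabels below does the same thing in a memory-conscious way):
#   passed = numpy.bincount(((small != 0) * big).flat).nonzero()[0]
#   passed = passed[passed != 0]  # 0 is background, not a label
#   lut = numpy.zeros(big.max() + 1, numpy.uint32)
#   lut[passed] = numpy.arange(1, len(passed) + 1)
#   relabeled = lut[big]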
class OpSelectLabels(Operator):
## The smaller clusters
# i.e. results of high thresholding
SmallLabels = InputSlot()
## The larger clusters
# i.e. results of low thresholding
BigLabels = InputSlot()
Output = OutputSlot()
def setupOutputs(self):
self.Output.meta.assignFrom(self.BigLabels.meta)
self.Output.meta.dtype = numpy.uint32
self.Output.meta.drange = (0, 1)
def execute(self, slot, subindex, roi, result):
assert slot == self.Output
# This operator is typically used with very big rois, so be extremely memory-conscious:
# - Don't request the small and big inputs in parallel.
# - Clean finished requests immediately (don't wait for this function to exit)
# - Delete intermediate results as soon as possible.
if logger.isEnabledFor(logging.DEBUG):
dtypeBytes = self.SmallLabels.meta.getDtypeBytes()
roiShape = roi.stop - roi.start
logger.debug("Roi shape is {} = {} MB".format(roiShape, numpy.prod(roiShape) * dtypeBytes / 1e6 ))
starting_memory_usage_mb = getMemoryUsageMb()
logger.debug("Starting with memory usage: {} MB".format(starting_memory_usage_mb))
def logMemoryIncrease(msg):
"""Log a debug message about the RAM usage compared to when this function started execution."""
if logger.isEnabledFor(logging.DEBUG):
memory_increase_mb = getMemoryUsageMb() - starting_memory_usage_mb
logger.debug("{}, memory increase is: {} MB".format(msg, memory_increase_mb))
smallLabelsReq = self.SmallLabels(roi.start, roi.stop)
smallLabels = smallLabelsReq.wait()
smallLabelsReq.clean()
logMemoryIncrease("After obtaining small labels")
smallNonZero = numpy.ndarray(shape=smallLabels.shape, dtype=bool)
smallNonZero[...] = (smallLabels != 0)
del smallLabels
logMemoryIncrease("Before obtaining big labels")
bigLabels = self.BigLabels(roi.start, roi.stop).wait()
logMemoryIncrease("After obtaining big labels")
prod = smallNonZero * bigLabels
del smallNonZero
# get labels that passed the masking
#passed = numpy.unique(prod)
passed = numpy.bincount(prod.flat).nonzero()[0] # Much faster than unique(), which copies and sorts
# 0 is not a valid label
if passed[0] == 0:
passed = passed[1:]
logMemoryIncrease("After taking product")
del prod
all_label_values = numpy.zeros((bigLabels.max()+1,),
dtype=numpy.uint32)
for i, l in enumerate(passed):
all_label_values[l] = i+1
all_label_values[0] = 0
# tricky: map the old labels to the new ones; labels that didn't pass
# are mapped to zero
result[:] = all_label_values[bigLabels]
logMemoryIncrease("Just before return")
return result
def propagateDirty(self, slot, subindex, roi):
if slot == self.SmallLabels or slot == self.BigLabels:
self.Output.setDirty(slice(None))
else:
assert False, "Unknown input slot: {}".format(slot.name)
|
nielsbuwen/ilastik
|
ilastik/applets/thresholdTwoLevels/thresholdingTools.py
|
Python
|
gpl-3.0
| 16,113
|
[
"Gaussian"
] |
cc7b992b3ff6a467b5baec277785cc02ea65a0044f80d383170ed95804c7bb65
|
#
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
#
# Active Matter: Swimmer Flow Field Tutorial
#
##########################################################################
import os
import espressomd
from espressomd import assert_features, lb
assert_features(["ENGINE", "CUDA", "MASS", "ROTATION", "ROTATIONAL_INERTIA"])
## Exercise 1 ##
# Create a routine to read in the hydrodynamic type
# (pusher/puller) and position at which the particle
# is initiated; set the variables 'mode' and 'pos' to
# these values, respectively.
...
mode = ...
pos = ...
##########################################################################
## Exercise 2 ##
# Create an output directory that is labeled according
# to the value of the mode and position; use the variable
# 'outdir' to store this path (a possible sketch follows below)
outdir = ...
os.makedirs(outdir, exist_ok=True)
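# --- Editor's addition: one possible solution sketch for Exercises 1 and 2,
# kept commented out so the exercise blanks above remain to be filled in. It
# assumes the script is invoked as `python flow_field.py <pusher|puller> <z-offset>`;
# the argument names and output path are illustrative only.
#
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument('mode', choices=['pusher', 'puller'])
# parser.add_argument('pos', type=float)
# args = parser.parse_args()
# mode, pos = args.mode, args.pos
# outdir = "./RESULTS_FLOW_FIELD/MODE_{}_POS_{}".format(mode, pos)
# os.makedirs(outdir, exist_ok=True)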
# System parameters
LENGTH = 25.0
PROD_STEPS = 1000
PROD_LENGTH = 50
TIME_STEP = 0.01
system = espressomd.System(box_l=[LENGTH, LENGTH, LENGTH])
system.cell_system.skin = 0.3
system.time_step = TIME_STEP
system.min_global_cut = 1.0
##########################################################################
# Set the position of the particle
## Exercise 3 ##
# Determine the initial position of the particle, which
# should be in the center of the box, and shifted by
# the value of 'pos' in the direction of the z-axis (a possible answer follows below)
x0 = ...
y0 = ...
z0 = ...
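# --- Editor's addition: one possible answer to Exercise 3, commented out so the
# blanks above remain. The particle sits in the center of the box, shifted by
# 'pos' along the z-axis:
#
# x0 = 0.5 * LENGTH
# y0 = 0.5 * LENGTH
# z0 = 0.5 * LENGTH + pos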
# Sphere size, mass, and moment of inertia, dipole force
sph_size = 0.5
sph_mass = 4.8
Ixyz = 4.8
force = 0.1
## Exercise 4 ##
# Why is the sphere size set to 0.5 (this value is
# an approximation for the real value)? What happens when you
# change the mass and rotational inertia? Why is the value of
# the force chosen to be low?
# Setup the particle
system.part.add(
pos=[x0, y0, z0], type=0, mass=sph_mass, rinertia=[Ixyz, Ixyz, Ixyz],
swimming={'f_swim': force, 'mode': mode, 'dipole_length': sph_size + 0.5})
## Exercise 5 ##
# Why is the dipole_length chosen in this way?
# What happens if you make the length go to zero?
# Why does this happen?
##########################################################################
# Setup the fluid (quiescent)
lbf = lb.LBFluidGPU(agrid=1.0, dens=1.0, visc=1.0,
tau=TIME_STEP)
## Exercise 6 ##
# Can the particle rotate in the flow field?
system.actors.add(lbf)
system.thermostat.set_lb(LB_fluid=lbf, gamma=20.0, seed=42)
##########################################################################
# Output the coordinates
with open("{}/trajectory.dat".format(outdir), 'w') as outfile:
print("####################################################", file=outfile)
print("# time position velocity #", file=outfile)
print("####################################################", file=outfile)
# Production run
for k in range(PROD_STEPS):
# Output quantities
print("{time} {pos[0]} {pos[1]} {pos[2]} {vel[0]} {vel[1]} {vel[2]}"
.format(time=system.time, pos=system.part[0].pos, vel=system.part[0].v),
file=outfile)
# Output 50 snapshots over the production run
if k % (PROD_STEPS // 50) == 0:
num = k // (PROD_STEPS // 50)  # integer division, so the VTK file names get integer indices
lbf.write_vtk_velocity("{}/lb_velocity_{}.vtk".format(outdir, num))
system.part.writevtk(
"{}/position_{}.vtk".format(outdir, num), types=[0])
system.integrator.run(PROD_LENGTH)
## Exercise 7 ##
# Use the snapshots and paraview to visualize the final state.
# By appropriately choosing the initial position, you can ensure
# that the swimmer is in the center of the box. Explain why
# the flow lines look the way they do.
|
fweik/espresso
|
doc/tutorials/active_matter/exercises/flow_field.py
|
Python
|
gpl-3.0
| 4,424
|
[
"ESPResSo",
"ParaView",
"VTK"
] |
ce6870e681b489c054c6ad4c1e44439ec875f46c518672d1366bdb8cb375f457
|
#!/usr/bin/python
# encoding: utf-8
"""
colors.py
A list of predefined PyMOL colors.
Created by Shane O'Connor 2014.
"""
import traceback
import colorsys
import matplotlib.colors as mpl_colors
from klab.gfx.colors import ggplot_color_wheel
# How to update this list:
#
# Go to http://pymolwiki.org/index.php/Color_Values and copy the color lines from there. Then run this in a Python terminal:
#
# a = '''[paste the lines]'''
# colors = {}
# lines = a.split('\n')
# for l in lines:
# tokens = [t.strip() for t in l.split() if t.strip()]
# if len(tokens) > 3:
# try:
# r, g, b = float(tokens[1]), float(tokens[2]), float(tokens[3])
# colors[tokens[0]] = (r, g, b, tokens[4:])
# except: pass
# print('predefined = {')
# for k, v in sorted(colors.items()):
# if v[3]:
# print('\t# %s' % str(v[3:]))
# print("\t'%s' : %s," % (k, str(v[:3])))
# print('}')
predefined = {
'actinium' : (0.439215686, 0.670588235, 0.980392157),
'aluminum' : (0.749019608, 0.650980392, 0.650980392),
'americium' : (0.329411765, 0.360784314, 0.949019608),
'antimony' : (0.619607843, 0.388235294, 0.709803922),
'aquamarine' : (0.5, 1.0, 1.0),
'argon' : (0.501960784, 0.819607843, 0.890196078),
'arsenic' : (0.741176471, 0.501960784, 0.890196078),
'astatine' : (0.458823529, 0.309803922, 0.270588235),
'barium' : (0.0, 0.788235294, 0.0),
'berkelium' : (0.541176471, 0.309803922, 0.890196078),
'beryllium' : (0.760784314, 1.0, 0.0),
'bismuth' : (0.619607843, 0.309803922, 0.709803922),
'black' : (0.0, 0.0, 0.0),
'blue' : (0.0, 0.0, 1.0),
'bluewhite' : (0.85, 0.85, 1.0),
'bohrium' : (0.878431373, 0.0, 0.219607843),
'boron' : (1.0, 0.709803922, 0.709803922),
'br0' : (0.1, 0.1, 1.0),
'br1' : (0.2, 0.1, 0.9),
'br2' : (0.3, 0.1, 0.8),
'br3' : (0.4, 0.1, 0.7),
'br4' : (0.5, 0.1, 0.6),
'br5' : (0.6, 0.1, 0.5),
'br6' : (0.7, 0.1, 0.4),
'br7' : (0.8, 0.1, 0.3),
'br8' : (0.9, 0.1, 0.2),
'br9' : (1.0, 0.1, 0.1),
'brightorange' : (1.0, 0.7, 0.2),
'bromine' : (0.650980392, 0.160784314, 0.160784314),
'brown' : (0.65, 0.32, 0.17),
'cadmium' : (1.0, 0.850980392, 0.560784314),
'calcium' : (0.239215686, 1.0, 0.0),
'californium' : (0.631372549, 0.211764706, 0.831372549),
'carbon' : (0.2, 1.0, 0.2),
'cerium' : (1.0, 1.0, 0.780392157),
'cesium' : (0.341176471, 0.090196078, 0.560784314),
'chartreuse' : (0.5, 1.0, 0.0),
'chlorine' : (0.121568627, 0.941176471, 0.121568627),
'chocolate' : (0.555, 0.222, 0.111),
'chromium' : (0.541176471, 0.6, 0.780392157),
'cobalt' : (0.941176471, 0.564705882, 0.62745098),
'copper' : (0.784313725, 0.501960784, 0.2),
'curium' : (0.470588235, 0.360784314, 0.890196078),
'cyan' : (0.0, 1.0, 1.0),
'darksalmon' : (0.73, 0.55, 0.52),
'dash' : (1.0, 1.0, 0.0),
'deepblue' : (0.25, 0.25, 0.65),
'deepolive' : (0.6, 0.6, 0.1),
'deeppurple' : (0.6, 0.1, 0.6),
'deepsalmon' : (1.0, 0.42, 0.42),
'deepteal' : (0.1, 0.6, 0.6),
'density' : (0.1, 0.1, 0.6),
'deuterium' : (0.9, 0.9, 0.9),
'dirtyviolet' : (0.7, 0.5, 0.5),
'dubnium' : (0.819607843, 0.0, 0.309803922),
'dysprosium' : (0.121568627, 1.0, 0.780392157),
'einsteinium' : (0.701960784, 0.121568627, 0.831372549),
'erbium' : (0.0, 0.901960784, 0.458823529),
'europium' : (0.380392157, 1.0, 0.780392157),
'fermium' : (0.701960784, 0.121568627, 0.729411765),
'firebrick' : (0.698, 0.13, 0.13),
'fluorine' : (0.701960784, 1.0, 1.0),
'forest' : (0.2, 0.6, 0.0),
'francium' : (0.258823529, 0.0, 0.4),
'gadolinium' : (0.270588235, 1.0, 0.780392157),
'gallium' : (0.760784314, 0.560784314, 0.560784314),
'germanium' : (0.4, 0.560784314, 0.560784314),
'gold' : (1.0, 0.819607843, 0.137254902),
'gray' : (0.5, 0.5, 0.5),
'green' : (0.0, 1.0, 0.0),
'greencyan' : (0.25, 1.0, 0.75),
'grey' : (0.5, 0.5, 0.5),
'grey10' : (0.1, 0.1, 0.1),
'grey30' : (0.3, 0.3, 0.3),
'grey40' : (0.4, 0.4, 0.4),
'grey60' : (0.6, 0.6, 0.6),
'grey70' : (0.7, 0.7, 0.7),
'grey80' : (0.8, 0.8, 0.8),
'grey90' : (0.9, 0.9, 0.9),
'hafnium' : (0.301960784, 0.760784314, 1.0),
'hassium' : (0.901960784, 0.0, 0.180392157),
'helium' : (0.850980392, 1.0, 1.0),
'holmium' : (0.0, 1.0, 0.611764706),
'hotpink' : (1.0, 0.0, 0.5),
'hydrogen' : (0.9, 0.9, 0.9),
'indium' : (0.650980392, 0.458823529, 0.450980392),
'iodine' : (0.580392157, 0.0, 0.580392157),
'iridium' : (0.090196078, 0.329411765, 0.529411765),
'iron' : (0.878431373, 0.4, 0.2),
'krypton' : (0.360784314, 0.721568627, 0.819607843),
'lanthanum' : (0.439215686, 0.831372549, 1.0),
'lawrencium' : (0.780392157, 0.0, 0.4),
'lead' : (0.341176471, 0.349019608, 0.380392157),
'lightblue' : (0.75, 0.75, 1.0),
'lightmagenta' : (1.0, 0.2, 0.8),
'lightorange' : (1.0, 0.8, 0.5),
'lightpink' : (1.0, 0.75, 0.87),
'lightteal' : (0.4, 0.7, 0.7),
'lime' : (0.5, 1.0, 0.0),
'limegreen' : (0.0, 1.0, 0.5),
'limon' : (0.75, 1.0, 0.25),
'lithium' : (0.8, 0.501960784, 1.0),
'lutetium' : (0.0, 0.670588235, 0.141176471),
'magenta' : (1.0, 0.0, 1.0),
'magnesium' : (0.541176471, 1.0, 0.0),
'manganese' : (0.611764706, 0.478431373, 0.780392157),
'marine' : (0.0, 0.5, 1.0),
'meitnerium' : (0.921568627, 0.0, 0.149019608),
'mendelevium' : (0.701960784, 0.050980392, 0.650980392),
'mercury' : (0.721568627, 0.721568627, 0.815686275),
'molybdenum' : (0.329411765, 0.709803922, 0.709803922),
'neodymium' : (0.780392157, 1.0, 0.780392157),
'neon' : (0.701960784, 0.890196078, 0.960784314),
'neptunium' : (0.0, 0.501960784, 1.0),
'nickel' : (0.31372549, 0.815686275, 0.31372549),
'niobium' : (0.450980392, 0.760784314, 0.788235294),
'nitrogen' : (0.2, 0.2, 1.0),
'nobelium' : (0.741176471, 0.050980392, 0.529411765),
'olive' : (0.77, 0.7, 0.0),
'orange' : (1.0, 0.5, 0.0),
'osmium' : (0.149019608, 0.4, 0.588235294),
'oxygen' : (1.0, 0.3, 0.3),
'palecyan' : (0.8, 1.0, 1.0),
'palegreen' : (0.65, 0.9, 0.65),
'paleyellow' : (1.0, 1.0, 0.5),
'palladium' : (0.0, 0.411764706, 0.521568627),
'phosphorus' : (1.0, 0.501960784, 0.0),
'pink' : (1.0, 0.65, 0.85),
'platinum' : (0.815686275, 0.815686275, 0.878431373),
'plutonium' : (0.0, 0.419607843, 1.0),
'polonium' : (0.670588235, 0.360784314, 0.0),
'potassium' : (0.560784314, 0.250980392, 0.831372549),
'praseodymium' : (0.850980392, 1.0, 0.780392157),
'promethium' : (0.639215686, 1.0, 0.780392157),
'protactinium' : (0.0, 0.631372549, 1.0),
'purple' : (0.75, 0.0, 0.75),
'purpleblue' : (0.5, 0.0, 1.0),
'radium' : (0.0, 0.490196078, 0.0),
'radon' : (0.258823529, 0.509803922, 0.588235294),
'raspberry' : (0.7, 0.3, 0.4),
'red' : (1.0, 0.0, 0.0),
'rhenium' : (0.149019608, 0.490196078, 0.670588235),
'rhodium' : (0.039215686, 0.490196078, 0.549019608),
'rubidium' : (0.439215686, 0.180392157, 0.690196078),
'ruby' : (0.6, 0.2, 0.2),
'ruthenium' : (0.141176471, 0.560784314, 0.560784314),
'rutherfordium' : (0.8, 0.0, 0.349019608),
'salmon' : (1.0, 0.6, 0.6),
'samarium' : (0.560784314, 1.0, 0.780392157),
'sand' : (0.72, 0.55, 0.3),
'scandium' : (0.901960784, 0.901960784, 0.901960784),
'seaborgium' : (0.850980392, 0.0, 0.270588235),
'selenium' : (1.0, 0.631372549, 0.0),
'silicon' : (0.941176471, 0.784313725, 0.62745098),
'silver' : (0.752941176, 0.752941176, 0.752941176),
'skyblue' : (0.2, 0.5, 0.0),
'slate' : (0.5, 0.5, 1.0),
'smudge' : (0.55, 0.7, 0.4),
'sodium' : (0.670588235, 0.360784314, 0.949019608),
'splitpea' : (0.52, 0.75, 0.0),
'strontium' : (0.0, 1.0, 0.0),
'sulfur' : (0.9, 0.775, 0.25),
'tantalum' : (0.301960784, 0.650980392, 1.0),
'teal' : (0.0, 0.75, 0.75),
'technetium' : (0.231372549, 0.619607843, 0.619607843),
'tellurium' : (0.831372549, 0.478431373, 0.0),
'terbium' : (0.188235294, 1.0, 0.780392157),
'thallium' : (0.650980392, 0.329411765, 0.301960784),
'thorium' : (0.0, 0.729411765, 1.0),
'thulium' : (0.0, 0.831372549, 0.321568627),
'tin' : (0.4, 0.501960784, 0.501960784),
'titanium' : (0.749019608, 0.760784314, 0.780392157),
'tungsten' : (0.129411765, 0.580392157, 0.839215686),
'tv_blue' : (0.3, 0.3, 1.0),
'tv_green' : (0.2, 1.0, 0.2),
'tv_orange' : (1.0, 0.55, 0.15),
'tv_red' : (1.0, 0.2, 0.2),
'tv_yellow' : (1.0, 1.0, 0.2),
'uranium' : (0.0, 0.560784314, 1.0),
'vanadium' : (0.650980392, 0.650980392, 0.670588235),
'violet' : (1.0, 0.5, 1.0),
'violetpurple' : (0.55, 0.25, 0.6),
'warmpink' : (0.85, 0.2, 0.5),
'wheat' : (0.99, 0.82, 0.65),
'white' : (1.0, 1.0, 1.0),
'xenon' : (0.258823529, 0.619607843, 0.690196078),
'yellow' : (1.0, 1.0, 0.0),
'yelloworange' : (1.0, 0.87, 0.37),
'ytterbium' : (0.0, 0.749019608, 0.219607843),
'yttrium' : (0.580392157, 1.0, 1.0),
'zinc' : (0.490196078, 0.501960784, 0.690196078),
'zirconium' : (0.580392157, 0.878431373, 0.878431373),
}
default_color_scheme = {
'global' : {
'background-color' : 'white'
},
'Scaffold' : {
'bb' : 'grey30',
'hetatm' : 'grey60',
'mutations' : 'grey80'
},
'RosettaModel' : {
'bb' : 'brightorange',
'hetatm' : 'deepolive',
'mutations' : 'yellow'
},
'ExpStructure' : {
'bb' : 'violetpurple',
'hetatm' : 'warmpink',
'mutations' : 'magenta'
},
}
# todo: I now specify protein color and display options in PyMOLStructureBase objects. Rewrite this code so that default_color_scheme
# specifies global options e.g. view options, background colors. This will probably be easier if the other PSE builders
# are rewritten to match MultiStructureBuilder.
class PyMOLStructureBase(object):
'''A simple structure-less class to store parameters used to display a structure. Open to heavy modification as we add more
customization.'''
def __init__(self, backbone_color = 'white', backbone_display = 'cartoon',
sidechain_color = 'grey80', sidechain_display = 'sticks',
hetatm_color = 'grey60', hetatm_display = 'sticks',
visible = True):
self.backbone_color = backbone_color or 'white'
self.backbone_display = backbone_display or 'cartoon'
self.sidechain_color = sidechain_color or 'grey80'
self.sidechain_display = sidechain_display or 'sticks'
self.hetatm_color = hetatm_color or 'grey60'
self.hetatm_display = hetatm_display or 'sticks'
self.visible = visible
class PyMOLStructure(PyMOLStructureBase):
'''A simple structure-containing class to store parameters used to display a structure. Open to heavy modification as we add more
customization.'''
def __init__(self, pdb_object, structure_name, residues_of_interest = [], label_all_residues_of_interest = False, **kwargs):
'''The chain_seed_color kwarg can be either:
- a triple of R,G,B values e.g. [0.5, 1.0, 0.75] where each value is between 0.0 and 1.0;
- a hex string #RRGGBB e.g. #77ffaa;
- a name defined in the predefined dict above e.g. "aquamarine".
'''
self.pdb_object = pdb_object
self.structure_name = structure_name
self.add_residues_of_interest(residues_of_interest)
self.label_all_residues_of_interest = label_all_residues_of_interest
self.chain_colors = kwargs.get('chain_colors') or {}
# Set up per-chain colors
try:
if not self.chain_colors and kwargs.get('chain_seed_color'):
chain_seed_color = kwargs.get('chain_seed_color')
if isinstance(chain_seed_color, str):
if chain_seed_color.startswith('#'):
if len(chain_seed_color) != 7:
chain_seed_color = None
else:
trpl = predefined.get(chain_seed_color)
chain_seed_color = None
if trpl:
chain_seed_color = mpl_colors.rgb2hex(trpl)
elif isinstance(chain_seed_color, list) and len(chain_seed_color) == 3:
chain_seed_color = mpl_colors.rgb2hex(chain_seed_color)
if chain_seed_color and chain_seed_color.startswith('#') and len(chain_seed_color) == 7:
# todo: We are moving between color spaces multiple times so are probably introducing artifacts due to rounding. Rewrite this to minimize this movement.
chain_seed_color = chain_seed_color[1:]
# colorsys.rgb_to_hls returns (hue, lightness, saturation)
hls_color = colorsys.rgb_to_hls(int(chain_seed_color[0:2], 16)/255.0, int(chain_seed_color[2:4], 16)/255.0, int(chain_seed_color[4:6], 16)/255.0)
chain_seed_hue = int(360.0 * hls_color[0])
chain_seed_lightness = max(0.15, hls_color[1]) # floor these values, otherwise some colors e.g. near-black will not yield any alternate colors
chain_seed_saturation = max(0.15, hls_color[2])
min_colors_in_wheel = 4 # choose at least 4 colors - this usually results in a wider variety of colors and prevents clashes e.g. given 2 chains in both mut and wt, wt seeded with blue, and mut seeded with yellow, we will get a clash
chain_ids = sorted(pdb_object.atom_sequences.keys())
# Choose complementary colors, respecting the original saturation and lightness values
chain_colors = ggplot_color_wheel(max(len(chain_ids), min_colors_in_wheel), start = chain_seed_hue, saturation_adjustment = None, saturation = chain_seed_saturation, lightness = chain_seed_lightness)
assert(len(chain_colors) >= len(chain_ids))
self.chain_colors = {}
for i in range(len(chain_ids)):
self.chain_colors[chain_ids[i]] = str(list(mpl_colors.hex2color('#' + chain_colors[i])))
# Force use of the original seed as this may have been altered above in the "= max(" statements
self.chain_colors[chain_ids[0]] = str(list(mpl_colors.hex2color('#' + chain_seed_color)))
except Exception as e:
print('An exception occurred while setting the chain colors. Ignoring the exception and resuming with default colors.')
print(str(e))
print(traceback.format_exc())
super(PyMOLStructure, self).__init__(
backbone_color = kwargs.get('backbone_color'), backbone_display = kwargs.get('backbone_display'),
sidechain_color = kwargs.get('sidechain_color'), sidechain_display = kwargs.get('sidechain_display'),
hetatm_color = kwargs.get('hetatm_color'), hetatm_display = kwargs.get('hetatm_display'),
visible = kwargs.get('visible', True),
)
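# Editor's addition -- a minimal usage sketch; `my_pdb` stands in for a PDB
# object exposing the `atom_sequences` dict (keyed by chain ID) used above:
#
# s = PyMOLStructure(my_pdb, 'wildtype', residues_of_interest=['A 42'],
#                    chain_seed_color='aquamarine', backbone_display='cartoon')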
def add_residues_of_interest(self, residues_of_interest):
# todo: we should check the residue IDs against the PDB object to make sure that the coordinates exist
# For now, do a simple assignment
if residues_of_interest:
self.residues_of_interest = residues_of_interest
default_display_scheme = dict(
GenericProtein = PyMOLStructureBase(),
)
def create_new_color_command(color_name, r, g, b):
return 'set_color %(color_name)s, [%(r).10f,%(g).10f,%(b).10f]' % vars()
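# Editor's addition -- example of the command string this helper produces:
# create_new_color_command('warmgrey', 0.8, 0.75, 0.7)
# -> set_color warmgrey, [0.8000000000,0.7500000000,0.7000000000]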
class ColorScheme(object):
'''A dict wrapper class. The dict that is stored is intended to have a tree structure. The paths of the tree describe
how the color should be used e.g. RosettaModel.bb should be used to color the backbone of a Rosetta model. The leaves of the
tree are colors. If a new color is needed, use the create_new_color_command function to define the new color in
the script before use.'''
def __init__(self, custom_color_scheme = {}):
'''If a color_scheme is passed in then this is merged with the default color scheme.'''
color_scheme = {}
color_scheme.update(default_color_scheme)
display_scheme = {}
display_scheme.update(default_display_scheme)
if custom_color_scheme:
assert(type(custom_color_scheme) == type(predefined))
color_scheme.update(custom_color_scheme)
self.color_scheme = color_scheme
self.name = 'Default'
def update(self, path, node):
'''Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict.'''
assert(type(path) == type(self.name))
assert(type(node) == type(self.name) or type(node) == type(predefined))
d = self.color_scheme
tokens = path.split('.')
for t in tokens[:-1]:
d = d.get(t)
if d is None:
raise Exception("Path '%s' not found." % path)
d[tokens[-1]] = node
def lookup(self, path, must_be_leaf = False):
'''Looks up a part of the color scheme. If used for looking up colors, must_be_leaf should be True.'''
assert(type(path) == type(self.name))
d = self.color_scheme
tokens = path.split('.')
for t in tokens[:-1]:
d = d.get(t)
if d is None:
raise Exception("Path '%s' not found." % path)
if must_be_leaf:
assert(type(d[tokens[-1]]) == type(self.name))
return d[tokens[-1]]
def __repr__(self):
return str(self.color_scheme)
def __getitem__(self, path):
'''This lets us use the object somewhat like a dict where we do a lookup using a path e.g. cs['Scaffold.mutations']
This also lets us use the object in a string formatting e.g. print('%(Scaffold.mutations)s' % cs) which is useful
for the PyMOL script generators.'''
return self.lookup(path)
if __name__ == '__main__':
cs = ColorScheme()
cs.update('ExpStructure.b', 'thallium')
cs.update('ExpStructure.mutations', 'thallium')
print('')
print(cs.lookup('ExpStructure.b', must_be_leaf = True))
print(cs['Scaffold.mutations'])
print('Testing string formatting: Scaffold.mutations = %(Scaffold.mutations)s, RosettaModel.hetatm = %(RosettaModel.hetatm)s.' % cs)
print(cs['global.background-color'])
print('')
cs = ColorScheme({'global' : {'background-color' : 'black'}})
print(cs)
print(cs['global.background-color'])
print('')
|
Kortemme-Lab/klab
|
klab/bio/pymolmod/colors.py
|
Python
|
mit
| 19,019
|
[
"PyMOL"
] |
cd524f4a8533b1cb64f5d784a43776ab36b687c17e4e47c9149a5740b1577d09
|